content | id |
---|---|
def resnet101(pretrained=False, num_groups=None, weight_std=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], num_groups=num_groups, weight_std=weight_std, **kwargs)
if pretrained:
model_dict = model.state_dict()
if num_groups and weight_std:
pretrained_dict = torch.load('data/R-101-GN-WS.pth.tar')
overlap_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
assert len(overlap_dict) == 312
elif not num_groups and not weight_std:
pretrained_dict = model_zoo.load_url(model_urls['resnet101'])
overlap_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
else:
raise ValueError('Currently only support BN or GN+WS')
model_dict.update(overlap_dict)
model.load_state_dict(model_dict)
return model | 1,000 |
def cron(cronline, venusian_category='irc3.plugins.cron'):
"""main decorator"""
def wrapper(func):
def callback(context, name, ob):
obj = context.context
crons = obj.get_plugin(Crons)
if info.scope == 'class':
callback = getattr(
obj.get_plugin(ob),
func.__name__)
else:
callback = irc3.utils.wraps_with_context(func, obj)
crons.add_cron(cronline, callback)
info = venusian.attach(func, callback, category=venusian_category)
return func
return wrapper | 1,001 |
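# Hypothetical usage sketch for the cron decorator above (not part of the dataset row):
# inside an irc3 plugin module a function can be scheduled with a crontab line.
# The crontab string, channel name and message are made-up illustration values.
@cron('30 8 * * *')
def morning_report(bot):
    bot.privmsg('#channel', 'good morning')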
def test_all_pr():
"""
Tests that all_pr matches a hand-obtained solution.
"""
pos_scores = [-1., 0., 2.]
neg_scores = [-2., 0., 1.]
# scores: [2., 1., 0., 0., -1., -2.]
# labels: [1, 0, 1, 0, 1, 0 ]
precision = [1., 1., .5, .5, .6, 3. / 6.]
recall = [0., 1. / 3., 1. / 3., 2. / 3., 1., 1.]
p, r = all_pr(pos_scores, neg_scores)
assert len(p) == len(precision)
assert len(r) == len(recall)
# The actual function should do exactly the same arithmetic on
# integers so we should get exactly the same floating point values
for i in xrange(len(p)):
assert p[i] == precision[i], (i, p[i], precision[i])
assert r[i] == recall[i], (i, r[i], recall[i]) | 1,002 |
def merge(
_0: pandas.core.frame.DataFrame,
_1: pandas.core.frame.DataFrame,
/,
*,
how: Literal["inner"],
shuffle: Literal["disk"],
):
"""
usage.dask: 1
"""
... | 1,003 |
def oauth_type():
"""Check if Slack or another OAuth has been configured"""
if "OAUTH_TYPE" in current_app.config:
return current_app.config["OAUTH_TYPE"].lower()
else:
return None | 1,004 |
def _get_scenarios(rule_dir, scripts, scenarios_regex, benchmark_cpes):
""" Returns only valid scenario files, rest is ignored (is not meant
to be executed directly.
"""
if scenarios_regex is not None:
scenarios_pattern = re.compile(scenarios_regex)
scenarios = []
for script in scripts:
if scenarios_regex is not None:
if scenarios_pattern.match(script) is None:
logging.debug("Skipping script %s - it did not match --scenarios regex" % script)
continue
script_context = _get_script_context(script)
if script_context is not None:
script_params = _parse_parameters(os.path.join(rule_dir, script))
if common.matches_platform(script_params["platform"], benchmark_cpes):
scenarios += [Scenario(script, script_context, script_params)]
else:
logging.info("Script %s is not applicable on given platform" % script)
return scenarios | 1,005 |
def create_categories():
"""Create a group of random strings for each column in the table."""
return [
[
''.join(random.choices(string.ascii_lowercase, k=random.randint(STR_MIN, STR_MAX)))
for _i in range(CAT_COUNT)
]
for _j in range(COL_COUNT)
] | 1,006 |
def get_asp_output_folder(project_name):
"""
:type project_name: string
"""
loc = PROJECT_RESULTS_LOC + project_name + '/' + PROJECT_ASP_OUTPUT_FOLDER
mkdir_p(loc)
return os.path.abspath(loc) | 1,007 |
def _compute_comm_classes(
A: Union[np.ndarray, spmatrix]
) -> Tuple[List[List[Any]], bool]:
"""Compute communication classes for a graph given by A."""
di_graph = (
nx.from_scipy_sparse_matrix(A, create_using=nx.DiGraph)
if issparse(A)
else nx.from_numpy_array(A, create_using=nx.DiGraph)
)
comm_classes = sorted(
nx.strongly_connected_components(di_graph), key=len, reverse=True
)
is_irreducible = len(comm_classes) == 1
return comm_classes, is_irreducible | 1,008 |
def parse_lines(lines: typing.List[str],
units: Units,
use_na: bool = True) -> typing.List[typing.Dict[str, typing.Any]]:
"""
Returns a list of parsed line dictionaries
"""
parsed_lines = []
prob = ''
while lines:
raw_line = lines[0].strip()
line = core.sanitize_line(raw_line)
# Remove prob from the beginning of a line
if line.startswith('PROB'):
# Add standalone prob to next line
if len(line) == 6:
prob = line
line = ''
# Add to current line
elif len(line) > 6:
prob = line[:6]
line = line[6:].strip()
if line:
parsed_line = (parse_na_line if use_na else parse_in_line)(line, units)
for key in ('start_time', 'end_time'):
parsed_line[key] = core.make_timestamp(parsed_line[key])
parsed_line['probability'] = core.make_number(prob[4:])
parsed_line['raw'] = raw_line
parsed_line['sanitized'] = prob + ' ' + line if prob else line
prob = ''
parsed_lines.append(parsed_line)
lines.pop(0)
return parsed_lines | 1,009 |
def rand_cutout(np_img, pcts=(0.05, 0.4), depth=(1., 0.), max_k=1):
"""Cut out from image, and edges of rectangles are smooth.
Returns:
applied image, cut mask
"""
cut = np.ones(np_img.shape[:2])
k = random.randint(1, max_k)
for _ in range(k):
d = random.random() * depth[0] + depth[1]
hill = rand_solid_hill((np_img.shape[1], np_img.shape[0]), pcts=pcts)
cut = cut * (1 - d * hill)
return np_img * cut[..., np.newaxis], (cut < 0.9).astype(np.int8) | 1,010 |
def create_content_list(contents: List[str]) -> str:
"""Format list of string into markdown list
Args:
contents: (List[string]), list of string to be formatted
Returns:
String
"""
return '\n'.join(
[template.LIST_TEMPLATE.format(
level='',
content=item
) for item in contents]) | 1,011 |
def publish_agent(ctx: Context):
"""Publish an agent."""
try_to_load_agent_config(ctx)
check_is_author_logged_in(ctx.agent_config.author)
name = ctx.agent_config.agent_name
config_file_source_path = os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE)
output_tar = os.path.join(ctx.cwd, "{}.tar.gz".format(name))
with tempfile.TemporaryDirectory() as temp_dir:
package_dir = os.path.join(temp_dir, name)
os.makedirs(package_dir)
config_file_target_path = os.path.join(package_dir, DEFAULT_AEA_CONFIG_FILE)
shutil.copy(config_file_source_path, config_file_target_path)
_compress(output_tar, package_dir)
data = {
"name": name,
"description": ctx.agent_config.description,
"version": ctx.agent_config.version,
"connections": ctx.agent_config.connections,
"contracts": ctx.agent_config.contracts,
"protocols": ctx.agent_config.protocols,
"skills": ctx.agent_config.skills,
}
path = "/agents/create"
logger.debug("Publishing agent {} to Registry ...".format(name))
resp = request_api("POST", path, data=data, is_auth=True, filepath=output_tar)
click.echo(
"Successfully published agent {} to the Registry. Public ID: {}".format(
name, resp["public_id"]
)
) | 1,012 |
def test_sample_clobber_config(tmpdir):
"""Verify --sample won't clobber config if it already exists."""
with tmpdir.as_cwd(), pytest.raises(sh.ErrorReturnCode_1) as error:
Path("mailmerge_server.conf").touch()
sh.mailmerge("--sample")
stdout = error.value.stdout.decode("utf-8")
stderr = error.value.stderr.decode("utf-8")
assert stdout == ""
assert "Error: file exists: mailmerge_server.conf" in stderr | 1,013 |
def determine_degree_of_endstopping(model, model_folder, block_managers, blocks_to_look_at=None):
"""
For a list of blocks:
Determines the degree of end-stopping and saves these values to the
'[folder]/meta.json' file and saves the activations to an npy file:
'[folder]/[prefix]-deg_of_es_activations.npy' (see _extract_deg_of_es)
Parameters
----------
model : nn.Module
A pytorch neural network
model_folder : str
Folder in which the meta.json file and the activation npy files are saved
block_managers : list of IBlockManager
blocks_to_look_at : list of nn.Module, optional
by default None, look at all modules of a model
"""
print('Degree of end-stopping...')
block_functions = [
mngr.extract_deg_of_es for mngr in block_managers
]
_iterate_blocks(
model_folder, model, block_functions, blocks_to_look_at=blocks_to_look_at
) | 1,014 |
def autolabel(ax: plt.Axes, rects: List[plt.Rectangle],
y_range: Tuple[float, float],
bottom: bool = False,
color: str = 'black') -> None:
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
x_pos = rect.get_x() + rect.get_width() / 2.0
if not bottom:
y_pos = 0.2 * (max(*y_range) - min(*y_range))
ax.text(x_pos, y_pos,
'{:02.2f}'.format(height),
ha='center', va='bottom', weight='bold',
rotation='vertical',
color=color)
else:
y_pos = rect.get_y()
ax.text(x_pos, y_pos,
'{:02.2f}'.format(height),
ha='center', va='top', weight='bold',
rotation='vertical',
color=color) | 1,015 |
def convert(key: str, content: str, output_format: OWLFormat=OWLFormat.func) -> Optional[str]:
"""
Convert content into output_format
:param key: Key of content for error reporting
:param content: OWL representation
:param output_format: target format
:return: Converted information if successful
"""
try:
resp = requests.post('https://www.ldf.fi/service/owl-converter/',
data=dict(onto=content, to=output_format.name))
except requests.exceptions.ConnectionError as e:
logging.getLogger().error(f"{key}: {str(e)}")
return None
if resp.ok:
return resp.text
logging.getLogger().error(f"{key}: {str(resp)}") | 1,016 |
def _post_single_image(client: Imgur, image_path, title, description=None):
"""
Limit to 1250 POST requests per hour and 12500 per day
"""
image = client.image_upload(image_path, title, description)
# album_id = client.album_get('Family Photos')['response']['data']['id']
# client.album_add(album_id, image['response']['data']['id'])
return image['response']['data']['link'] | 1,017 |
def load_data(path, start=0, end=99999, step=1, returnNames = False):
"""Load images into a list
#Arguments
path: String path to a folder containing images that must be named as numbers
start,end,step: Refer to the numeric names of the images. Only loads
images within this range.
"""
imgs = load_imgs(path,start,end,step,returnNames = returnNames)
return imgs | 1,018 |
async def test_patch_self_relative(api_client):
"""
Check that a citizen can list themselves as their own relative.
Reminiscent of the movie Timecop, isn't it?
"""
dataset = [
generate_citizen(
citizen_id=1, name='Джейн', gender='male',
birth_date='13.09.1945', town='Нью-Йорк', relatives=[]
),
]
import_id = await import_data(api_client, dataset)
dataset[0]['relatives'] = [dataset[0]['citizen_id']]
actual = await patch_citizen(
api_client, import_id, dataset[0]['citizen_id'],
data={k: v for k, v in dataset[0].items() if k != 'citizen_id'}
)
assert compare_citizens(dataset[0], actual) | 1,019 |
def test_list_nmtoken_max_length_nistxml_sv_iv_list_nmtoken_max_length_1_3(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-1-3.xml",
class_name="NistschemaSvIvListNmtokenMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 1,020 |
def run_build_commands_with(msg, cmds):
"""Run general build commands"""
window, view, file_shown_in_view = get_haskell_command_window_view_file_project()
if not file_shown_in_view:
return
syntax_file_for_view = view.settings().get('syntax').lower()
if 'haskell' not in syntax_file_for_view:
return
cabal_project_dir, cabal_project_name = get_cabal_project_dir_and_name_of_view(view)
if not cabal_project_dir:
return
run_chain_build_thread(view, cabal_project_dir, msg(cabal_project_name), cmds) | 1,021 |
def test_app_creation_failed_no_message_to_has_defects(subtests):
"""Test exception raised when Mailgun Domain is undefined."""
for to_header, defects_listing in [
('a', '- addr-spec local part with no domain'),
('a, <b@c', ('- addr-spec local part with no domain\n'
"- missing trailing '>' on angle-addr"))
]:
with subtests.test(to_header=to_header,
defects_listing=defects_listing):
with mock.patch.dict('os.environ',
_an_environment_with(
message_to=to_header),
clear=True):
with pytest.raises(
misakoba_mail.exceptions.InvalidMessageToError,
match=f"MESSAGE_TO config value '{to_header}' "
'has the following defects:\n'
f'{defects_listing}'):
misakoba_mail.app.create_app() | 1,022 |
def animated_1d_plot(probe_data_dnf: np.ndarray,
probe_data_input1: np.ndarray,
probe_data_input2: np.ndarray,
interval: ty.Optional[int] = 30) -> None:
"""Generates an animated plot for examples in the DNF regimes tutorial.
Parameters
----------
probe_data_dnf : numpy.ndarray
probe data of the DNF
probe_data_input1 : numpy.ndarray
probe data of the first spiking input
probe_data_input2 : numpy.ndarray
probe data of the second spiking input
interval : int
interval to use in matplotlib.animation.FuncAnimation
"""
probe_data_input = probe_data_input1 + probe_data_input2
probe_data_input = probe_data_input.astype(float)
probe_data_dnf = probe_data_dnf.astype(float)
probe_data_input = np.transpose(probe_data_input)
probe_data_dnf = np.transpose(probe_data_dnf)
num_neurons = np.size(probe_data_input, axis=1)
num_time_steps = np.size(probe_data_dnf, axis=0)
input_spike_rates = compute_spike_rates(probe_data_input)
dnf_spike_rates = compute_spike_rates(probe_data_dnf)
fig, ax = plt.subplots(2, 1, figsize=(10, 5))
line0, = ax[0].plot(np.zeros((num_neurons,)), 'bo-')
line1, = ax[1].plot(np.zeros((num_neurons,)), 'ro-')
im = [line0, line1]
ax[0].set_xlabel("")
ax[1].set_xlabel("Input neuron idx")
ax[0].set_ylabel("Input spike rate")
ax[1].set_ylabel("DNF spike rate")
ax[0].set_xticks([])
ax[1].set_xticks([0, num_neurons - 1])
ax[0].set_yticks([0, 1])
ax[1].set_yticks([0, 1])
ax[0].set_xlim(-1, num_neurons)
ax[1].set_xlim(-1, num_neurons)
offset = 0.1
ax[0].set_ylim(np.min(input_spike_rates) - offset,
np.max(input_spike_rates) + offset)
ax[1].set_ylim(np.min(dnf_spike_rates) - offset,
np.max(dnf_spike_rates) + offset)
plt.tight_layout()
def animate(i: int) -> ty.List:
x = range(num_neurons)
im[0].set_data(x, input_spike_rates[i, :])
im[1].set_data(x, dnf_spike_rates[i, :])
return im
anim = animation.FuncAnimation(fig,
animate,
frames=num_time_steps,
interval=interval,
blit=True)
html = display.HTML(anim.to_jshtml())
display.display(html)
plt.close() | 1,023 |
def main():
"""
Main entry point for module execution
:returns: the result form module invocation
"""
required_if = [
("state", "merged", ("config",)),
("state", "replaced", ("config",)),
("state", "rendered", ("config",)),
("state", "overridden", ("config",)),
("state", "parsed", ("running_config",)),
]
mutually_exclusive = [("config", "running_config")]
module = AnsibleModule(
argument_spec=Lldp_interfacesArgs.argument_spec,
required_if=required_if,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
)
result = Lldp_interfaces(module).execute_module()
module.exit_json(**result) | 1,024 |
def create_tendencies(params, return_inner_products=False, return_qgtensor=False):
"""Function to handle the inner products and tendencies tensors construction.
Returns the tendencies function :math:`\\boldsymbol{f}` determining the model's ordinary differential
equations:
.. math:: \dot{\\boldsymbol{x}} = \\boldsymbol{f}(\\boldsymbol{x})
which is for the model's integration.
It returns also the linearized tendencies
:math:`\\boldsymbol{\mathrm{J}} \equiv \\boldsymbol{\mathrm{D}f} = \\frac{\partial \\boldsymbol{f}}{\partial \\boldsymbol{x}}`
(Jacobian matrix) which are used by the tangent linear model:
.. math :: \dot{\\boldsymbol{\delta x}} = \\boldsymbol{\mathrm{J}}(\\boldsymbol{x}) \cdot \\boldsymbol{\delta x}
Parameters
----------
params: ~params.params.QgParams
The parameters fully specifying the model configuration.
return_inner_products: bool
If True, return the inner products of the model. Default to False.
return_qgtensor: bool
If True, return the tendencies tensor of the model. Default to False.
Returns
-------
f: callable
The numba-jitted tendencies function.
Df: callable
The numba-jitted linearized tendencies function.
inner_products: (AtmosphericInnerProducts, OceanicInnerProducts)
If `return_inner_products` is True, the inner products of the system.
qgtensor: QgsTensor
If `return_qgtensor` is True, the tendencies tensor of the system.
"""
if params.ablocks is not None:
aip = AtmosphericInnerProducts(params)
else:
aip = None
if params.goblocks is not None and params.gotemperature_params._name == "Oceanic Temperature":
oip = OceanicInnerProducts(params)
else:
oip = None
if aip is not None and oip is not None:
aip.connect_to_ocean(oip)
agotensor = QgsTensor(aip, oip)
coo = agotensor.tensor.coords.T
val = agotensor.tensor.data
@njit
def f(t, x):
xx = np.concatenate((np.full((1,), 1.), x))
xr = sparse_mul3(coo, val, xx, xx)
return xr[1:]
jcoo = agotensor.jacobian_tensor.coords.T
jval = agotensor.jacobian_tensor.data
@njit
def Df(t, x):
xx = np.concatenate((np.full((1,), 1.), x))
mul_jac = sparse_mul2(jcoo, jval, xx)
return mul_jac[1:, 1:]
ret = list()
ret.append(f)
ret.append(Df)
if return_inner_products:
ret.append((aip, oip))
if return_qgtensor:
ret.append(agotensor)
return ret | 1,025 |
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords | 1,026 |
def combined_city_hexes(gpkg_inputs, gpkg_output_hex, cities):
"""
Create a combined layer of all city hexes to facilitate later grouped analyses and plotting.
This is ordered by Continent, Country, City, and hex index.
Parameters
----------
gpkg_inputs: list
list of sample point input geopackages
gpkg_output_hex: str
file path of output geopackage
cities: list
list of city study region names
Returns
-------
none
"""
print(" - combining city hex and basic covariate data")
for i, city in enumerate(tqdm(cities)):
if i==0:
all_city_hexes_combined = gpd.read_file(gpkg_output_hex, layer=city).to_crs(4326)
urban_covariates_combined = gpd.read_file(gpkg_inputs[i], layer='urban_covariates')
else:
all_city_hexes_combined = all_city_hexes_combined.append(gpd.read_file(gpkg_output_hex,
layer=city).to_crs(4326))
urban_covariates_combined = urban_covariates_combined.append(gpd.read_file(gpkg_inputs[i],
layer='urban_covariates'))
print(" - saving to geopackage, ordered by Continent, Country, City, and hex index")
urban_covariate_fields = ['Continent','Country','ISO 3166-1 alpha-2','City']
all_city_hexes_combined = all_city_hexes_combined.set_index('study_region')\
.join(urban_covariates_combined[urban_covariate_fields]\
.set_index('City'))\
.rename_axis('City').reset_index()\
.sort_values(['Continent','Country','City','index']).reset_index(drop=True)
all_city_hexes_combined = all_city_hexes_combined[urban_covariate_fields +
[x for x in all_city_hexes_combined if x not in urban_covariate_fields]]
all_city_hexes_combined.to_file(gpkg_output_hex, layer='all_city_hexes_combined', driver="GPKG")
all_city_hexes_combined[[x for x in all_city_hexes_combined.columns if x!='geometry']]\
.to_csv(gpkg_output_hex.replace('gpkg','csv'),index=False) | 1,027 |
def script(text, interpreter="sh"):
"""Execute a shell script.
The script is passed to the interpreter via stdin and the return
code of the interpreter is returned."""
process = Popen(interpreter, stdin=PIPE, universal_newlines=True)
process.communicate(input=text)
process.wait()
return process.returncode | 1,028 |
def add_login_arguments_group(parser, full=False):
"""Adds login arguments to the passed parser
:param parser: The parser to add the login option group to
:type parser: ArgumentParser.
:param full: Flag to include seldom used options
:type full: bool
"""
group = parser.add_argument_group('LOGIN OPTIONS', 'Options for logging in to a system '\
'before the command is run.')
group.add_argument(
'--url',
dest='url',
help="Use the provided iLO URL to login.",
default=None)
group.add_argument(
'-u',
'--user',
dest='user',
help="If you are not logged in yet, including this flag along"\
" with the password and URL flags can be used to login to a"\
" server in the same command.",
default=None)
group.add_argument(
'-p',
'--password',
dest='password',
help="""Use the provided iLO password to log in.""",
default=None)
group.add_argument(
'--https',
dest='https_cert',
help="Use the provided CA bundle or SSL certificate with your login to connect "\
"securely to the system in remote mode. This flag has no effect in local mode.",
default=None)
group.add_argument(
'-e',
'--enc',
dest='encode',
action='store_true',
help=SUPPRESS,
default=False)
if full:
group.add_argument(
'--includelogs',
dest='includelogs',
action="store_true",
help="Optionally include logs in the data retrieval process.",
default=False)
group.add_argument(
'--path',
dest='path',
help="Optionally set a starting point for data collection during login."\
" If you do not specify a starting point, the default path"\
" will be /redfish/v1/. Note: The path flag can only be specified"\
" at the time of login. Warning: Only for advanced users, and generally "\
"not needed for normal operations.",
default=None) | 1,029 |
def _xml_create_tag_with_parents(xmltree, xpath, node):
"""
Create a tag at the given xpath together with its parents if they are missing
xml_create_tag cannot create subtags, but since we know that we have simple xpaths
we can do it here
No tag order is enforced here, since we are in intermediate steps
:param xmltree: etree ElementTree to operate on
:param xpath: xpath of the parent node
:param node: node to create
"""
parent_nodes = eval_xpath(xmltree, xpath, list_return=True)
to_create = []
while not parent_nodes:
parent_path, parent_name = split_off_tag(xpath)
to_create.append((parent_path, parent_name))
parent_nodes = eval_xpath(xmltree, parent_path, list_return=True)
for parent_path, name in reversed(to_create):
xml_create_tag(xmltree, parent_path, name)
xml_create_tag(xmltree, xpath, node) | 1,030 |
def copy_ncbi(corpus_path, directory_path):
"""Divides files from the corpus directory into different directories
:param corpus_path: corpus path
:param directory_path: new corpus path
"""
os.system('rm -rf ' + directory_path + '/* || true')
for (dir_path, dir_names, file_names) in os.walk(corpus_path):
for filename in file_names:
os.system('cp ' + corpus_path + filename + ' ' + directory_path + '/') | 1,031 |
def generateImage(boardfilename, outputfilename, dpi, pcbdrawArgs, back):
"""
Generate board image for the diagram. Returns bounding box (top left, bottom
right) active areas of the images in KiCAD native units.
"""
# For now, use PcbDraw as a process until we rewrite the tool so it can be
# used as a library. Also note that we always generate SVG first as we can
# easily read the active area from it. Then we manually convert it to PNG
with tempfile.TemporaryDirectory() as d:
tmpdir = Path(d)
svgfilename = tmpdir / "img.svg"
command = ["pcbdraw", "--shrink", "0"]
if back:
command.append("--back")
if pcbdrawArgs["style"] is not None:
command.extend(["--style", pcbdrawArgs["style"]])
if pcbdrawArgs["libs"] is not None:
command.extend(["--libs", pcbdrawArgs["libs"]])
if pcbdrawArgs["remap"] is not None:
command.extend(["--remap", pcbdrawArgs["remap"]])
if pcbdrawArgs["filter"] is not None:
command.extend(["--filter", pcbdrawArgs["filter"]])
command.append(boardfilename)
command.append(str(svgfilename))
subprocess.run(command, check=True)
svgToBitmap(svgfilename, outputfilename, dpi)
document = etree.parse(str(svgfilename))
tlx, tly, w, h = map(float, document.getroot().attrib["viewBox"].split())
return {
"tl": (ki2mm(svg2ki(tlx)), ki2mm(svg2ki(tly))),
"br": (ki2mm(svg2ki(tlx + w)), ki2mm(svg2ki(tly + h)))
} | 1,032 |
def mask_to_image(
mask: _T_input, batch_first: bool = False,
color: Optional[str] = None,
origin: str = 'lower'
) -> np.ndarray:
"""
Creates an image from a mask `Tensor` or `ndarray`.
For more details of the output shape, see the tensorboardx docs
Note:
Clips mask to range [0, 1]. Any values outside of this range will be
ignored.
Args:
mask: Mask to plot
batch_first: If `True`, `signal` is expected to have shape
`(batch [optional], frames, features)`. If `False`, the batch axis
is assumed to be in the second position, i.e.,
`(frames, batch [optional], features)`.
color: A color map name. The name is forwarded to
`matplotlib.pyplot.cm.get_cmap` to get the color map. If `None`,
grayscale is used.
origin: Origin of the plot. Can be `'upper'` or `'lower'`.
Returns:
Colorized image with shape (color (1 or 3), features, frames)
"""
mask = to_numpy(mask, detach=True)
clipped_values = np.sum((mask < 0) | (mask > 1))
if clipped_values:
import warnings
warnings.warn(
f'Mask value passed to mask_to_image out of range ([0, 1])! '
f'{clipped_values} values are clipped!'
)
image = np.clip(mask * 255, 0, 255)
image = image.astype(np.uint8)
image = _remove_batch_axis(image, batch_first=batch_first)
return _colorize(_apply_origin(image.T, origin), color) | 1,033 |
def lerp(x0, x1, t):
""" Linear interpolation """
return (1.0 - t) * x0 + t * x1 | 1,034 |
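# Worked examples for lerp above: t moves the result linearly from x0 to x1.
assert lerp(0.0, 10.0, 0.25) == 2.5
assert lerp(2.0, 4.0, 0.5) == 3.0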
def read_uint4(f):
"""
>>> import io
>>> read_uint4(io.BytesIO(b'\\xff\\x00\\x00\\x00'))
255
>>> read_uint4(io.BytesIO(b'\\x00\\x00\\x00\\x80')) == 2**31
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack('<I', data)[0]
raise ValueError('not enough data in stream to read uint4') | 1,035 |
def bound_contribution(w, g, G, nmax, squeeze=True):
# TODO docstring
# This method assumes g, if it is multidimensional varies along the first axis while w varies along the zeroth axis.
"""
"""
# helper methods
def f1(g):
return np.sqrt(g).astype(int)
# * L(w + (1 / ell - ell / g) ** 2, 0, G)
def f2(w, g, G, ell):
return (
np.pi
* L(w + (1 / ell - ell / g) ** 2, 0, G)
* 2
* (g - ell ** 2)
* (2 * ell ** 2 - g)
/ (ell ** 3 * g ** 2)
)
def f3(ell):
out = np.ones(ell.shape)
out[ell == 0] = 0
return out
def f4(g, ell, n):
# we aren't ensuring that we don't compute ell == n.
# TODO catch ell == n with a more robust method
out = (
n ** 2
* (n ** 2 * ell ** 2 - (g - ell ** 2) ** 2)
/ ((n ** 2 - ell ** 2) * (n ** 2 * ell ** 2 - g ** 2))
)
out[out == -np.inf] = np.nan
out[out == np.inf] = np.nan
return out
# check input arrays
ndim_org = w.ndim
w, g, G = _checkndim_copy_reshape([w, g, G], 2)
# create array to sum over
ellmax = int(np.max(np.sqrt(g)))
ell_base = np.arange(1, ellmax + 1, 1)
new_shape = (1,) * ndim_org + (ell_base.size,) + (1,)
ell_base = np.reshape(ell_base, new_shape)
ell = np.repeat(
ell_base, g.size, axis=1
) # TODO np.argmax g.shape | assumes g is varying along 1st axis
ell[f1(g) < ell] = 0
# create array to product over
n = np.arange(1, nmax + 1, 1)
new_shape = (1,) * (ndim_org + 1) + n.shape
n = np.reshape(n, new_shape)
# now actually create output arrays
out = f4(g, ell, n)
out = np.nanprod(out, axis=-1, keepdims=True)
out = out * f2(w, g, G, ell) * f3(ell)
out = np.nansum(
out, axis=-2, keepdims=True
) # TODO figure out why I need nansum here
if squeeze:
out = np.squeeze(out)
return out | 1,036 |
def configure_typogrify(pelicanobj, mathjax_settings):
"""Instructs Typogrify to ignore math tags - which allows Typogfrify
to play nicely with math related content"""
# If Typogrify is not being used, then just exit
if not pelicanobj.settings.get('TYPOGRIFY', False):
return
try:
import typogrify
from distutils.version import LooseVersion
if LooseVersion(typogrify.__version__) < LooseVersion('2.0.7'):
raise TypeError('Incorrect version of Typogrify')
from typogrify.filters import typogrify
# At this point, we are happy to use Typogrify, meaning
# it is installed and it is a recent enough version
# that can be used to ignore all math
# Instantiate markdown extension and append it to the current extensions
pelicanobj.settings['TYPOGRIFY_IGNORE_TAGS'].extend(['.math', 'script']) # ignore math class and script
except (ImportError, TypeError, KeyError) as e:
pelicanobj.settings['TYPOGRIFY'] = False # disable Typogrify
if isinstance(e, ImportError):
print("\nTypogrify is not installed, so it is being ignored.\nIf you want to use it, please install via: pip install typogrify\n")
if isinstance(e, TypeError):
print("\nA more recent version of Typogrify is needed for the render_math module.\nPlease upgrade Typogrify to the latest version (anything equal or above version 2.0.7 is okay).\nTypogrify will be turned off due to this reason.\n")
if isinstance(e, KeyError):
print("\nA more recent version of Pelican is needed for Typogrify to work with render_math.\nPlease upgrade Pelican to the latest version or clone it directly from the master GitHub branch\nTypogrify will be turned off due to this reason\n") | 1,037 |
def karatsuba_256x256(ab, a, b, t0, t1, t2, t3, t4):
"""assumes a and b are two ymm registers"""
z0, z2 = ab
a0, a1 = a, t0
b0, b1 = b, t1
z1 = t2
p("vextracti128 $1, %ymm{}, %xmm{}".format(a, a1))
p("vextracti128 $1, %ymm{}, %xmm{}".format(b, b1))
mult_128x128(z2, a1, b1, t3, t4)
p("vpxor %xmm{}, %xmm{}, %xmm{}".format(a0, a1, a1)) # a1 contains [0][a0 xor a1]
p("vpxor %xmm{}, %xmm{}, %xmm{}".format(b0, b1, b1))
mult_128x128(z1, a1, b1, t3, t4)
mult_128x128(z0, a0, b0, t3, t4)
p("vpxor %ymm{}, %ymm{}, %ymm{}".format(z1, z2, z1))
p("vpxor %ymm{}, %ymm{}, %ymm{}".format(z1, z0, z1))
# put top half of z1 into t (contains [0][z1top])
p("vpxor %ymm{}, %ymm{}, %ymm{}".format(t0, t0, t0))
p("vextracti128 $1, %ymm{}, %xmm{}".format(z1, t0))
p("vpxor %ymm{}, %ymm{}, %ymm{}".format(z2, t0, z2)) # compose into z2
p("vpxor %ymm{}, %ymm{}, %ymm{}".format(t0, t0, t0))
p("vinserti128 $1, %xmm{}, %ymm{}, %ymm{}".format(z1, t0, t0))
p("vpxor %ymm{}, %ymm{}, %ymm{}".format(t0, z0, z0))
# ~512bit result is now in z2 and z0 | 1,038 |
def get_onto_class_by_node_type(ont: owlready2.namespace.Ontology, node_label: str):
"""Get an object corresponding to an ontology class given the node label.
`owlready2` doesn't make it easy to dynamically retrieve ontology classes.
This uses some (relatively unsafe) string manipulation to hack together a
solution.
Notes
-----
This should be refactored if/when a better solution is available!
"""
matches = [c for c in ont.classes() if str(c).split(".")[-1] == node_label]
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
return None
else:
raise ValueError(
"Error: Something is wrong with your ontology's class hierarchy! Check for duplicate classes with '{0}' in the name".format(
node_label
)
) | 1,039 |
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = frequencies.to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq | 1,040 |
def extract_filtered_series(data_frame, column_list):
"""
Returns a filtered Pandas Series one-dimensional ndarray from a targeted column.
Duplicate values and NaN or blank values are dropped from the result set which is
returned sorted (ascending).
:param data_frame: Pandas DataFrame
:param column_list: list of columns
:return: Pandas Series one-dimensional ndarray
"""
return data_frame[column_list].drop_duplicates().dropna(axis=0, how='all').sort_values(
column_list)
# return data_frame[column_list].str.strip().drop_duplicates().dropna().sort_values() | 1,041 |
def event_rheader(r):
""" Resource headers for component views """
rheader = None
if r.representation == "html":
if r.name == "event":
# Event Controller
tabs = [(T("Event Details"), None)]
#if settings.has_module("req"):
# tabs.append((T("Requests"), "req"))
rheader_tabs = s3_rheader_tabs(r, tabs)
event = r.record
if event:
if event.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if event.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
rheader = DIV(TABLE(TR(exercise),
TR(TH("%s: " % T("Name")),
event.name),
TH("%s: " % T("Comments")),
event.comments,
TR(TH("%s: " % T("Zero Hour")),
event.zero_hour),
TR(closed),
), rheader_tabs)
if r.name == "incident":
# Incident Controller
tabs = [(T("Incident Details"), None)]
if settings.has_module("project"):
tabs.append((T("Tasks"), "task"))
if settings.has_module("hrm"):
tabs.append((T("Human Resources"), "human_resource"))
if settings.has_module("asset"):
tabs.append((T("Assets"), "asset"))
tabs.append((T("Facilities"), "site"))
tabs.append((T("Map Configuration"), "config"))
rheader_tabs = s3_rheader_tabs(r, tabs)
record = r.record
if record:
if record.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if record.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
rheader = DIV(TABLE(TR(exercise),
TR(TH("%s: " % T("Name")),
record.name),
TH("%s: " % T("Comments")),
record.comments,
TR(TH("%s: " % T("Zero Hour")),
record.zero_hour),
TR(closed),
), rheader_tabs)
return rheader | 1,042 |
def parse_args():
"""Read arguments from config file and command line args."""
options_default = {
'permissions': True,
'checksum': True,
'interval': 60,
'pidfile': '~/.dropboxhandler.pid',
'daemon': False,
'umask': 0o077,
}
parser = argparse.ArgumentParser(
description="Listen for new files in " +
"dropboxdirs and move to ObenBis/storage",
)
parser.add_argument("-c", "--conf-file",
help="Specify config file", metavar="FILE",
default="~/.dropboxhandler.conf")
parser.add_argument("--print-example-config",
help="Print a example config file to stdout.",
action="store_true", default=False)
parser.add_argument('-d', '--daemon', action='store_true', default=None)
parser.add_argument('--pidfile', default=None)
parser.add_argument('--check-config', default=False, action='store_true',
help="Do not start the daemon, but check the " +
"config file")
args = parser.parse_args()
if args.print_example_config:
print_example_config()
sys.exit(0)
try:
with open(args.conf_file) as f:
config = yaml.safe_load(f)
except dropboxhandler.FileNotFoundError:
error_exit("Could not find config file (default location: " +
"~/.dropboxhandler.conf")
except yaml.parser.ParserError as e:
error_exit("Could not parse config file. Error was %s" % e)
for key in ['incoming', 'outgoing', 'openbis', 'options']:
if key not in config:
error_exit("Config file must include section '%s'" % key)
options_default.update(config['options'])
config['options'] = options_default
if args.pidfile is not None:
config['options']['pidfile'] = args.pidfile
if args.daemon is not None:
config['options']['daemon'] = args.daemon
config['check_config'] = args.check_config
return config | 1,043 |
def backend_is_up(backend):
"""Returns whether a server is receiving traffic in HAProxy.
:param backend: backend dict, like one of those returned by smartstack_tools.get_multiple_backends.
:returns is_up: Whether the backend is in a state that receives traffic.
"""
return str(backend['status']).startswith('UP') | 1,044 |
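# Quick illustration of backend_is_up above, using hand-written backend dicts
# shaped like HAProxy status entries (values are made up).
assert backend_is_up({'status': 'UP 1/2'})
assert not backend_is_up({'status': 'MAINT'})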
def get_bot_files_glob(**kwargs):
"""Returns a `list` with the matching file names using the format string for BOT data """
outdict = {}
kwcopy = kwargs.copy()
test_name = kwcopy.pop('testName').lower()
nfiles = kwcopy.get('nfiles', None)
rafts = get_raft_names_dc(kwcopy['run'], kwcopy.get('teststand', 'bot'))
for raft in rafts:
raftdict = {}
slots = getSlotList(raft)
for slot in slots:
glob_string = BOT_FORMATTER(raft=raft, slot=slot, testName=test_name, **kwcopy)
files = sorted(glob.glob(glob_string))
if nfiles is None:
raftdict[slot] = files
else:
raftdict[slot] = files[0:nfiles]
outdict[raft] = raftdict
return outdict | 1,045 |
def plot_univariate_categorical_columns(categorical_cols: Sequence[str], dataframe: pd.DataFrame, **kwargs) -> None:
"""plots categorical variable bars
Args:
categorical_cols (Sequence[str]): categorical columns
dataframe (pd.DataFrame): DataFrame
"""
for c in categorical_cols:
value_counts_ser = dataframe[c].value_counts()
cnt_len = len(value_counts_ser)
if cnt_len < 16:
t, xl, yl = get_univariate_cat_plot_strs(c)
plot_univariate_series(value_counts_ser, t, xl, yl, **kwargs) | 1,046 |
def _submit_drmaa(args, unknown_args):
""" Submit multiple 'pisces run' jobs to the cluster using libdrmaa """
if args.local:
submit_local(args.metadata, args.config, args.max_memory, args.runtime, unknown_args, args.dry_run, args.debug, args.workdir)
else:
submit_drmaa(args.metadata, args.config, args.max_memory, args.runtime, unknown_args, args.dry_run, args.debug, args.workdir) | 1,047 |
def minutiae_selection(minutiae):
""" Selects the subset of most reliable minutiae.
"""
M = np.array([(m['x'], m['y'], m['direction'], m['reliability']) for m in minutiae])
M[:,2] = np.round(np.rad2deg(nbis_idx2angle(M[:,2], N=16)))
M[:,3] = np.round(M[:,3] * 100.0)
M = M.astype(int)
M = M[M[:,3] > np.percentile(M[:,3], 5), :]
return M | 1,048 |
def test_imshowhs_grid_1():
"""
Show DEM draped over the shaded topographic relief
"""
# %%
mg = landlab.RasterModelGrid((4, 5))
_ = mg.add_zeros("topographic__elevation", at="node")
_ = landlab.plot.imshowhs_grid(
mg,
"topographic__elevation",
var_name="Topo",
var_units=r"m",
grid_units=("m", "m"),
cmap="terrain",
ticks_km=False,
symmetric_cbar=True,
limits=(0, 10),
) | 1,049 |
def alt_blend_value(data, i, j, k):
"""Computes the average value of the three vertices of a triangle in the
simplex triangulation, where two of the vertices are on the upper
horizontal."""
keys = alt_triangle_coordinates(i, j, k)
return blend_value(data, i, j, k, keys=keys) | 1,050 |
async def test_set_licensing_info_put_mhlm(test_server):
"""Test to check endpoint : "/set_licensing_info"
Test which sends HTTP PUT request with MHLM licensing information.
Args:
test_server (aiohttp_client): An aiohttp_client server to send the HTTP PUT request.
"""
data = {
"type": "MHLM",
"status": "starting",
"version": "R2020b",
"token": "abc@nlm",
"emailaddress": "abc@nlm",
"sourceId": "abc@nlm",
}
resp = await test_server.put("/set_licensing_info", data=json.dumps(data))
assert resp.status == 200 | 1,051 |
def _BinaryCrossEntropy():
"""Returns a layer that computes prediction-target cross entropies."""
def f(model_output, target_category): # pylint: disable=invalid-name
shapes.assert_same_shape(model_output, target_category)
batch_size = model_output.shape[0]
j = jnp.dot(jnp.transpose(target_category), jnp.log(model_output))
j += jnp.dot(jnp.transpose(1 - target_category), jnp.log(1 - model_output))
j = -1.0/batch_size * jnp.squeeze(j)
return j
return Fn('_BinaryCrossEntropy', f) | 1,052 |
def compile_exe():
"""
Creates a standalone EXE file.
"""
print('\nRunning compile using distutils and py2exe:\n')
from distutils.core import setup
import py2exe # required for proper inclusion
typelibs = []
com_server = []
dll_excludes = ['w9xpopen.exe']
sys.argv[1] = 'py2exe'
setup(
console=[file_path],
com_server=com_server,
options=dict(
py2exe=dict(
typelibs=typelibs,
includes=[],
excludes=['tkinter', 'Tkinter', '_tkinter', 'Tkconstants', 'tcl', 'doctest', 'pdb', 'inspect', 'email'],
dll_excludes=dll_excludes,
bundle_files=1,
compressed=True
),
),
zipfile=None
)
sys.exit(0) | 1,053 |
def normalize(subs, strict):
"""
Normalises subtitles.
:param subs: :py:class:`Subtitle` objects
:param bool strict: Whether to enable strict mode, see
:py:func:`Subtitle.to_srt` for more information
:returns: A single SRT formatted string, with each input
:py:class:`Subtitle` represented as an SRT block
:rtype: str
:raises SRTParseError: If parsing fails.
"""
return _cli.compose_suggest_on_fail(subs, strict) | 1,054 |
def update_weights(model, weights):
"""
Package up a trained/finetuned model as a new bioimageio model
"""
from bioimageio.core.build_spec import build_model
# create a subfolder to store the files for the new model
model_root = Path("./sample_data")
model_root.mkdir(exist_ok=True)
# create the expected output tensor
new_output = None
new_output_path = f"{model_root}/test_output.npy"
np.save(new_output_path, new_output)
# add thresholding as post-processing procedure to our model
preprocessing = [
[{"name": prep.name, "kwargs": prep.kwargs} for prep in inp.preprocessing]
for inp in model_resource.inputs
]
postprocessing = [
[{"name": prep.name, "kwargs": prep.kwargs} for prep in inp.postprocessing]
for inp in model_resource.outputs
]
# get the model architecture
# note that this is only necessary for pytorch state dict models
model_source = get_architecture_source(rdf_doi)
# we use the `parent` field to indicate that the new model is created based on
# the nucleus segmentation model we have obtained from bioimage.io
# this field is optional and only needs to be given for models that are created based on other models from bioimage.io
# the parent is specified via it's doi and the hash of its rdf file
model_root_folder = os.path.split(
model_resource.weights["pytorch_state_dict"].source
)[0]
rdf_file = model_root_folder / "rdf.yaml"
with rdf_file.open("rb") as f:
rdf_hash = hashlib.sha256(f.read()).hexdigest()
parent = {"uri": rdf_doi, "sha256": rdf_hash}
# the name of the new model and where to save the zipped model package
name = f"{old_model_name}_finetuned"
zip_path = model_root / f"{name}.zip"
# `build_model` needs some additional information about the model, like citation information
# all this additional information is passed as plain python types and will be converted into the bioimageio representation internally
# for more information, check out the function signature
# https://github.com/bioimage-io/core-bioimage-io-python/blob/main/bioimageio/core/build_spec/build_model.py#L252
cite = [
{"text": cite_entry.text, "url": cite_entry.url}
for cite_entry in model_resource.cite
]
# TODO: provide this option if data being looked at is available on bioimage.io
# the training data used for the model can also be specified by linking to a dataset available on bioimage.io
# training_data = {"id": "ilastik/stradist_dsb_training_data"}
# the axes descriptions for the inputs / outputs
input_axes = model_resource.input_axes
output_axes = model_resource.output_axes
# the pytorch_state_dict weight file
weight_file = model_resource.weights["pytorch_state_dict"].source
# the path to save the new model with torchscript weights
zip_path = f"{model_root}/new_model2.zip"
# build the model! it will be saved to 'zip_path'
new_model_raw = build_model(
weight_uri=weight_file,
test_inputs=model_resource.test_inputs,
test_outputs=[new_output_path],
input_axes=input_axes,
output_axes=output_axes,
output_path=zip_path,
name=name,
description=f"{model_resource.description} (Finetuned with Napari-affinities plugin)",
authors=[{"name": "Jane Doe"}], # TODO: let users plug in their own name
license="CC-BY-4.0", # TODO: configurable?
documentation=model_resource.documentation,
covers=[str(cover) for cover in model_resource.covers],
tags=model_resource.tags + ["Napari-affinities"],
cite=cite,
parent=parent,
architecture=model_source,
model_kwargs=model_resource.weights["pytorch_state_dict"].kwargs,
preprocessing=preprocessing,
postprocessing=postprocessing,
training_data=model_resource.training_data, # TODO: add our data here, identify it as finetuning data?
) | 1,055 |
def Stern_Brocot(n):
"""
Another way to iterate over rationals
References:
https://stackoverflow.com/questions/24997970/iterating-over-parts-of-the-stern-brocot-tree-in-python
"""
states = [(0, 1, 1, 1)]
result = []
while len(states) != 0:
a, b, c, d = states.pop()
if a + b + c + d <= n:
result.append((a + c, b + d))
states.append((a, b, a + c, b + d))
states.append((a + c, b + d, c, d))
return result | 1,056 |
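# Worked example for Stern_Brocot above: with n=5 the traversal yields the
# fractions 1/2, 2/3, 1/3 and 1/4 as (numerator, denominator) pairs.
assert Stern_Brocot(5) == [(1, 2), (2, 3), (1, 3), (1, 4)]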
def compare_elements(prev_hash_dict, current_hash_dict):
"""Compare elements that have changed between prev_hash_dict and current_hash_dict.
Check if any elements have been added, removed or modified.
"""
changed = {}
for key in prev_hash_dict:
elem = current_hash_dict.get(key, '')
if elem == '':
changed[key] = 'deleted'
elif elem != prev_hash_dict[key]:
changed[key] = 'changed'
for key in current_hash_dict:
elem = prev_hash_dict.get(key, '')
if elem == '':
changed[key] = 'added'
return changed | 1,057 |
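# Small usage sketch for compare_elements above: 'b' changed, 'c' was added
# and 'd' was deleted between the two (made-up) hash dicts.
prev = {'a': 'h1', 'b': 'h2', 'd': 'h4'}
curr = {'a': 'h1', 'b': 'hX', 'c': 'h3'}
assert compare_elements(prev, curr) == {'b': 'changed', 'c': 'added', 'd': 'deleted'}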
def test_list(script_collection):
"""Ensure the method returns the expected value on success"""
x_resp = script_collection._client.http_get.return_value
x_resp.status_code = 200
result = script_collection.list()
assert result == x_resp.json.return_value | 1,058 |
def test_interp_time(
time: types_time_like,
time_ref: types_timestamp_like,
systems: List[str],
csm_has_time_ref: bool,
num_abs_systems: int,
):
"""Test the ``interp_time`` method.
Parameters
----------
time :
The value passed to the functions as ``time`` parameter
time_ref :
The value passed to the functions as ``time_ref`` parameter
systems :
The value passed to the functions as ``affected_coordinate_systems``
parameter
csm_has_time_ref :
If `True`, a reference time is added to the CSM
num_abs_systems :
The number of time dependent systems that get a reference time assigned to
them.
"""
# csm data
csm_time_ref = "2000-01-10" if csm_has_time_ref else None
abs_systems = [f"lcs_{i}" for i in range(num_abs_systems)]
lcs_data = dict(
lcs_0=("root", [1, 4, 7], TS("2000-01-09")),
lcs_1=("lcs_0", [1, 5, 9], TS("2000-01-14")),
lcs_2=("root", [1, 6, 11], TS("2000-01-11")),
)
# time data
time_class = Time(time, time_ref)
days_interp = time_class.as_quantity().to("days").m
if len(days_interp.shape) == 0:
days_interp = days_interp.reshape(1)
# create csm
csm = tf.CoordinateSystemManager("root", time_ref=csm_time_ref)
for k, v in lcs_data.items():
csm.create_cs(
k,
v[0],
_orientation_from_value(v[1]),
_coordinates_from_value(v[1]),
Q_(v[1], "day"),
v[2] if k in abs_systems else None,
)
lcs_3 = tf.LocalCoordinateSystem(
WXRotation.from_euler("y", 1).as_matrix(), [4, 2, 0]
)
csm.add_cs("lcs_3", "lcs_2", lcs_3)
# interpolate
csm_interp = csm.interp_time(time, time_ref, systems)
# evaluate results
time_exp = time_class if len(time_class) > 1 else None
time_ref_exp = time_class.reference_time
for k, v in lcs_data.items():
# create expected lcs
if systems is None or k in systems:
diff = 0
if time_ref_exp is not None:
if k in abs_systems:
diff = Time(time_class.reference_time - v[2])
else:
diff = Time(time_class.reference_time - csm.reference_time)
diff = diff.as_quantity().to("days").m
lcs_exp = tf.LocalCoordinateSystem(
_orientation_from_value(days_interp + diff, v[1][0], v[1][-1]),
_coordinates_from_value(days_interp + diff, v[1][0], v[1][-1]),
time_exp,
csm.reference_time if csm.has_reference_time else time_ref_exp,
)
else:
lcs_exp = csm.get_cs(k)
# check results
check_cs_close(csm_interp.get_cs(k), lcs_exp)
check_cs_close(csm_interp.get_cs(v[0], k), lcs_exp.invert())
# check static lcs unmodified
check_cs_close(csm_interp.get_cs("lcs_3"), lcs_3)
# check time union
if systems is None or len(systems) == 3:
assert np.all(csm_interp.time_union() == time_exp) | 1,059 |
def add_to_mongo(data):
"""
Insert data into MongoDB
:param data:
:return:
"""
connection = pymongo.MongoClient()
tdb = connection.Spider
db = tdb.ip_pool
if isinstance(data, list):
db.insert_many(data)
else:
db.insert(data) | 1,060 |
def generator_string(lang_uses: str = 'all', char_count: int = 1,
char_size: str = 'lower') -> str:
"""Generator string
:param lang_uses: character set to use
:type lang_uses: str
:param char_count: number of characters to return
:type char_count: int
:param char_size: character case
:type char_size: str
:return: str
"""
random_string = ''.join(choices(get_alphabet(lang_uses=lang_uses, char_size=char_size), k=char_count))
return random_string | 1,061 |
def create_cvmfs_persistent_volume_claim(cvmfs_volume):
"""Create CVMFS persistent volume claim."""
from kubernetes.client.rest import ApiException
from reana_commons.k8s.api_client import current_k8s_corev1_api_client
try:
current_k8s_corev1_api_client.create_namespaced_persistent_volume_claim(
"default", render_cvmfs_pvc(cvmfs_volume)
)
except ApiException as e:
if e.status != 409:
raise e | 1,062 |
def SLC_copy(SLC_in, SLC_par_in, SLC_out, SLC_par_out, fcase='-', sc='-', roff='-', nr='-', loff='-', nl='-', swap='-',
header_lines='-', logpath=None, outdir=None, shellscript=None):
"""
| Copy SLC with options for data format conversion, segment extraction, and byte swapping
| Copyright 2015, Gamma Remote Sensing, v5.1 13-Aug-2015 uw/clw
Parameters
----------
SLC_in:
(input) SLC (FCOMPLEX or SCOMPLEX format)
SLC_par_in:
(input) ISP SLC parameter file for input SLC
SLC_out:
(output) selected SLC section (FCOMPLEX or SCOMPLEX format)
SLC_par_out:
(output) ISP SLC parameter file of output SLC
fcase:
data format conversion (enter - for default: output format = input format)
* 1: FCOMPLEX --> FCOMPLEX (default sc = 1.0)
* 2: FCOMPLEX --> SCOMPLEX (default sc = 10000.0)
* 3: SCOMPLEX --> FCOMPLEX (default sc = 0.0001)
* 4: SCOMPLEX --> SCOMPLEX (default sc = 1.0)
sc:
scale factor for input SLC data (enter - for default)
roff:
offset to starting range sample (enter - for default: 0)
nr:
number of range samples (enter - for default: to end of line)
loff:
offset to starting line (enter - for default: 0)
nl:
number of lines to copy (enter - for default: to end of file)
swap:
swap data (enter - for default)
* 0: normal (default)
* 1: swap real/imaginary part of complex data
* 2: swap left/right (near/far range)
header_lines:
number of input file header lines (enter - for default: 0)
* NOTE: CEOS format SLC data have 1 header line
* NOTE: file offset pointer size (bytes): 8
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
"""
process(['/usr/local/GAMMA_SOFTWARE-20180703/ISP/bin/SLC_copy', SLC_in, SLC_par_in, SLC_out, SLC_par_out, fcase, sc,
roff, nr, loff, nl, swap, header_lines], logpath=logpath, outdir=outdir, shellscript=shellscript) | 1,063 |
def get_host_ips(version=4, exclude=None):
"""
Gets all IP addresses assigned to this host.
Ignores Loopback Addresses
This function is fail-safe and will return an empty array instead of
raising any exceptions.
:param version: Desired IP address version. Can be 4 or 6. defaults to 4
:param exclude: list of interface name regular expressions to ignore
(ex. ["^lo$","docker0.*"])
:return: List of IPAddress objects.
"""
exclude = exclude or []
ip_addrs = []
# Select Regex for IPv6 or IPv4.
ip_re = IPV4_RE if version == 4 else IPV6_RE
# Call `ip addr`.
try:
ip_addr_output = check_output(["ip", "-%d" % version, "addr"])
except (CalledProcessError, OSError):
print("Call to 'ip addr' Failed")
sys.exit(1)
# Separate interface blocks from ip addr output and iterate.
for iface_block in INTERFACE_SPLIT_RE.findall(ip_addr_output):
# Try to get the interface name from the block
match = IFACE_RE.match(iface_block)
iface = match.group(1)
# Ignore the interface if it is explicitly excluded
if match and not any(re.match(regex, iface) for regex in exclude):
# Iterate through Addresses on interface.
for address in ip_re.findall(iface_block):
# Append non-loopback addresses.
if not IPNetwork(address).ip.is_loopback():
ip_addrs.append(IPAddress(address))
return ip_addrs | 1,064 |
def rate_string(rate, work_unit, computer_prefix=False):
"""Return a human-friendly string representing a rate. 'rate' is given
in 'work_unit's per second. If the rate is less than 0.1 then the inverse
is shown.
Examples:
>>> rate_string(200000, "B", True)
'195KB/s'
>>> rate_string(0.01, "file")
'1m40s/file'
>>> rate_string(1.0 / 24 / 3600, "earthrot")
'1d0h/earthrot'
"""
if rate > 0 and rate < 0.1:
return "%s/%s" % (time_string(1.0 / rate), work_unit)
else:
return "%s/s" % (quantity_string(rate, work_unit, computer_prefix)) | 1,065 |
def get_queue_arn(sqs_client, queue_url: str) -> str:
"""Encapsulates SQS::get_queue_attributes with special attribute QueueArn.
:param sqs_client: The Boto3 AWS SQS client object.
:param queue_url: URL of the queue
:return: The Amazon Resource Name (ARN) of the queue.
"""
try:
response = sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["QueueArn"])
queue_arn = response["Attributes"]["QueueArn"]
logger.info("Retrieved queue ARN: '%s' for URL: '%s'.", queue_arn, queue_url)
except ClientError:
logger.exception("Couldn't retrieve ARN for queue URL: %s.", queue_url)
raise
else:
return queue_arn | 1,066 |
def any_flexloggers_running() -> bool:
"""Returns whether any FlexLogger.exe processes are running."""
for proc in psutil.process_iter(["pid", "name"]):
if proc.info["name"].lower() == "flexlogger.exe":
return True
return False | 1,067 |
def _get_non_white_runs(mask):
"""Returns those runs that are delimeted by white cells."""
res = []
in_a_block = False
last_idx = len(mask) - 1
for idx, cell in enumerate(mask):
if cell != WHITE and not in_a_block:
in_a_block = True
start = idx
if cell == WHITE and in_a_block:
in_a_block = False
end = idx - 1
res.append(Block(start, end, length=end - start + 1))
if idx == last_idx and in_a_block:
res.append(Block(start, last_idx, length=last_idx - start + 1))
return res | 1,068 |
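# Illustrative check for _get_non_white_runs above. It assumes the module-level
# WHITE marker differs from 1 and that Block exposes start/end/length attributes
# (as suggested by the constructor call in the function).
blocks = _get_non_white_runs([WHITE, 1, 1, WHITE, 1])
assert [(b.start, b.end, b.length) for b in blocks] == [(1, 2, 2), (4, 4, 1)]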
def createMask(input=None, static_sig=4.0, group=None, editpars=False, configObj=None, **inputDict):
""" The user can input a list of images if they like to create static masks
as well as optional values for static_sig and inputDict.
The configObj.cfg file will set the defaults and then override them
with the user options.
"""
if input is not None:
inputDict["static_sig"]=static_sig
inputDict["group"]=group
inputDict["updatewcs"]=False
inputDict["input"]=input
else:
print("Please supply an input image\n", file=sys.stderr)
raise ValueError
#this accounts for a user-called init where config is not defined yet
configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj) | 1,069 |
def cov(x, y, w):
"""Calculates weighted covariance"""
return np.sum(
w
* (x - np.average(x, axis=0, weights=w))
* (y - np.average(y, axis=0, weights=w))
) / np.sum(w) | 1,070 |
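# Sanity-check sketch for cov above: with uniform weights, cov(x, x, w) reduces
# to the population variance of x (2/3 for [0, 1, 2]).
import numpy as np
x = np.array([0.0, 1.0, 2.0])
w = np.ones(3)
assert np.isclose(cov(x, x, w), 2.0 / 3.0)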
def eval_metrics_all(
y: List[np.ndarray],
y_hat: List[np.ndarray]
) -> Dict[str, float]:
"""Calculates combined accuracy, f1, precision, recall and AUC scores for
multiple arrays. The arrays are shorted to the minimum length of the
corresponding partner and stacked on top of each other to calculated the
combined scores.
Arguments:
y (np.ndarray): Ground truth.
y_hat (np.ndarray): Prediction.
Returns:
Dict[str, float]: Returns a dict with all scores.
Example:
>>> y = [np.ones((10, 1)), np.zeros((10, 1))]
>>> y_hat = [np.ones((10, 1)), np.zeros((10, 1))]
>>> eval_metrics_all(y, y_hat)
{'accuracy': 1.0, 'precision': 1.0, 'recall': 1.0, 'f1': 1.0,
'roc_auc': 1.0}
"""
if len(y) != len(y_hat):
raise ValueError('y and y_hat must have the same number elements.')
# allow 1d or 2d arrays with the 2nd dimension of 1
check_ndim(*y, *y_hat, ndim=2, strict=False)
check_size(*y, *y_hat, size=1, axis=1, strict=False)
y = list(map(lambda x: x.reshape(-1), y))
y_hat = list(map(lambda x: x.reshape(-1), y_hat))
# truncate corresponding arrays to the same length
y_, y_hat_ = np.hstack(list(truncate(*zip(y, y_hat))))
return eval_metrics(y_, y_hat_) | 1,071 |
def test_multiple_primary():
"""test prevention of multiple primary keys"""
with pytest.raises(MultiplePrimaryKeysError):
class Test(Model): # pylint: disable=unused-variable
"""bad model with multiple primary keys"""
id = Field(default=0, is_primary=True)
idd = Field(default=0, is_primary=True) | 1,072 |
def cli():
"""
Tools for user manipulation
""" | 1,073 |
def fake_dataset_no_label(path, range1, batch_size=32, shuffle=False):
"""
Create fake dataset with no label
Args:
path (str) : provide the data settings
range1 (tuple) : range of generated images
batch_size (int): number of samples contained in each generated batch
shuffle (bool) : shuffle the data
Returns:
data loader
"""
list_ids = []
labels = {}
for i in range(range1[0], range1[1]):
list_ids.append(path + 'gen_'+str(i)+'.jpg')
labels[path + 'gen_'+str(i)+'.jpg'] = -1
# as per the author's citation, we have transformed the input image
# (resize to 64 * 64, 256 * 256, 224 * 224)
pre_process = [(64, 64), (256, 256), (224, 224)]
mean_normalize = (0.485, 0.456, 0.406)
std_normalize = (0.229, 0.224, 0.225)
transform = A.Compose([
A.Resize(pre_process[0][0], pre_process[0][1]),
A.Resize(pre_process[1][0], pre_process[1][1]),
A.CenterCrop(width=pre_process[2][0], height=pre_process[2][1]),
A.Normalize(mean=mean_normalize, std=std_normalize)
])
loader = data_iterator_celeba(list_ids, labels,
transform=transform, batch_size=batch_size, shuffle=shuffle)
return loader | 1,074 |
def output_influx(customer, show_hourly=False):
"""Print data using influxDB format."""
_params = {"name": "InfluxDB",
"bucket" : "HydroQuebec",
#"batch_size" : 100,
}
db = InfluxDB(_params)
db.write_data_to_db(customer, show_hourly=show_hourly)
print("Sent this to InfluxDB")
output_text(customer, show_hourly=show_hourly) | 1,075 |
def number_of_songs_match(folder, songs):
"""
Checks if the number of music files in folder matches the number of tracks
listed in songs.
Arguments:
- folder: path to folder where music files are found
- songs: list of track numbers
Returns:
True / False
"""
files = [f for f in listdir(folder) if isfile(join(folder, f)) and f.endswith('.mp3')]
if len(files) != len(songs):
return False
return True | 1,076 |
def spaces_to_pluses(q, city, state):
"""
"""
if city and state:
return split_text(q), split_text(city), split_text(state)
else:
return split_text(q), 'Nationwide', ' ' | 1,077 |
def add_date(start, unit, addend):
"""
Find the date so many days/months/years into the future from the given date
"""
start = _parse_date(start)
if unit == 'days':
print(start.replace(days=addend))
elif unit == 'months':
print(start.replace(months=addend))
elif unit == 'years':
print(start.replace(years=addend))
else:
print('ERROR: Do not recognise unit {}'.format(unit))
# End if/else | 1,078 |
def add_parent_path(plevel=1):
"""
Solve "Parent module '' not loaded, cannot perform relative import" Issue
:param plevel:
:return:
"""
    import sys
    from minghu6.etc.path import get_pre_path
    path = get_pre_path(__file__, plevel)
    if path not in sys.path:
        sys.path.insert(0, path) | 1,079 |
def _check_str_input(var, input_name: str, valid_options: Optional[List[str]] = None) -> str:
"""
_check_str_input
Convenience function to check if an input is a string. If argument valid_options is given, this
function will also check that var is a valid option from the valid_options specified.
Parameters
----------
var
the input variable to check
input_name : str
the name of the variable to include if an error is raised
valid_options: List[str], optional
a list of valid options for var
Returns
-------
str
        the input var after lowering and stripping the string
"""
if not isinstance(var, str):
raise ValueError("Invalid input {0} for {1}. Input {1} must be a string.".format(
var, input_name))
var = var.strip().lower()
if valid_options is not None:
valid_options = [option.strip().lower() for option in valid_options]
if var not in valid_options:
raise ValueError("Invalid input {0} for {1}. Input {1} must be one of the following "
"options: {2}.".format(var, input_name, valid_options))
return var | 1,080 |
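An illustrative call of _check_str_input, assuming the function is imported as defined above:

colour = _check_str_input("  Red ", "colour", valid_options=["Red", "Green", "Blue"])
print(colour)  # 'red' -- stripped and lowered, matched case-insensitively

# _check_str_input(42, "colour")  # would raise ValueError: input must be a string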
def os_compress(filename, ctype, remove_original=False):
""" compress a file to any of the formats:
['.Z', '.gz', '.tar.gz', '.zip']
If the instance is already compressed (to any format), no operation
will be performed. If it is uncompressed:
        1) the file will be compressed
        2) if remove_original is set to True, then the original uncompressed
        file will be removed (only if the compression process is successful)
"""
if not os.path.isfile(filename):
raise RuntimeError(
"[ERROR] compress::os_compress File {:} does not exist".format(
filename))
if ctype is None:
return filename, filename
if not ctype.startswith('.'):
ctype = '.' + ctype
compressed_file = '{:}{:}'.format(filename, ctype)
status = 0
if ctype == '.Z':
try:
subprocess.call(["compress", "-f", "{:}".format(filename)])
except:
status = 1
elif ctype == '.gz':
try:
with open(filename,
                  'rb') as f_in, gzip.open(compressed_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
except:
status = 2
elif ctype == '.tar.gz':
try:
            with tarfile.open(compressed_file, "w:gz") as tarout:
tarout.add(filename, os.path.basename(filename))
except:
status = 3
elif ctype == '.zip':
try:
            with zipfile.ZipFile(compressed_file, "w") as zipout:
zipout.write(filename, os.path.basename(filename))
except:
status = 4
else:
status = 5
if status > 0 or not os.path.isfile(compressed_file):
msg = "[ERROR] Failed to compress RINEX file {:} (code: {:})".format(
filename, status)
raise RuntimeError(msg)
else:
if remove_original and ctype != '.Z':
os.remove(filename)
return filename, compressed_file | 1,081 |
def run_U_fixed_dynamics(**kwargs):
"""
Run simulation for a given set of parameter values
and generate relevant plots
"""
# Steady state checks
#print('============================== U fixed, U='+str(kwargs['U']))
a = mpde(**kwargs)
#lib.disp_params(a) # display non-array parameters
#t0 = time.time()
a.run()
#t1 = time.time()
#print('*\t Run time',t1-t0)
initial_mass = np.sum(a.sol[0,:])*a.dx
mass_true = lib.mass_fn(a.t,initial_mass,**kwargs)
#lib.disp_norms(a,ground_truth_values)
fig = plt.figure(figsize=(10,5))
ax11 = fig.add_subplot(121)
ax12 = fig.add_subplot(122)
mass_pde = np.sum(a.sol,axis=1)*a.dx
ax11.plot(a.t,mass_true,label='mass true')
ax11.plot(a.t,mass_pde,label='mass pde')
ax12.plot(a.t,np.abs(mass_pde - mass_true),label='|pde-(true)|')
ax11.set_title('mass over time')
ax12.set_title('mass diff')
ax11.set_xlabel('t')
ax12.set_xlabel('t')
ax11.legend()
ax12.legend()
plt.tight_layout()
# include dt
kwargs = {**kwargs, **{'dt':a.dt}}
fname = (DIR_TESTS
+ 'U_fixed_dynamics_'
+ lib.fname_suffix(**kwargs))
plt.savefig(fname)
plt.close()
return np.amax(np.abs(mass_true - mass_pde)) | 1,082 |
def removeDuplicates(bookmarks, newBookmarks):
"""Creates and returns a new list of bookmarks
without any duplicates"""
nodup = []
for bmNew in newBookmarks:
foundDup = False
for bm in bookmarks:
if (bm.linkURL == bmNew.linkURL):
foundDup = True
break
if (not foundDup):
nodup.append(bmNew)
return nodup | 1,083 |
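A short usage sketch for removeDuplicates; Bookmark here is a hypothetical stand-in for whatever bookmark type the surrounding code uses, needing only a linkURL attribute:

from collections import namedtuple

Bookmark = namedtuple('Bookmark', ['linkURL'])  # hypothetical stand-in

existing = [Bookmark('https://a.example'), Bookmark('https://b.example')]
incoming = [Bookmark('https://b.example'), Bookmark('https://c.example')]
print(removeDuplicates(existing, incoming))
# [Bookmark(linkURL='https://c.example')]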
def TNaming_Naming_GetID(*args):
"""
* following code from TDesignStd ==============================
:rtype: Standard_GUID
"""
return _TNaming.TNaming_Naming_GetID(*args) | 1,084 |
def assigned_user_add(request, location_id, destination):
"""
    Assigned user add is a POST view that adds a user to a project/task/opportunity/requirement.
:param request:
:param location_id:
:param destination:
:return:
"""
# Load the template
t = loader.get_template('NearBeach/blank.html')
# context
c = {
}
return HttpResponse(t.render(c, request)) | 1,085 |
def test_MeshAdaptRestart_adaptiveTime_BackwardEuler_baseline_withRDMC(verbose=0):
"""Get baseline data"""
currentPath = os.path.dirname(os.path.abspath(__file__))
runCommand = "cd "+currentPath+"; parun -C \"gen_mesh=False usePUMI=True adapt=0 fixedTimeStep=False\" -D \"baseline\" dambreak_Colagrossi_so.py;"
subprocess.call(runCommand,shell=True)
assert(True) | 1,086 |
def _parse_challenge(header):
# type: (str) -> Dict[str, str]
"""Parse challenge header into service and scope"""
ret = {}
if header.startswith(BEARER):
challenge_params = header[len(BEARER) + 1 :]
matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)
_clean(matches)
for i in range(0, len(matches), 2):
ret[matches[i]] = matches[i + 1]
return ret | 1,087 |
def test_get_batch(source):
""" Creates an input/target pair for evaluation """
seq_len = len(source) - 1
data = source[:seq_len]
target = source[1:1+seq_len].view(-1)
return data, target | 1,088 |
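A minimal sketch of the shift performed by test_get_batch, assuming a 1-D torch tensor as input:

import torch

source = torch.arange(6)              # tensor([0, 1, 2, 3, 4, 5])
data, target = test_get_batch(source)
# data   -> tensor([0, 1, 2, 3, 4])
# target -> tensor([1, 2, 3, 4, 5])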
def ShowChance(chance):
"""show how many chances are left"""
print("{:^40s}".format("you have {} more chance".format(chance))) | 1,089 |
def getReceptorResidues(filename=None, data=None):
"""Accepts a PDB(TQ) file and returns a
nested dictionary of:
chain:residue:atoms
"""
if filename:
lines = getLines(filename)
else:
lines = data
structure = {}
for l in lines:
if l.startswith("ATOM") or l.startswith("HETATM"):
res_t=l[17:20].strip()
res_n=l[22:27].strip()
res=res_t+res_n
chain=l[21].strip()
atom=l[12:17].strip()
if not chain in structure:
structure[chain]={}
if not res in structure[chain]:
structure[chain][res] = []
if not atom in structure[chain][res]:
structure[chain][res].append(atom)
return structure | 1,090 |
def extract_first_compute_cell(text):
"""
    INPUT: a block of wiki-like marked up text
    OUTPUT:
- ``meta`` - meta information about the cell (as a
dictionary)
- ``input`` - string, the input text
- ``output`` - string, the output text
- ``end`` - integer, first position after }}} in
text.
"""
# Find the input block
i = text.find('{{{')
if i == -1:
raise EOFError
j = text[i:].find('\n')
if j == -1:
raise EOFError
k = text[i:].find('|')
if k != -1 and k < j:
try:
meta = dictify(text[i+3:i+k])
except TypeError:
meta = {}
i += k + 1
else:
meta = {}
i += 3
j = text[i:].find('\n}}}')
if j == -1:
j = len(text)
else:
j += i
k = text[i:].find('\n///')
if k == -1 or k+i > j:
input = text[i:j]
output = ''
else:
input = text[i:i+k].strip()
output = text[i+k+4:j]
return meta, input.strip(), output, j+4 | 1,091 |
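A worked example for extract_first_compute_cell (an illustrative sketch; the dictify helper is only needed when a | metadata section is present, which this input omits):

text = "{{{\n2+2\n///\n4\n}}}"
meta, cell_input, cell_output, end = extract_first_compute_cell(text)
print(meta)         # {}
print(cell_input)   # '2+2'
print(cell_output)  # '\n4' -- the output keeps its leading newline
print(end)          # 17 -- first position after the closing }}}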
def test_matrix2dict():
""" Test Utils: matrix 2 dict """
m = [[0, 1, 3], [1, 0, 2], [3, 2, 0]]
d = matrix2dict(m)
assert (d == {0: {0: 0, 1: 1, 2:3}, 1: {0: 1, 1: 0, 2:2}, 2: {0: 3, 1:2, 2:0}} ) | 1,092 |
def setup_env_with_project_ini_self_destruct():
"""Set up the environment base structure"""
setup_env = EnvSetUp(p_make_project_ini=True)
yield setup_env
rm_tree(setup_env.anchor_dir, p_crash=False) | 1,093 |
def check_schema(loader_impl: LoaderImpl) -> LoaderImpl:
"""Wrapper method to check column names and types."""
@wraps(loader_impl)
def wrapped_loader(fp: Union[str, IO], extra_fields: Dict[str, str] = None) -> DataFrame:
name = fp if isinstance(fp, str) else fp.name
data = loader_impl(fp, extra_fields)
schema = FILE_SCHEMA if not extra_fields else {**FILE_SCHEMA, **extra_fields}
for column in list(data.columns):
if column not in schema:
log.info(f'From file ({name}): ignoring column \'{column}\'')
data.drop([column], axis=1, inplace=True)
for column, dtype in schema.items():
if column not in data.columns:
raise RuntimeError(f'From file ({name}): missing column \'{column}\'')
else:
try:
data[column] = data[column].astype(dtype)
except (TypeError, ValueError) as error:
raise RuntimeError(f'From file ({name}), column \'{column}\': {error}') from error
return data
return wrapped_loader | 1,094 |
def fail_quality(name, message):
"""
Fail the specified quality check by generating the JUnit XML results file
and raising a ``BuildFailure``.
"""
write_junit_xml(name, message)
raise BuildFailure(message) | 1,095 |
def get_album_photos(album, offset, vk_session):
"""Retrieves list of photos within given album from VK.com
:param album:
:type album: str
:param offset:
:type offset: int or None
:param vk_session: instance of :class:`vk_api.VkApi`
:type vk_session: :class:`vk_api.VkApi`
:return:
"""
def normpath(filename):
keepcharacters = [' ', '.', '_', ',']
return "".join(c for c in filename
if c.isalnum() or c in keepcharacters).rstrip()
items = []
try:
if USER_PHOTOS_ALBUM_ID == album['id']:
response = vk_session.method(
'photos.getUserPhotos',
values={
'user_id': vk_session.token['user_id'],
'count': 1000,
'offset': offset or 0,
'photo_sizes': 1
})
else:
response = vk_session.method(
'photos.get',
values={
'owner_id': vk_session.token['user_id'],
'album_id': album['id'],
'offset': offset or 0,
'photo_sizes': 1
})
except Exception as e:
logging.error(e)
return items
image_types = {
's': 0,
'm': 1,
'x': 2,
'o': 3,
'p': 4,
'q': 5,
'r': 6,
'y': 7,
'z': 8,
'w': 9
}
if 'items' in response:
for item in response['items']:
sizes = item.get('sizes')
if not sizes:
logging.info('Item skipped!')
continue
newlist = sorted(
sizes,
key=lambda x: image_types.get(x.get('type')),
reverse=True)
image = {
'id': item['id'],
'date': datetime.datetime.fromtimestamp(item['date']),
'url': newlist[0].get('url')
}
if item.get('text'):
image['title'] = normpath(item['text'])
items.append(image)
return items | 1,096 |
def get_view_cursor(**kwargs) -> 'XTextViewCursor':
"""
    Gets the current view cursor, which is an XTextViewCursor
Keyword Args:
o_doc (object, optional): current document (xModel)
Returns:
object: View Cursor
"""
o_doc = kwargs.get('o_doc', None)
if o_doc is None:
o_doc = get_xModel()
# https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1text_1_1XTextViewCursor.html
frame: object = o_doc.CurrentController.Frame
current_controler: object = frame.getController() # XController
view_cursor = current_controler.getViewCursor()
return view_cursor | 1,097 |
def get_common_metrics(test_values, predicted):
"""
Return some common classifier metrics
:param test_values: values to test with
:param predicted: predicted values
:return: accuracy, precision and recall value
"""
accuracy = metrics.accuracy_score(test_values, predicted)
precision = metrics.precision_score(test_values, predicted)
recall = metrics.recall_score(test_values, predicted)
return accuracy, precision, recall | 1,098 |
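An illustrative call of get_common_metrics (assuming sklearn's metrics module is imported as in the source):

y_true = [1, 0, 1, 1]
y_pred = [1, 0, 0, 1]
acc, prec, rec = get_common_metrics(y_true, y_pred)
print(acc, prec, rec)   # 0.75 1.0 0.666...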
def update_maintenance_window_target(WindowId=None, WindowTargetId=None, Targets=None, OwnerInformation=None, Name=None, Description=None, Replace=None):
"""
Modifies the target of an existing maintenance window. You can change the following:
See also: AWS API Documentation
Exceptions
:example: response = client.update_maintenance_window_target(
WindowId='string',
WindowTargetId='string',
Targets=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
OwnerInformation='string',
Name='string',
Description='string',
Replace=True|False
)
:type WindowId: string
:param WindowId: [REQUIRED]\nThe maintenance window ID with which to modify the target.\n
:type WindowTargetId: string
:param WindowTargetId: [REQUIRED]\nThe target ID to modify.\n
:type Targets: list
:param Targets: The targets to add or replace.\n\n(dict) --An array of search criteria that targets instances using a Key,Value combination that you specify.\nSupported formats include the following.\n\n``Key=InstanceIds,Values=*instance-id-1* ,*instance-id-2* ,*instance-id-3* ``\n``Key=tag:my-tag-key ,Values=*my-tag-value-1* ,*my-tag-value-2* ``\n``Key=tag-key,Values=*my-tag-key-1* ,*my-tag-key-2* ``\n(Maintenance window targets only) ``Key=resource-groups:Name,Values=*resource-group-name* ``\n(Maintenance window targets only) ``Key=resource-groups:ResourceTypeFilters,Values=*resource-type-1* ,*resource-type-2* ``\n\nFor example:\n\nKey=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE\nKey=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3\nKey=tag-key,Values=Name,Instance-Type,CostCenter\n(Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.\n(Maintenance window targets only) ``Key=resource-groups:ResourceTypeFilters,Values=*AWS::EC2::INSTANCE* ,*AWS::EC2::VPC* `` This example demonstrates how to target only EC2 instances and VPCs in your maintenance window.\n(State Manager association targets only) Key=InstanceIds,Values=* This example demonstrates how to target all managed instances in the AWS Region where the association was created.\n\nFor information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the AWS Systems Manager User Guide .\n\nKey (string) --User-defined criteria for sending commands that target instances that meet the criteria.\n\nValues (list) --User-defined criteria that maps to Key . For example, if you specified tag:ServerRole , you could specify value:WebServer to run a command on instances that include EC2 tags of ServerRole,WebServer .\n\n(string) --\n\n\n\n\n\n
:type OwnerInformation: string
:param OwnerInformation: User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this maintenance window.
:type Name: string
:param Name: A name for the update.
:type Description: string
:param Description: An optional description for the update.
:type Replace: boolean
:param Replace: If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.
:rtype: dict
ReturnsResponse Syntax
{
'WindowId': 'string',
'WindowTargetId': 'string',
'Targets': [
{
'Key': 'string',
'Values': [
'string',
]
},
],
'OwnerInformation': 'string',
'Name': 'string',
'Description': 'string'
}
Response Structure
(dict) --
WindowId (string) --
The maintenance window ID specified in the update request.
WindowTargetId (string) --
The target ID specified in the update request.
Targets (list) --
The updated targets.
(dict) --
An array of search criteria that targets instances using a Key,Value combination that you specify.
Supported formats include the following.
``Key=InstanceIds,Values=*instance-id-1* ,*instance-id-2* ,*instance-id-3* ``
``Key=tag:my-tag-key ,Values=*my-tag-value-1* ,*my-tag-value-2* ``
``Key=tag-key,Values=*my-tag-key-1* ,*my-tag-key-2* ``
(Maintenance window targets only) ``Key=resource-groups:Name,Values=*resource-group-name* ``
(Maintenance window targets only) ``Key=resource-groups:ResourceTypeFilters,Values=*resource-type-1* ,*resource-type-2* ``
For example:
Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE
Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3
Key=tag-key,Values=Name,Instance-Type,CostCenter
(Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.
(Maintenance window targets only) ``Key=resource-groups:ResourceTypeFilters,Values=*AWS::EC2::INSTANCE* ,*AWS::EC2::VPC* `` This example demonstrates how to target only EC2 instances and VPCs in your maintenance window.
(State Manager association targets only) Key=InstanceIds,Values=* This example demonstrates how to target all managed instances in the AWS Region where the association was created.
For information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the AWS Systems Manager User Guide .
Key (string) --
User-defined criteria for sending commands that target instances that meet the criteria.
Values (list) --
User-defined criteria that maps to Key . For example, if you specified tag:ServerRole , you could specify value:WebServer to run a command on instances that include EC2 tags of ServerRole,WebServer .
(string) --
OwnerInformation (string) --
The updated owner.
Name (string) --
The updated name.
Description (string) --
The updated description.
Exceptions
SSM.Client.exceptions.DoesNotExistException
SSM.Client.exceptions.InternalServerError
:return: {
'WindowId': 'string',
'WindowTargetId': 'string',
'Targets': [
{
'Key': 'string',
'Values': [
'string',
]
},
],
'OwnerInformation': 'string',
'Name': 'string',
'Description': 'string'
}
:returns:
WindowId (string) -- [REQUIRED]
The maintenance window ID with which to modify the target.
WindowTargetId (string) -- [REQUIRED]
The target ID to modify.
Targets (list) -- The targets to add or replace.
(dict) --An array of search criteria that targets instances using a Key,Value combination that you specify.
Supported formats include the following.
``Key=InstanceIds,Values=*instance-id-1* ,*instance-id-2* ,*instance-id-3* ``
``Key=tag:my-tag-key ,Values=*my-tag-value-1* ,*my-tag-value-2* ``
``Key=tag-key,Values=*my-tag-key-1* ,*my-tag-key-2* ``
(Maintenance window targets only) ``Key=resource-groups:Name,Values=*resource-group-name* ``
(Maintenance window targets only) ``Key=resource-groups:ResourceTypeFilters,Values=*resource-type-1* ,*resource-type-2* ``
For example:
Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE
Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3
Key=tag-key,Values=Name,Instance-Type,CostCenter
(Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.
(Maintenance window targets only) ``Key=resource-groups:ResourceTypeFilters,Values=*AWS::EC2::INSTANCE* ,*AWS::EC2::VPC* `` This example demonstrates how to target only EC2 instances and VPCs in your maintenance window.
(State Manager association targets only) Key=InstanceIds,Values=* This example demonstrates how to target all managed instances in the AWS Region where the association was created.
For information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the AWS Systems Manager User Guide .
Key (string) --User-defined criteria for sending commands that target instances that meet the criteria.
Values (list) --User-defined criteria that maps to Key . For example, if you specified tag:ServerRole , you could specify value:WebServer to run a command on instances that include EC2 tags of ServerRole,WebServer .
(string) --
OwnerInformation (string) -- User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this maintenance window.
Name (string) -- A name for the update.
Description (string) -- An optional description for the update.
Replace (boolean) -- If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.
"""
pass | 1,099 |