def pick_an_experiment(i):
"""
Input: {
(repo_uoa) - experiment repository name (defaults to hackathon_local_repo, but can be overridden by '*')
(extra_tags) - extra tags to filter
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
repo_uoa = i.get('repo_uoa', hackathon_local_repo)
extra_tags = i.get('extra_tags')
list_exp_adict = { 'action': 'list_experiments',
'module_uoa': work['self_module_uoa'],
'repo_uoa': repo_uoa,
'extra_tags': extra_tags,
}
r=ck.access( list_exp_adict )
if r['return']>0: return r
if len(r['lst'])==0:
return {'return':1, 'error':'No experiments to choose from - please relax your filters'}
all_experiment_names = [ '{repo_uoa}:{module_uoa}:{data_uoa}'.format(**entry_dict) for entry_dict in r['lst']]
number_of_experiments = len(all_experiment_names)
select_adict = {'action': 'select_string',
'module_uoa': 'misc',
'options': all_experiment_names,
'default': str(number_of_experiments-1),
'question': 'Please select the experiment entry',
}
r=ck.access( select_adict )
if r['return']>0:
return r
else:
cid = r['selected_value']
return {'return':0, 'cid': cid}
def run(args, image: str) -> str:
"""
Run docker image and mount user-provided folder with C++ files.
Parameters
----------
args : dict-like
User provided arguments parsed by argparse.ArgumentParser instance.
image : str
Name of image from which container is run
Returns
-------
str:
Name of created container. Consists of the torchlambda prefix and a random string
"""
def _add_components(args):
return (
'"' + ";".join(args.aws_components) + '"'
if args.aws_components
else '"core"'
)
def _compilation(args):
return '"' + args.compilation + '"' if args.compilation else ""
container_name = "torchlambda-" + str(uuid.uuid4())
source_directory = pathlib.Path(args.source).absolute()
if source_directory.is_dir():
command = "docker {} run {} -v {}:/usr/local/user_code --name {} {} {} ".format(
*general.parse_none(
args.docker,
args.docker_run,
source_directory,
container_name,
image,
_add_components(args),
)
)
command += _compilation(args)
general.run(
command,
operation="building inference AWS Lambda package.",
silent=args.silent,
)
return container_name
print("torchlambda:: Provided source files are not directory, exiting.")
exit(1) | 801 |
def get_top_design_list(oProject):
"""
Returns a list of the names of the top-level designs.
Parameters
----------
oProject : pywin32 COMObject
The HFSS project in which the operation will be performed.
Returns
-------
design_list : list of str
The top-level design list.
"""
design_list = list(oProject.GetTopDesignList())
return [str(name) for name in design_list]
def gere_operation_unaire(op, valeur1):
"""Redirige vers pourcent, racine_carre, cosinus ou sinus."""
pass
def test_atomic_language_pattern_3_nistxml_sv_iv_atomic_language_pattern_4_5(mode, save_output, output_format):
"""
Type atomic/language is restricted by facet pattern with value
([a-zA-Z]{2}|[iI]-[a-zA-Z]+|[xX]-[a-zA-Z]{1,8})(-[a-zA-Z]{3})*.
"""
assert_bindings(
schema="nistData/atomic/language/Schema+Instance/NISTSchema-SV-IV-atomic-language-pattern-4.xsd",
instance="nistData/atomic/language/Schema+Instance/NISTXML-SV-IV-atomic-language-pattern-4-5.xml",
class_name="NistschemaSvIvAtomicLanguagePattern4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_amazon_review_dataset_distribution():
"""
Feature: Test AmazonReviewDataset in distribution.
Description: test in a distributed state.
Expectation: the data is processed successfully.
"""
data = ds.AmazonReviewDataset(FULL_DIR, usage='test', shuffle=False, num_shards=2, shard_id=0)
count = 0
for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
count += 1
assert count == 2
def chi01(param_name: Union[str, None], yval: float, **kwargs) -> Dict[str, Any]:
"""Plot defaults for sweep_plotting.chi01"""
kwargs["xlabel"] = kwargs.get("xlabel") or recast_name(param_name)
kwargs["ylabel"] = kwargs.get("ylabel") or r"$\chi_{{01}}$ [{}]".format(
units.get_units()
)
kwargs["title"] = kwargs.get("title") or r"$\chi_{{01}}=${:.4f} {}".format(
yval, units.get_units()
)
return kwargs
def cstring(*args, **kwargs):
"""Return a colored string.
Parameters
----------
args : iterable of str
bold : bool
color : str, {'HEADER', 'LIGHTBLUE', 'LIGHTGREEN', 'WARNING', 'FAIL',
'ENDC', 'BOLD', 'UNDERLINE', 'BLACK', 'RED', 'GREEN',
'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE'}
Terminal color to use.
"""
args, kwargs = _colorize(*args, **kwargs)
cstr = " ".join(args)
return cstr
def create_app():
"""Construct the core application."""
app = Flask(__name__, instance_relative_config=False)
app.config.from_object('config.Config')
db.init_app(app)
admin.init_app(app)
basic_auth.init_app(app)
with app.app_context():
from . import routes # Import routes
db.create_all() # Create sql tables for our data models
admin.add_view(ArticleView(Articles, db.session))
return app
def bytes_index(x: bytes, sub: bytes, start: int, end: int) -> int:
"""Where is the first location of a subsequence within a given slice of a bytes object?
Compiling bytes.index compiles this function, when sub is a bytes object.
This function is only intended to be executed in this compiled form.
Args:
x: The bytes object in which to search.
sub: The subsequence to look for.
start: Beginning of slice of x. Interpreted as slice notation.
end: End of slice of x. Interpreted as slice notation.
Returns:
Lowest index of match within slice of x.
Raises:
ValueError: If sub is not found.
"""
ret = bytes_find(x, sub, start, end)
if ret == -1:
raise ValueError("subsection not found")
return ret
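# Illustrative usage of bytes_index (an assumption-laden sketch: it presumes the
# bytes_find helper referenced above is available and mirrors bytes.find, so
# bytes_index should agree with the builtin bytes.index on in-range slices).
def _demo_bytes_index():
    data = b"hello world"
    assert bytes_index(data, b"o", 0, len(data)) == data.index(b"o", 0, len(data))  # -> 4
    assert bytes_index(data, b"o", 5, len(data)) == data.index(b"o", 5, len(data))  # -> 7
    try:
        bytes_index(data, b"zz", 0, len(data))
    except ValueError:
        pass  # not found, as documented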
def pickup_path(start_path, filename, default=None):
"""pickupping the config file path
start path = "/foo/bar/boo", filename = "config.ini"
finding candidates are ["/foo/bar/boo/config.ini", "/foo/bar/config.ini", "/foo/config.ini", "/config.ini"]
"""
start_point = os.path.normpath(os.path.abspath(start_path))
current = start_point
candidates = []
while True:
candidates.append(os.path.join(current, filename))
if current == "/":
break
current, dropped = os.path.split(current)
for path in candidates:
if os.path.exists(path):
return path
return default
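# Minimal self-contained demo of pickup_path on a POSIX filesystem (tempfile/os are
# standard library; the directory layout below is purely illustrative).
def _demo_pickup_path():
    import os
    import tempfile
    root = tempfile.mkdtemp()
    nested = os.path.join(root, "foo", "bar", "boo")
    os.makedirs(nested)
    with open(os.path.join(root, "foo", "config.ini"), "w") as f:
        f.write("[main]\n")
    # Walks up from .../foo/bar/boo and returns the first existing candidate,
    # here .../foo/config.ini; the default is returned only if nothing matches.
    found = pickup_path(nested, "config.ini", default=None)
    assert found == os.path.join(root, "foo", "config.ini")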
def uniform(minimum, maximum, shape=[]):
"""uniform(minimum, maximum, shape=[]) returns array of given shape of random reals
in given range"""
if shape == []:
shape = None
return mt.uniform(minimum, maximum, shape)
def unquote_to_bytes(urlencoded_string):
"""Replace %xx escapes by their single-character equivalent,
using the “iso-8859-1” encoding to decode all 8-bit values.
"""
return bytes(
unquote(urlencoded_string, encoding='iso-8859-1'),
encoding='iso-8859-1'
)
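# Illustrative round-trip for unquote_to_bytes (assumes `unquote` above is
# urllib.parse.unquote; iso-8859-1 maps every %XX escape to a single byte 1:1).
def _demo_unquote_to_bytes():
    import urllib.parse
    assert unquote_to_bytes("a%20b%FF") == b"a b\xff"
    # Matches the stdlib reference implementation:
    assert urllib.parse.unquote_to_bytes("a%20b%FF") == b"a b\xff"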
def plot_history(history):
"""Plots training historical training and validation accuracies and losses
Args:
history (dict): training and validation losses and accuracies history.
{'train': {'loss': [], 'acc': []},
'valid': {'loss': [], 'acc': []}}
"""
fig, ax1 = plt.subplots()
# Correctly number epochs starting from 1
epochs = np.arange(1, len(history['train']['loss']) + 1)
# Plot losses
tl, = ax1.plot(epochs, history['train']['loss'], 'g-', label='Training Loss')
vl, = ax1.plot(epochs, history['valid']['loss'], 'b-', label='Validation Loss')
ax1.set_xlabel('Training Epoch')
ax1.set_ylabel('Loss')
leg1 = ax1.legend(loc='lower right')
# Plot accuracies
ax2 = ax1.twinx()
ta, = ax2.plot(epochs, history['train']['acc'], 'y-')
va, = ax2.plot(epochs, history['valid']['acc'], 'r-')
ax2.set_ylabel('Accuracy')
leg2 = ax1.legend([ta, va], ['Training Accuracy','Validation Accuracy'],
loc='upper right')
ax1.add_artist(leg1)
plt.legend(frameon=False)
plt.show()
def qualifiedName(item):
"""Return the full name of an item, including any projects that it's in.
If the item does not have a name, return ``None``.
XXX: Doesn't include folders.
"""
names = []
# Note: assumes that the presence of a single null name in the parent tree
# means that the item is not properly named.
for i in iterParents(item):
name = i.name()
if name is None:
return None
names.append(name)
return " / ".join(reversed(names)) | 814 |
def assign_columns_of_sector_levels(df_load):
"""
Add additional column capturing the sector level in the two columns
:param df_load: df with at least one sector column
:param ambiguous_sector_assignment: if there are sectors that can be
assigned to multiple sector lengths (e.g., for government or household
sectors), option to specify which sector assignment to keep.
:return: df with new column for sector length
"""
df = replace_NoneType_with_empty_cells(df_load)
# load cw with column of sector levels
cw = load_sector_length_cw_melt()
# merge df assigning sector lengths
for s in ['Produced', 'Consumed']:
df = df.merge(cw, how='left', left_on=f'Sector{s}By',
right_on='Sector').drop(columns=['Sector']).rename(
columns={'SectorLength': f'Sector{s}ByLength'})
df[f'Sector{s}ByLength'] = df[f'Sector{s}ByLength'].fillna(0)
# There are cases where non-traditional sectors (non naics) have
# multiple naics assignments. If there is a non-zero value in the other
# sector length column, keep that row because sector lengths must always
# match.
# subset df into two dfs, one where one sector column length has a zero
# value and the second where both sector length columns have non-zero
# values
df1 = df[(df['SectorProducedByLength'] == 0) |
(df['SectorConsumedByLength'] == 0)]
df2 = df[(df['SectorProducedByLength'] != 0) &
(df['SectorConsumedByLength'] != 0)]
# only keep rows where the values are equal
df2e = df2[df2['SectorProducedByLength'] == df2['SectorConsumedByLength']]
# concat dfs
dfc = pd.concat([df1, df2e], ignore_index=True)
# check for duplicates. Rows might be duplicated if a sector is the same
# for multiple sector lengths
duplicate_cols = [e for e in dfc.columns if e not in [
'SectorProducedByLength', 'SectorConsumedByLength']]
duplicate_df = dfc[dfc.duplicated(subset=duplicate_cols,
keep=False)].reset_index(drop=True)
if len(duplicate_df) > 0:
log.warning('There are duplicate rows caused by ambiguous sectors.')
dfc = dfc.sort_values(['SectorProducedByLength',
'SectorConsumedByLength']).reset_index(drop=True)
return dfc
def add_matrices(matrix_a, matrix_b):
"""Add two n x n matrices
"""
return [[x + y for x, y in zip(matrix_a[i], matrix_b[i])]
for i in range(len(matrix_a))]
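# Quick sanity check for add_matrices: element-wise sum of two 2x2 matrices
# given as plain nested lists (no external dependencies).
def _demo_add_matrices():
    assert add_matrices([[1, 2], [3, 4]], [[10, 20], [30, 40]]) == [[11, 22], [33, 44]]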
def dataset() -> Generator[Dataset, None, None]:
"""Fetches a consistent view of the rows to be serialized.
Serializers may further filter this list down if they wish."""
with transaction.atomic():
ds = Dataset()
# The sheer width of the number of columns being pulled out of
# the database is a significant fraction of the time (~5s/6s)
# to run the query. Limiting the columns to just those that
# are necessary accelerates this significantly -- with the
# caveat that if any /other/ column is requested, it adds O(n)
# additional queries, at significant cost!
ds.locations = (
models.Location.objects.filter(state__abbreviation="CA", soft_deleted=False)
.exclude(
dn_latest_non_skip_report__planned_closure__lt=datetime.date.today()
)
.select_related("dn_latest_non_skip_report__appointment_tag")
.select_related("county")
.select_related("location_type")
.select_related("provider")
.prefetch_related("dn_latest_non_skip_report__availability_tags")
).only(
"public_id",
"name",
"county__name",
"full_address",
"latitude",
"longitude",
"location_type__name",
"vaccinefinder_location_id",
"vaccinespotter_location_id",
"google_places_id",
"county__vaccine_reservations_url",
"dn_latest_non_skip_report__appointment_tag__slug",
"dn_latest_non_skip_report__appointment_details",
"dn_latest_non_skip_report__location_id",
"dn_latest_non_skip_report__created_at",
"dn_latest_non_skip_report__public_notes",
"website",
"provider__name",
"provider__appointments_url",
)
ds.counties = (
models.County.objects.filter(state__abbreviation="CA")
.annotate(
locations_with_reports=Count(
"locations",
filter=Q(locations__dn_latest_non_skip_report_id__isnull=False),
)
)
.annotate(
locations_with_latest_yes=Count(
"locations",
filter=Q(
locations__dn_latest_non_skip_report_id=F(
"locations__dn_latest_yes_report_id"
),
),
)
)
)
ds.providers = (
models.Provider.objects.all()
.select_related("provider_type")
.prefetch_related("phases")
)
yield ds
def create_error_handlers(blueprint):
"""Create error handlers on blueprint."""
blueprint.errorhandler(PIDInvalidAction)(create_api_errorhandler(
status=403, message='Invalid action'
))
records_rest_error_handlers(blueprint)
def assert_allclose(
actual: int, desired: int, rtol: float, err_msg: Literal["ngroupsfailed"]
):
"""
usage.statsmodels: 1
"""
...
def no_pretrain_inner_speech(subject):
"""This function aims at training a model without pretraining by training
only on the inner speech condition of a sigle subject
:return: metric history for every of the n k-folds
:rtype: list of dictonaries
"""
###### DATA
data, events = dp.load_data(subjects=[subject], filter_action=True)
# shuffle data and labels
data, events = sklearn.utils.shuffle(data, events)
# save memory by converting from 64bit to 32bit floats
data = data.astype(np.float32)
# filter out only the inner speech condition
data, events = dp.choose_condition(data, events, 'inner speech')
# select the column containing directions (up, down, left, right)
events = events[:, 1]
# one-hot event data
events = np_utils.to_categorical(events, 4)
# zscore normalize the data
data = scipy.stats.zscore(data, axis=2)
# reshape
data = data.reshape(*data.shape, 1)
print("Data Prepared.")
###### MODEL
gpus = tf.config.list_logical_devices('GPU')
mirrored_strategy = tf.distribute.MirroredStrategy(gpus)
with mirrored_strategy.scope():
# create EEGNet (source: https://github.com/vlawhern/arl-eegmodels)
model = EEGNet(nb_classes=4, Chans=data.shape[1],
Samples=data.shape[2], dropoutRate=DROPOUT,
kernLength=KERNEL_LENGTH, F1=8, D=2, F2=16,
dropoutType='Dropout')
# adam optimizer
optimizer = tf.keras.optimizers.Adam()
# compile model
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.build(input_shape=(BATCH_SIZE, *data.shape[1:]))
path = './models/saved_models/no_pretrain_inner_speech'
model.save(path)
del model
###### KFOLD TRAINING
history_accumulator = []
for _ in range(N_CHECKS):
history = kfold_training(data, events, path, BATCH_SIZE, EPOCHS)
history_accumulator += history
print(history_accumulator)
print("Subject", subject, " Mean Accuracy:", np.mean([h['val_accuracy'][-1] for h in history_accumulator]))
return history_accumulator
async def _expect_io():
"""Wait until a line of text comes in from the EQ log."""
try:
with eqlog.tap() as t:
await t.next_line()
except asyncio.CancelledError:
pass
def main():
"""Entrypoint"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--cpu-arch',
metavar='ARCH',
default=platform.architecture()[0],
choices=('64bit', '32bit'),
help=('Filter build outputs by a target CPU. '
'This is the same as the "arch" key in FILES.cfg. '
'Default (from platform.architecture()): %(default)s'))
args = parser.parse_args()
build_outputs = Path('out/Default')
output = Path('../ungoogled-chromium_{}-{}_windows.zip'.format(get_chromium_version(),
get_release_revision()))
files_generator = buildkit.filescfg.filescfg_generator(
Path('chrome/tools/build/win/FILES.cfg'), build_outputs, args.cpu_arch)
buildkit.filescfg.create_archive(files_generator, tuple(), build_outputs, output)
def build_put_big_decimal_negative_decimal_request(**kwargs: Any) -> HttpRequest:
"""Put big decimal value -99999999.99.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: The default value is -99999999.99. Note that overriding this default value may
result in unsupported behavior.
:paramtype json: float
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
json = kwargs.pop("json", -99999999.99) # type: float
accept = "application/json"
# Construct URL
url = "/number/big/decimal/-99999999.99"
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, **kwargs)
def handle_record_end():
"""Forward internal bus message to external bus."""
LOG.info("End Recording...")
context = {'client_name': 'mycroft_listener',
'source': 'audio',
'destination': ["skills"]}
bus.emit(Message('recognizer_loop:record_end', context=context))
def make_subplots(
rows=1,
cols=1,
shared_xaxes=False,
shared_yaxes=False,
start_cell="top-left",
print_grid=False,
horizontal_spacing=None,
vertical_spacing=None,
subplot_titles=None,
column_widths=None,
row_heights=None,
specs=None,
insets=None,
column_titles=None,
row_titles=None,
x_title=None,
y_title=None,
figure=None,
**kwargs,
) -> go.Figure:
"""
Return an instance of plotly.graph_objs.Figure with predefined subplots
configured in 'layout'.
Parameters
----------
rows: int (default 1)
Number of rows in the subplot grid. Must be greater than zero.
cols: int (default 1)
Number of columns in the subplot grid. Must be greater than zero.
shared_xaxes: boolean or str (default False)
Assign shared (linked) x-axes for 2D cartesian subplots
- True or 'columns': Share axes among subplots in the same column
- 'rows': Share axes among subplots in the same row
- 'all': Share axes across all subplots in the grid.
shared_yaxes: boolean or str (default False)
Assign shared (linked) y-axes for 2D cartesian subplots
- 'columns': Share axes among subplots in the same column
- True or 'rows': Share axes among subplots in the same row
- 'all': Share axes across all subplots in the grid.
start_cell: 'bottom-left' or 'top-left' (default 'top-left')
Choose the starting cell in the subplot grid used to set the
domains_grid of the subplots.
- 'top-left': Subplots are numbered with (1, 1) in the top
left corner
- 'bottom-left': Subplots are numbered with (1, 1) in the bottom
left corner
print_grid: boolean (default False):
If True, prints a string representation of the plot grid. Grid may
also be printed using the `Figure.print_grid()` method on the
resulting figure.
horizontal_spacing: float (default 0.2 / cols)
Space between subplot columns in normalized plot coordinates. Must be
a float between 0 and 1.
Applies to all columns (use 'specs' subplot-dependents spacing)
vertical_spacing: float (default 0.3 / rows)
Space between subplot rows in normalized plot coordinates. Must be
a float between 0 and 1.
Applies to all rows (use 'specs' subplot-dependents spacing)
subplot_titles: list of str or None (default None)
Title of each subplot as a list in row-major ordering.
Empty strings ("") can be included in the list if no subplot title
is desired in that space so that the titles are properly indexed.
specs: list of lists of dict or None (default None)
Per subplot specifications of subplot type, row/column spanning, and
spacing.
ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
- Indices of the outer list correspond to subplot grid rows
starting from the top, if start_cell='top-left',
or bottom, if start_cell='bottom-left'.
The number of rows in 'specs' must be equal to 'rows'.
- Indices of the inner lists correspond to subplot grid columns
starting from the left. The number of columns in 'specs'
must be equal to 'cols'.
- Each item in the 'specs' list corresponds to one subplot
in a subplot grid. (N.B. The subplot grid has exactly 'rows'
times 'cols' cells.)
- Use None for a blank subplot cell (or to move past a col/row span).
- Note that specs[0][0] has the specs of the 'start_cell' subplot.
- Each item in 'specs' is a dictionary.
The available keys are:
* type (string, default 'xy'): Subplot type. One of
- 'xy': 2D Cartesian subplot type for scatter, bar, etc.
- 'scene': 3D Cartesian subplot for scatter3d, cone, etc.
- 'polar': Polar subplot for scatterpolar, barpolar, etc.
- 'ternary': Ternary subplot for scatterternary
- 'mapbox': Mapbox subplot for scattermapbox
- 'domain': Subplot type for traces that are individually
positioned. pie, parcoords, parcats, etc.
- trace type: A trace type which will be used to determine
the appropriate subplot type for that trace
* secondary_y (bool, default False): If True, create a secondary
y-axis positioned on the right side of the subplot. Only valid
if type='xy'.
* colspan (int, default 1): number of subplot columns
for this subplot to span.
* rowspan (int, default 1): number of subplot rows
for this subplot to span.
* l (float, default 0.0): padding left of cell
* r (float, default 0.0): padding right of cell
* t (float, default 0.0): padding top of cell
* b (float, default 0.0): padding bottom of cell
- Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust
the spacing in between the subplots.
insets: list of dict or None (default None):
Inset specifications. Insets are subplots that overlay grid subplots
- Each item in 'insets' is a dictionary.
The available keys are:
* cell (tuple, default=(1,1)): (row, col) index of the
subplot cell to overlay inset axes onto.
* type (string, default 'xy'): Subplot type
* l (float, default=0.0): padding left of inset
in fraction of cell width
* w (float or 'to_end', default='to_end') inset width
in fraction of cell width ('to_end': to cell right edge)
* b (float, default=0.0): padding bottom of inset
in fraction of cell height
* h (float or 'to_end', default='to_end') inset height
in fraction of cell height ('to_end': to cell top edge)
column_widths: list of numbers or None (default None)
list of length `cols` of the relative widths of each column of subplots.
Values are normalized internally and used to distribute overall width
of the figure (excluding padding) among the columns.
For backward compatibility, may also be specified using the
`column_width` keyword argument.
row_heights: list of numbers or None (default None)
list of length `rows` of the relative heights of each row of subplots.
If start_cell='top-left' then row heights are applied top to bottom.
Otherwise, if start_cell='bottom-left' then row heights are applied
bottom to top.
For backward compatibility, may also be specified using the
`row_width` kwarg. If specified as `row_width`, then the width values
are applied from bottom to top regardless of the value of start_cell.
This matches the legacy behavior of the `row_width` argument.
column_titles: list of str or None (default None)
list of length `cols` of titles to place above the top subplot in
each column.
row_titles: list of str or None (default None)
list of length `rows` of titles to place on the right side of each
row of subplots. If start_cell='top-left' then row titles are
applied top to bottom. Otherwise, if start_cell='bottom-left' then
row titles are applied bottom to top.
x_title: str or None (default None)
Title to place below the bottom row of subplots,
centered horizontally
y_title: str or None (default None)
Title to place to the left of the left column of subplots,
centered vertically
figure: go.Figure or None (default None)
If None, a new go.Figure instance will be created and its axes will be
populated with those corresponding to the requested subplot geometry and
this new figure will be returned.
If a go.Figure instance, the axes will be added to the
layout of this figure and this figure will be returned. If the figure
already contains axes, they will be overwritten.
Examples
--------
Example 1:
>>> # Stack two subplots vertically, and add a scatter trace to each
>>> from plotly.subplots import make_subplots
>>> import plotly.graph_objects as go
>>> fig = make_subplots(rows=2)
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
[ (2,1) xaxis2,yaxis2 ]
>>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
or see Figure.append_trace
Example 2:
>>> # Stack a scatter plot
>>> fig = make_subplots(rows=2, shared_xaxes=True)
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
[ (2,1) xaxis2,yaxis2 ]
>>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 3:
>>> # irregular subplot layout (more examples below under 'specs')
>>> fig = make_subplots(rows=2, cols=2,
... specs=[[{}, {}],
... [{'colspan': 2}, None]])
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ] [ (1,2) xaxis2,yaxis2 ]
[ (2,1) xaxis3,yaxis3 - ]
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 4:
>>> # insets
>>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
With insets:
[ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]
>>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS
Figure(...)
Example 5:
>>> # include subplot titles
>>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
This is the format of your plot grid:
[ (1,1) x1,y1 ]
[ (2,1) x2,y2 ]
>>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 6:
Subplot with mixed subplot types
>>> fig = make_subplots(rows=2, cols=2,
... specs=[[{'type': 'xy'}, {'type': 'polar'}],
... [{'type': 'scene'}, {'type': 'ternary'}]])
>>> fig.add_traces(
... [go.Scatter(y=[2, 3, 1]),
... go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),
... go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),
... go.Scatterternary(a=[0.1, 0.2, 0.1],
... b=[0.2, 0.3, 0.1],
... c=[0.7, 0.5, 0.8])],
... rows=[1, 1, 2, 2],
... cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS
Figure(...)
"""
return _sub.make_subplots(
rows,
cols,
shared_xaxes,
shared_yaxes,
start_cell,
print_grid,
horizontal_spacing,
vertical_spacing,
subplot_titles,
column_widths,
row_heights,
specs,
insets,
column_titles,
row_titles,
x_title,
y_title,
figure,
**kwargs,
)
def op_table(name):
"""Get the symbol `name' as an int8_t[]."""
return gdb.parse_and_eval("&'" + name + "'").cast(T('int8_t').pointer())
def add_pyramid_paths(
spec,
route_name,
request=None,
request_method=None,
operations=None,
autodoc=True,
**kwargs
):
"""
Adds a route and view info to spec
:param spec:
ApiSpec object
:param route_name:
Route name to inspect
:param request:
Request object, if `None` then `get_current_request()` will be used
:param request_method:
Request method predicate
:param operations:
Operations dict that will be used instead of introspection
:param autodoc:
Include information about endpoints without markdown docstring
:param kwargs:
Additional kwargs for predicate matching
:return:
"""
if request is None:
request = get_current_request()
registry = request.registry
introspector = registry.introspector
route = introspector.get("routes", route_name)
introspectables = introspector.related(route)
ignored_view_names = kwargs.pop("ignored_view_names", None)
# needs to be rewritten to internal name
if request_method:
kwargs["request_methods"] = request_method
for maybe_view in introspectables:
# skip excluded views/non-views
if (
not is_view(maybe_view)
or not check_methods_matching(maybe_view, **kwargs)
or should_ignore_view(maybe_view, ignored_views=ignored_view_names)
):
continue
pattern = route["pattern"]
pattern = reformat_pattern(pattern)
if maybe_view.get("match_param"):
# replace route patterns that are specified in the view's match_param argument,
# so that route URLs are unique and accurate
match_params = MatchParamPredicate(maybe_view["match_param"], None)
for key, value in match_params.reqs:
pattern = pattern.replace("{%s}" % key, value)
spec.path(
pattern, operations=get_operations(maybe_view, operations, autodoc=autodoc)
)
def nf_masks_to_neurof_dict(binary_masks: np.ndarray, dataset_name: str) -> Dict[str, Any]:
"""
Takes as input a tensor of binary masks and produces the dict format for neurofinder
Args:
binary_masks: 3d ndarray (components x dimension 1 x dimension 2)
dataset_name: name of the dataset
Returns:
dset: dict
dataset in neurofinder format to be saved in json
"""
regions = []
for m in binary_masks:
coords = [[int(x), int(y)] for x, y in zip(*np.where(m))]
regions.append({"coordinates": coords})
dset = {"regions": regions, "dataset": dataset_name}
return dset
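# Small illustrative example for nf_masks_to_neurof_dict with two tiny binary masks
# (numpy only; the shapes and dataset name are arbitrary).
def _demo_nf_masks_to_neurof_dict():
    import numpy as np
    masks = np.zeros((2, 4, 4), dtype=bool)
    masks[0, 1, 1] = True
    masks[1, 2, 2:4] = True
    dset = nf_masks_to_neurof_dict(masks, "demo_dataset")
    # -> {'regions': [{'coordinates': [[1, 1]]},
    #                 {'coordinates': [[2, 2], [2, 3]]}],
    #     'dataset': 'demo_dataset'}
    assert len(dset["regions"]) == 2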
def get_domain_machine_command():
"""Retrieves a collection of Machines that have communicated to or from a given domain address.
Returns:
(str, dict, dict). Human readable, context, raw response
"""
headers = ['ID', 'ComputerDNSName', 'OSPlatform', 'LastIPAddress', 'LastExternalIPAddress', 'HealthStatus',
'RiskScore', 'ExposureLevel']
domain = demisto.args().get('domain')
response = get_domain_machines_request(domain)
machines_list = get_machines_list(response)
human_readable = tableToMarkdown(f'Machines that have communicated with {domain} domain:', machines_list,
headers=headers, removeNull=True)
context_output = {
'Domain': domain,
'Machines': machines_list
}
entry_context = {
'MicrosoftATP.DomainMachine(val.Domain === obj.Domain)': context_output
}
return human_readable, entry_context, response
def flatten_dict(source_dict, name_delimiter='_', inner_name=False):
"""
Flatten a nested dict.
Parameters
----------
source_dict : nested dict
name_delimiter : delimiter used to join nested key names (not used when inner_name is True)
inner_name : bool, default False; if True, use the innermost key name as the returned dict key
Returns
-------
flattened dict
Examples
--------
>>> from tidyframe import flatten_dict
>>> nest_dict = {
... 'a': 1,
... 'b': [1, 2],
... 'c': {
... 'cc1': 3,
... 'cc2': 4
... },
... 'd': {
... 'd1': 5,
... 'd2': {
... 'dd1': 6,
... 'dd2': 7
... }
... }
... }
>>> flatten_dict(nest_dict)
{'a': 1, 'b': [1, 2], 'c_cc1': 3, 'c_cc2': 4, 'd_d1': 5, 'd_d2_dd1': 6, 'd_d2_dd2': 7}
>>> flatten_dict(nest_dict, inner_name=True)
{'a': 1, 'b': [1, 2], 'cc1': 3, 'cc2': 4, 'd1': 5, 'dd1': 6, 'dd2': 7}
"""
assert isinstance(source_dict, dict), "input source_dict is not a dict"
json_name = {}
for key in source_dict.keys():
if isinstance(get_in(source_dict, [key]), dict):
val = [True, [key]]
json_name.update({key: val})
else:
val = [False, [key]]
json_name.update({key: val})
while True:
key_inner = list(filter(lambda x: json_name.get(x)[0], json_name))
if key_inner:
for x in key_inner:
dict_to_update_json_name = {}
val = json_name.get(x)[1]
for key in get_in(source_dict, val).keys():
val_in = copy(val)
val_in.append(key)
if isinstance(get_in(source_dict, val_in), dict):
dict_to_update = {
reduce(lambda x, y: x + name_delimiter + y, val_in):
[True, val_in]
}
else:
dict_to_update = {
reduce(lambda x, y: x + name_delimiter + y, val_in):
[False, val_in]
}
dict_to_update_json_name.update(dict_to_update)
json_name.update(dict_to_update_json_name)
json_name.pop(x)
else:
break
if inner_name:
return {
json_name.get(x)[1][-1]: get_in(source_dict,
json_name.get(x)[1])
for x in json_name.keys()
}
else:
return {
x: get_in(source_dict,
json_name.get(x)[1])
for x in json_name.keys()
}
def matrix_multiply(A, B):
""" Multiply two matrices A and B.
:param A: the left matrix
:param B: the right matrix
:return: A * B
"""
# define m and n for the matrix as well as l, the connecting dimension between A and B
m, l, n = len(A), len(A[0]), len(B[0])
# initialize an all zeros matrix
C = [[0.0 for _ in range(len(B[0]))] for _ in range(len(A))]
# iterative over the rows of C
for i in range(m):
# iterative over the columns of C
for j in range(n):
# set C[i][j] to the dot product of ith row of A and the jth column of B
C[i][j] = sum(A[i][k] * B[k][j] for k in range(l))
# return the matrix C = A @ B
return C
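# Worked example for matrix_multiply: a 2x3 matrix times a 3x2 matrix gives a 2x2
# result (pure-Python lists, no dependencies beyond the function above).
def _demo_matrix_multiply():
    A = [[1.0, 2.0, 3.0],
         [4.0, 5.0, 6.0]]
    B = [[7.0, 8.0],
         [9.0, 10.0],
         [11.0, 12.0]]
    assert matrix_multiply(A, B) == [[58.0, 64.0], [139.0, 154.0]]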
def create_normalized_frequency_files(scenario, character_set, letter_files, bigram_files):
"""
Filters the given frequency files with respect to the given character set and normalizes the frequencies such
that they sum up to 1.
Takes care of combined characters and distributes the frequencies accordingly, then writes them to a file for later use.
Both output files contain the frequencies of letters and characters (+ fixed characters),
as well as letter-character and character-character pairs.
The letters and characters that have no frequency available, get zero frequency.
At the end writes the frequencies to the corresponding file in the input folder.
Parameters:
scenario: the scenario to obtain the fixed characters
character_set: the name of the character set to filter for
letter_files: a list of file names that should be filtered for letter frequencies
bigram_files: a list of file names that should be filtered for bigram frequencies
"""
if scenario != "" and character_set != "":
set_scenario_files(scenario, character_set)
# character, fixed character, letters
all_chars = get_characters() + get_letters() + list(get_fixed_mapping().keys())
# characters that should be replaced by others in the frequency table because they represent the same character
# according to our understanding
replace = get_replace_characters()
# 1. read the frequencies from the corresponding files as they are
for letter_file in letter_files:
name_addition = "_"+letter_file.split("_")[-1][:-4]
p_single_all = pd.read_csv(letter_file, sep=" ", encoding="utf-8", index_col=0, quoting=3)
p_single_all = p_single_all.dropna()
p_single_all = p_single_all.to_dict()[u'frequency']
p_single = {c: 0 for c in all_chars}
# -------------- SINGLE ------------------------
# Go through all symbols in the given frequency list.
# If the symbol is in the character list, add its frequency to the one in the probability list. If not check if it is a
# composed character. If yes and the diacritic mark is in the character list, add its frequency to the frequency
# of the corresponding characters (character + diacritic mark)
for c, v in p_single_all.items():
if c in replace:
c = replace[c]
c = correct_diacritic(c) # in case this is a wrong form of a "d" annotated diacritic
if c in p_single.keys():
p_single[c] += v
else:
c_dec = decompose(c)
if len(c_dec) > 1:
# it's a composed character, take it apart and add the frequency to the frequency of its components
if c_dec[1] in unicode_diacritic.keys():
diacritic = unicode_diacritic[c_dec[1]]
if diacritic in p_single.keys():
p_single[diacritic] += v
letter = c_dec[0]
if letter in p_single.keys():
p_single[letter] += v
elif c in diacritic_unicode.keys():
# it's a single form of the diacritic mark, e.g. ~. Add frequency to diacritic and to space but only if
# it's not available in the characterset
if not c in all_chars:
diacritic = unicode_diacritic[diacritic_unicode[c]]
if diacritic in all_chars:
p_single[diacritic] += v
p_single["space"] += v
# else it's a character we don't care about
# Check if any of the values remained 0:
for c, v in p_single.items():
if v == 0:
print(u"No frequency for %s" % c)
# Normalize again
s = np.sum(list(p_single.values()))
p_single_normalized = {c: v / float(s) for c, v in p_single.items()}
# Write LETTERS to file
f = codecs.open("input/frequencies/frequency_letters" + name_addition + "_" + character_set + ".txt", 'w',
encoding="utf-8")
f.write("letter frequency\n")
for c, v in p_single_normalized.items():
f.write("%s %s" % (c, repr(v)))
f.write("\n")
f.close()
# -------------- BIGRAM ------------------------
# 3. go through bigrams and correct them according to the given characters,
# that is, letter pairs with accented characters need to be distributed to other letter pairs according to the
# keypresses that needed to be made.
for bigram_file in bigram_files:
name_addition = "_"+bigram_file.split("_")[-1][:-4]
p_bigrams_all = _read_tuple_list_to_dict(bigram_file)
p_bigrams = {(c1, c2): 0 for c1 in all_chars for c2 in all_chars}
for (c1, c2), v in p_bigrams_all.items():
c1 = correct_diacritic(c1)
c2 = correct_diacritic(c2)
if c1 in replace:
c1 = replace[c1]
if c2 in replace:
c2 = replace[c2]
# decompose first character
c_c1 = []
if len(c1) == 0:
print(c1, c2, v)
if c1 in all_chars:
# to-be-mapped character
c_c1 = [c1]
elif c1[0] in diacritic_unicode.keys():
# single form of the diacritic mark, e.g. ~, which needs to be produced by the bigram ~ + space
diacritic = unicode_diacritic[diacritic_unicode[c1[0]]] # get the right diacritic
if diacritic in all_chars:
c_c1 = [diacritic, "space"]
else:
c1_d = decompose(c1)
if len(c1_d) > 1:
# composed character, otherwise its a character we don't care about
c1_1 = get_unicode_diacritic(c1_d[1]) # decompose give diacritic and letter in wrong order
c1_2 = get_unicode_diacritic(c1_d[0])
if c1_1 in all_chars and c1_2 in all_chars:
c_c1 = [c1_1, c1_2]
# decompose second character
c_c2 = []
if c2 in all_chars:
# to-be-mapped character
c_c2 = [c2]
elif c2[0] in diacritic_unicode.keys():
# single form of the diacritic mark, e.g. ~, which needs to be produced by the bigram ~ + space
diacritic = unicode_diacritic[diacritic_unicode[c2[0]]] # get the right diacritic
if diacritic in all_chars:
c_c2 = [diacritic, "space"]
else:
c2_d = decompose(c2)
if len(c2_d) > 1:
# composed character, otherwise its a character we don't care about
c2_1 = get_unicode_diacritic(c2_d[1]) # decompose give diacritic and letter in wrong order
c2_2 = get_unicode_diacritic(c2_d[0])
if c2_1 in all_chars and c2_2 in all_chars:
c_c2 = [c2_1, c2_2]
# now add the frequency to the corresponding bigrams
# c_c1 and c_c2 can contain 0, 1, or 2 letters. If 0 ignore the bigram.
if len(c_c1) > 0 and len(c_c2) > 0:
# add bigram for decomposed c1
if len(c_c1) == 2:
p_bigrams[(c_c1[0], c_c1[1])] += v
# add bigram for decomposed c2
if len(c_c2) == 2:
p_bigrams[(c_c2[0], c_c2[1])] += v
# add bigram for transition from c1 to c2
p_bigrams[(c_c1[-1], c_c2[0])] += v
# normalize
s = np.sum(list(p_bigrams.values()))
p_bigrams_normalized = {(c1, c2): v / float(s) for (c1, c2), v in p_bigrams.items()}
# Write BIGRAMS to file
f = codecs.open("input/frequencies/frequency_bigrams" + name_addition + "_" + character_set + ".txt", 'w',
encoding="utf-8")
for (c1, c2), v in p_bigrams_normalized.items():
f.write("%s %s %s" % (c1, c2, repr(float(v))))
f.write("\n")
f.close()
def hard_light(image1, image2):
"""
Superimposes two videos on top of each other using the Hard Light algorithm
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_hard_light(image2.im))
def carveThumbs(offset, length, thumbfile, thumbname, width, height, export):
"""
:param offset: Offset in thumbnails.data for thumbnail
:param length: Length of data to carve for thumbnail in thumbnails.data
:param thumbfile: Source thumbnails.data file to carve from
:param thumbname: Name of the file that has the thumbnail
:param width: Width of the thumbnail in pixels
:param height: Height of the thumbnail in pixels
:param export: Either output directory in single plugin mode or mac_info object
:return: Nothing
"""
if length is not None:
# Parse via mac_info
if type(export) is not str:
handle = openDeadbox(thumbfile, export)
# Parse via single plugin
else:
handle = openSingle(thumbfile)
# Seek and read thumbnails.data from offsets and lengths found in the index.sqlite
handle.seek(offset)
thumb = handle.read(length)
handle.close()
# Use the Pillow Library Image to parse and export files as images
imgSize = (width, height)
img = Image.frombytes('RGBA', imgSize, thumb, decoder_name='raw')
# Parse via mac_info
if type(export) is not str:
export_folder = os.path.join(export.output_params.export_path, __Plugin_Name, "Thumbnails")
# Parse via single plugin
else:
export_folder = os.path.join(export, __Plugin_Name, "Thumbnails")
# Create output directory if doesn't exist
if not os.path.exists(export_folder):
os.makedirs(export_folder)
# Set up output file with png extension attached
export_file = os.path.join(export_folder, thumbname + " - " + str(width) + "x" + str(height) + ".png")
log.debug("Attempting to copy out thumbnail to file: " + export_file)
img.save(export_file)
def show(*actors, **options):
"""
Create on the fly an instance of class ``Plotter`` and show the object(s) provided.
Allowed input objects types are:
``str``, ``Mesh``, ``Volume``, ``Picture``, ``Assembly``
``vtkPolyData``, ``vtkActor``, ``vtkActor2D``, ``vtkImageActor``,
``vtkAssembly`` or ``vtkVolume``.
If filename is given, its type is guessed based on its extension.
Supported formats are:
`vtu, vts, vtp, ply, obj, stl, 3ds, xml, neutral, gmsh, pcd, xyz, txt, byu,
tif, slc, vti, mhd, png, jpg`.
:param int at: number of the renderer to plot to, if more than one exists
:param list shape: Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a shape as string descriptor. E.g.:
- shape="3|1" means 3 plots on the left and 1 on the right,
- shape="4/2" means 4 plots on top of 2 at bottom.
:param int axes: set the type of axes to be shown
- 0, no axes
- 1, draw three gray grid walls
- 2, show cartesian axes from (0,0,0)
- 3, show positive range of cartesian axes from (0,0,0)
- 4, show a triad at bottom left
- 5, show a cube at bottom left
- 6, mark the corners of the bounding box
- 7, draw a 3D ruler at each side of the cartesian axes
- 8, show the ``vtkCubeAxesActor`` object
- 9, show the bounding box outLine
- 10, show three circles representing the maximum bounding box
- 11, show a large grid on the x-y plane
- 12, show polar axes
- 13, draw a simple ruler at the bottom of the window
Axis type-1 can be fully customized by passing a dictionary ``axes=dict()`` where:
Check ``addons.Axes()`` for the full list of options.
:param float azimuth/elevation/roll: move camera accordingly
:param str viewup: either ['x', 'y', 'z'] or a vector to set vertical direction
:param bool resetcam: re-adjust camera position to fit objects
:param dict camera: Camera parameters can further be specified with a dictionary
assigned to the ``camera`` keyword (E.g. `show(camera={'pos':(1,2,3), 'thickness':1000,})`)
- pos, `(list)`, the position of the camera in world coordinates
- focalPoint `(list)`, the focal point of the camera in world coordinates
- viewup `(list)`, the view up direction for the camera
- distance `(float)`, set the focal point to the specified distance from the camera position.
- clippingRange `(float)`, distance of the near and far clipping planes along the direction
of projection.
- parallelScale `(float)`,
scaling used for a parallel projection, i.e. the height of the viewport
in world-coordinate distances. The default is 1. Note that the "scale" parameter works as
an "inverse scale", larger numbers produce smaller images.
This method has no effect in perspective projection mode.
- thickness `(float)`,
set the distance between clipping planes. This method adjusts the far clipping
plane to be set a distance 'thickness' beyond the near clipping plane.
- viewAngle `(float)`,
the camera view angle, which is the angular height of the camera view
measured in degrees. The default angle is 30 degrees.
This method has no effect in parallel projection mode.
The formula for setting the angle up for perfect perspective viewing is:
angle = 2*atan((h/2)/d) where h is the height of the RenderWindow
(measured by holding a ruler up to your screen) and d is the distance
from your eyes to the screen.
:param bool interactive: pause and interact with window (True)
or continue execution (False)
:param float rate: maximum rate of `show()` in Hertz
:param int interactorStyle: set the type of interaction
- 0 = TrackballCamera [default]
- 1 = TrackballActor
- 2 = JoystickCamera
- 3 = JoystickActor
- 4 = Flight
- 5 = RubberBand2D
- 6 = RubberBand3D
- 7 = RubberBandZoom
- 8 = Context
- 9 = 3D
- 10 = Terrain
- 11 = Unicam
:param bool q: force program to quit after `show()` command returns.
:param bool new: if set to `True`, a call to ``show`` will instantiate
a new ``Plotter`` object (a new window) instead of reusing the first created.
:return: the current ``Plotter`` class instance.
.. note:: With multiple renderers, keyword ``at`` can become a `list`, e.g.
.. code-block:: python
from vedo import *
s = Sphere()
c = Cube()
p = Paraboloid()
show(s, c, at=[0, 1], shape=(3,1))
show(p, at=2, interactive=True)
#
# is equivalent to:
plt = Plotter(shape=(3,1))
s = Sphere()
c = Cube()
p = Paraboloid()
plt.show(s, at=0)
plt.show(p, at=1)
plt.show(c, at=2, interactive=True)
"""
at = options.pop("at", None)
shape = options.pop("shape", (1, 1))
N = options.pop("N", None)
pos = options.pop("pos", (0, 0))
size = options.pop("size", "auto")
screensize = options.pop("screensize", "auto")
title = options.pop("title", "")
bg = options.pop("bg", "white")
bg2 = options.pop("bg2", None)
axes = options.pop("axes", settings.defaultAxesType)
interactive = options.pop("interactive", None)
offscreen = options.pop("offscreen", False)
sharecam = options.pop("sharecam", True)
resetcam = options.pop("resetcam", True)
zoom = options.pop("zoom", None)
viewup = options.pop("viewup", "")
azimuth = options.pop("azimuth", 0)
elevation = options.pop("elevation", 0)
roll = options.pop("roll", 0)
camera = options.pop("camera", None)
interactorStyle = options.pop("interactorStyle", 0)
q = options.pop("q", False)
newPlotter = options.pop("new", False)
if len(options):
for op in options:
printc("Warning: unknown keyword in show():", op, c='y')
if len(actors) == 0:
actors = None
elif len(actors) == 1:
actors = actors[0]
else:
actors = utils.flatten(actors)
if settings.plotter_instance and not newPlotter: # Plotter exists
plt = settings.plotter_instance
else: # Plotter must be created
if utils.isSequence(at): # user passed a sequence for "at"
if not utils.isSequence(actors):
printc("show() Error: input must be a list.", c='r')
raise RuntimeError()
if len(at) != len(actors):
printc("show() Error: lists 'input' and 'at', must have equal lengths.", c='r')
raise RuntimeError()
if len(at) > 1 and (shape == (1, 1) and N is None):
N = max(at) + 1
elif at is None and (N or shape != (1, 1)):
if not utils.isSequence(actors):
printc('show() Error: N or shape is set, but input is not a sequence.', c='r')
printc(' you may need to specify e.g. at=0', c='r')
raise RuntimeError()
at = list(range(len(actors)))
plt = Plotter(
shape=shape,
N=N,
pos=pos,
size=size,
screensize=screensize,
title=title,
axes=axes,
sharecam=sharecam,
resetcam=resetcam,
interactive=interactive,
offscreen=offscreen,
bg=bg,
bg2=bg2,
)
# use _plt_to_return because plt.show() can return a k3d/panel plot
_plt_to_return = None
if utils.isSequence(at):
for i, a in enumerate(actors):
_plt_to_return = plt.show(
a,
at=i,
zoom=zoom,
resetcam=resetcam,
viewup=viewup,
azimuth=azimuth,
elevation=elevation,
roll=roll,
camera=camera,
interactive=False,
interactorStyle=interactorStyle,
bg=bg,
bg2=bg2,
axes=axes,
q=q,
)
plt.interactive = interactive
if interactive or len(at)==N \
or (isinstance(shape[0],int) and len(at)==shape[0]*shape[1]):
# note that shape can be a string
if not offscreen:
plt.interactor.Start()
else:
_plt_to_return = plt.show(
actors,
at=at,
zoom=zoom,
resetcam=resetcam,
viewup=viewup,
azimuth=azimuth,
elevation=elevation,
roll=roll,
camera=camera,
interactive=interactive,
interactorStyle=interactorStyle,
bg=bg,
bg2=bg2,
axes=axes,
q=q,
)
return _plt_to_return
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=3 k=0"],
):
"""
usage.scipy: 1
"""
...
def compute_on_cpu():
"""Compute all run configurations over a sigle CPU."""
sweep_config = OmegaConf.load("tools/benchmarking/benchmark_params.yaml")
for run_config in get_run_config(sweep_config.grid_search):
model_metrics = sweep(run_config, 0, sweep_config.seed)
write_metrics(model_metrics, sweep_config.writer)
def convert_book(book):
"""
Attempt to convert any books of type in `CONVERTIBLE_MIMETYPES` to .mobi,
in the same folder as the given temporary path.
"""
tmp_path = book.get_tmp_pathname(u'send_books')
mobi_tmp_path = convert_to_mobi_path(tmp_path)
if mobi_tmp_path is None:
return None
log.info(u"Converting book for user id {0}".format(book.user_id))
try:
subprocess.check_output(['ebook-convert', tmp_path, mobi_tmp_path],
timeout=CONVERSION_TIMEOUT)
except subprocess.CalledProcessError as e:
return e.output
except subprocess.TimeoutExpired as e:
return "Timed out converting book"
except Exception as e:
return str(e)
def set_loggers_level(names, level=logging.CRITICAL):
"""
Set a log level on multiple loggers.
Arguments:
names (list): A list of logger names to set level.
Keyword Arguments:
level (integer): Logging level to set on all given logger names. Default to
value from ``logging.CRITICAL``.
"""
for item in names:
logging.getLogger(item).setLevel(level)
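# Usage sketch for set_loggers_level: quiet a couple of chatty third-party loggers.
# The logger names are illustrative only, not taken from the original project.
def _demo_set_loggers_level():
    set_loggers_level(["urllib3", "botocore"], level=logging.WARNING)
    assert logging.getLogger("urllib3").level == logging.WARNING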
def get_followup_question_list(intent: str) -> List[str]:
"""
Get all imported followup questions for this intent as a list
* `intent`: name-parameter of the yml-section with which the followup questions were imported
**Returns:** None if no followup questions are known for this intent, otherwise list of followup questions for this intent
"""
return None if not qa.get(intent) else qa.get(intent).followup_questions
def event_train_test_split(
evs: np.ndarray, n_evs: int, train_split: float, random_seed: int=1
) -> Tuple[np.ndarray, np.ndarray]:
"""[summary]
Args:
n_evs (int): [description]
train_split (float): [description]
random_seed (int, optional): [description]. Defaults to 1.
Returns:
Tuple[np.ndarray, np.ndarray]: [description]
"""
# some basic checks
assert 0 < train_split < 1, "Variable train_split (ts) must be 0<ts<1."
assert n_evs > 1, "Need more than 1 event to split."
# set the random state locally
r = np.random.RandomState(random_seed)
# compute the number of test and train samples
train_samples = int(np.round(train_split * n_evs, 0))
test_samples = int(n_evs - train_samples)
# split the events
train_events = r.choice(evs, train_samples, replace=False)
test_events = evs[~np.isin(evs, train_events)]
# make sure they add up to the total number!
assert len(train_events) + len(test_events) == n_evs
return train_events, test_events
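# Usage sketch for event_train_test_split: split 10 event ids 80/20 with a fixed
# seed (numpy only; the event ids are arbitrary).
def _demo_event_train_test_split():
    evs = np.arange(10)
    train, test = event_train_test_split(evs, n_evs=len(evs), train_split=0.8, random_seed=1)
    assert len(train) == 8 and len(test) == 2
    assert np.intersect1d(train, test).size == 0  # the two splits are disjoint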
def get_cd(wcs, n=1):
"""
Get the value of the change in world coordinate per pixel across a linear axis.
Defaults to wcs.wcs.cd if present. Does not support rotated headers (e.g.,
with nonzero CDm_n where m!=n)
"""
if hasattr(wcs.wcs,'cd'):
if wcs.wcs.cd[n-1,n-1] != 0:
return wcs.wcs.cd[n-1,n-1]
else:
return wcs.wcs.get_cdelt()[n-1]
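# Illustrative use of get_cd with a minimal astropy WCS (an assumption: the wcs
# objects handled above are astropy.wcs.WCS instances, which expose wcs.cd and
# get_cdelt() as used in the function).
def _demo_get_cd():
    from astropy.wcs import WCS
    w = WCS(naxis=2)
    w.wcs.cdelt = [-0.01, 0.01]  # degrees per pixel along each axis
    assert get_cd(w, n=1) == -0.01
    assert get_cd(w, n=2) == 0.01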
def mock_clear():
"""Clear MOCK_DATA_HEAP"""
MOCK_DATA_HEAP.clear()
return "" | 843 |
def cls_merge_type(classification):
""" classification type이 2가지일 때 합쳐주는 함수
Parameters
----------
classification: cls
classification 리스트
Returns
-------
list of cls
변환된 classification 리스트
"""
cls_type = {'instant' if cls.get('instant_datetime') else 'not_instant' for cls in classification }
if len(cls_type) == 2:
for cls in classification:
instant_datetime = cls.get('instant_datetime')
if instant_datetime:
year = instant_datetime.year
start_datetime = datetime.datetime(year, 1, 1) # set to January 1 of that year
end_datetime = instant_datetime
cls['instant_datetime'] = None
cls['start_datetime'] = start_datetime
cls['end_datetime'] = end_datetime
return classification
def complete_source_space_info(this):
"""Add more info on surface
"""
# Main triangulation
print('\tCompleting triangulation info...')
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
this['tri_nn'] = np.cross((r2-r1), (r3-r1))
for p in range(this['ntri']): # XXX : can do better
size = sqrt(np.sum(this['tri_nn'][p,:] * this['tri_nn'][p,:]))
this['tri_area'][p] = size / 2.0
this['tri_nn'][p,:] = this['tri_nn'][p,:] / size
print('[done]')
# Selected triangles
print('\tCompleting selection triangulation info...')
if this['nuse_tri'] > 0:
r1 = this['rr'][this['use_tris'][:, 0],:]
r2 = this['rr'][this['use_tris'][:, 1],:]
r3 = this['rr'][this['use_tris'][:, 2],:]
this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
this['use_tri_nn'] = np.cross((r2-r1), (r3-r1))
for p in range(this['nuse_tri']): # XXX can do better
this['use_tri_area'][p] = sqrt(np.sum(this['use_tri_nn'][p,:]
* this['use_tri_nn'][p,:])) / 2.0
print('[done]')
def duplicate_detector(gate_orders: list[tuple[str]]) -> int:
"""Detects any schematics that have an identical combination of gates."""
difference = len(gate_orders) - len(list(set(gate_orders))) # List - list with no duplicates
return difference
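# Quick check for duplicate_detector: two of the three gate orderings below are
# identical, so exactly one duplicate is reported.
def _demo_duplicate_detector():
    orders = [("AND", "OR"), ("AND", "OR"), ("XOR",)]
    assert duplicate_detector(orders) == 1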
def permutations(x):
"""Return all permutations of x"""
def fn(i):
if i == len(x): ans.append(x.copy())
for k in range(i, len(x)):
x[i], x[k] = x[k], x[i]
fn(i+1)
x[i], x[k] = x[k], x[i]
ans = []
fn(0)
return ans
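# Sanity check for permutations: 3 elements yield 3! = 6 orderings and the input
# list is restored by the swap-back step.
def _demo_permutations():
    x = [1, 2, 3]
    result = permutations(x)
    assert len(result) == 6
    assert sorted(result) == [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    assert x == [1, 2, 3]  # original list left unchanged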
def _checkFile(path,suffix=''):
"""
Check that the path exists and, at the same time, that its suffix equals `suffix`.
:param path: path of the file
:param suffix: expected file suffix, empty by default. If the path does not end with `suffix`, an error is raised
"""
if path is None:
raise FileNotFoundError('path is empty, please set a search path!')
path = str(path)
if path == "":
raise FileNotFoundError('path is empty, please set a search path!')
if not os.path.exists(path):
raise FileNotFoundError('File ' + path + ' does not exist, please check that the path is correct.')
if suffix != '' and not path.endswith(suffix):
raise TypeError('Wrong file type: expected suffix ' + suffix + ', but file ' + path + ' is not of that type')
def _add_cobertura_package(packages, package_name, package_data): # type: (SubElement, str, t.Dict[str, t.Dict[str, int]]) -> t.Tuple[int, int]
"""Add a package element to the given packages element."""
elem_package = SubElement(packages, 'package')
elem_classes = SubElement(elem_package, 'classes')
total_lines_hit = 0
total_line_count = 0
for path, results in package_data.items():
lines_hit = len([True for hits in results.values() if hits])
line_count = len(results)
total_lines_hit += lines_hit
total_line_count += line_count
elem_class = SubElement(elem_classes, 'class')
class_name = os.path.splitext(os.path.basename(path))[0]
if class_name.startswith("Ansible.ModuleUtils"):
class_name = class_name[20:]
content_root = data_context().content.root
filename = path
if filename.startswith(content_root):
filename = filename[len(content_root) + 1:]
elem_class.attrib.update({
'branch-rate': '0',
'complexity': '0',
'filename': filename,
'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
'name': class_name,
})
SubElement(elem_class, 'methods')
elem_lines = SubElement(elem_class, 'lines')
for number, hits in results.items():
elem_line = SubElement(elem_lines, 'line')
elem_line.attrib.update(
hits=str(hits),
number=str(number),
)
elem_package.attrib.update({
'branch-rate': '0',
'complexity': '0',
'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
'name': package_name,
})
return total_lines_hit, total_line_count
def encode(model, text, out_file=None, topic_priors=None, prior_weight=1.0):
"""
Perform text-to-image encoding.
Parameters
----------
model : :obj:`gclda.model.Model`
Model object needed for decoding.
text : :obj:`str` or :obj:`list`
Text to encode into an image.
out_file : :obj:`str`, optional
If not None, writes the encoded image to a file.
topic_priors : :obj:`numpy.ndarray` of :obj:`float`, optional
A 1d array of size (n_topics) with values for topic weighting.
If None, no weighting is done. Default is None.
prior_weight : :obj:`float`, optional
The weight by which the prior will affect the encoding.
Default is 1.
Returns
-------
img : :obj:`nibabel.Nifti1Image`
The encoded image.
topic_weights : :obj:`numpy.ndarray` of :obj:`float`
The weights of the topics used in encoding.
Notes
-----
====================== ==============================================================
Notation Meaning
====================== ==============================================================
:math:`v` Voxel
:math:`t` Topic
:math:`w` Word type
:math:`h` Input text
:math:`p(v|t)` Probability of topic given voxel (``p_topic_g_voxel``)
:math:`\\tau_{t}` Topic weight vector (``topic_weights``)
:math:`p(w|t)` Probability of word type given topic (``p_word_g_topic``)
:math:`\omega` 1d array from input image (``input_values``)
====================== ==============================================================
1. Compute :math:`p(v|t)`
(``p_voxel_g_topic``).
- From :obj:`gclda.model.Model.get_spatial_probs()`
2. Compute :math:`p(t|w)`
(``p_topic_g_word``).
3. Vectorize input text according to model vocabulary.
4. Reduce :math:`p(t|w)` to only include word types in input text.
5. Compute :math:`p(t|h)` (``p_topic_g_text``) by multiplying :math:`p(t|w)`
by word counts for input text.
6. Sum topic weights (:math:`\\tau_{t}`) across
words.
- :math:`\\tau_{t} = \sum_{i}{p(t|h_{i})}`
7. Compute voxel
weights.
- :math:`p(v|h) \propto p(v|t) \cdot \\tau_{t}`
8. The resulting array (``voxel_weights``) reflects arbitrarily scaled
voxel weights for the input text.
9. Unmask and reshape ``voxel_weights`` into brain image.
"""
if isinstance(text, list):
text = " ".join(text)
# Assume that words in word_labels are underscore-separated.
# Convert to space-separation for vectorization of input string.
vocabulary = [term.replace("_", " ") for term in model.dataset.word_labels]
max_len = max([len(term.split(" ")) for term in vocabulary])
vectorizer = CountVectorizer(
vocabulary=model.dataset.word_labels, ngram_range=(1, max_len)
)
word_counts = np.squeeze(vectorizer.fit_transform([text]).toarray())
keep_idx = np.where(word_counts > 0)[0]
text_counts = word_counts[keep_idx]
n_topics_per_word_token = np.sum(model.n_word_tokens_word_by_topic, axis=1)
p_topic_g_word = (
model.n_word_tokens_word_by_topic / n_topics_per_word_token[:, None]
)
    # Replace NaNs (words never assigned to any topic) with zeros
    p_topic_g_word = np.nan_to_num(p_topic_g_word)
p_topic_g_text = p_topic_g_word[keep_idx] # p(T|W) for words in text only
prod = p_topic_g_text * text_counts[:, None] # Multiply p(T|W) by words in text
topic_weights = np.sum(prod, axis=0) # Sum across words
if topic_priors is not None:
weighted_priors = weight_priors(topic_priors, prior_weight)
topic_weights *= weighted_priors
_, p_voxel_g_topic = model.get_spatial_probs()
voxel_weights = np.dot(p_voxel_g_topic, topic_weights)
img = unmask(voxel_weights, model.dataset.mask_img)
if out_file is not None:
img.to_filename(out_file)
return img, topic_weights | 850 |
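# Standalone numeric sketch of steps 2-7 of the encoding above, using tiny
# synthetic arrays in place of a fitted gclda model -- purely illustrative,
# all array values here are made up.
import numpy as np

n_word_tokens_word_by_topic = np.array([[4., 0.], [1., 3.], [2., 2.]])  # word-by-topic counts (W x T)
word_counts = np.array([1, 0, 2])                                       # vectorized input text
p_topic_g_word = n_word_tokens_word_by_topic / n_word_tokens_word_by_topic.sum(axis=1, keepdims=True)
keep_idx = np.where(word_counts > 0)[0]
topic_weights = np.sum(p_topic_g_word[keep_idx] * word_counts[keep_idx, None], axis=0)  # tau_t
p_voxel_g_topic = np.array([[0.7, 0.1], [0.2, 0.3], [0.1, 0.6]])        # p(v|t), V x T
voxel_weights = np.dot(p_voxel_g_topic, topic_weights)                  # p(v|h) up to scale
print(topic_weights)   # [2. 1.]
print(voxel_weights)   # [1.5 0.7 0.8]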
def clean_string(s: str) -> str:
"""Cleans and returns an input string
>>> clean_string(" xYz ")
'XYZ'
"""
return str(s).strip().upper() | 851 |
def get_unique_region_cov_df(unique_region_dict, fuzzer_names):
"""Returns a DataFrame where the two columns are fuzzers and the number
of unique regions covered."""
fuzzers = collections.defaultdict(int)
for region in unique_region_dict:
for fuzzer in unique_region_dict[region]:
fuzzers[fuzzer] += 1
dict_to_transform = {'fuzzer': [], 'unique_regions_covered': []}
for fuzzer in fuzzer_names:
covered_num = fuzzers[fuzzer]
dict_to_transform['fuzzer'].append(fuzzer)
dict_to_transform['unique_regions_covered'].append(covered_num)
return pd.DataFrame(dict_to_transform) | 852 |
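# Quick usage sketch with a hypothetical region -> fuzzers mapping
# (assumes the function above and pandas are importable):
unique_region_dict = {'region_a': ['afl', 'libfuzzer'], 'region_b': ['afl']}
df = get_unique_region_cov_df(unique_region_dict, ['afl', 'libfuzzer', 'honggfuzz'])
print(df)
#       fuzzer  unique_regions_covered
# 0        afl                       2
# 1  libfuzzer                       1
# 2  honggfuzz                       0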
def load_continuous_dataset(
root: str,
name: str,
raw: bool = False,
random_state: Optional[RandomState] = None
) -> Union[
Tuple[np.ndarray, np.ndarray, np.ndarray],
Tuple[UnsupervisedDataset, UnsupervisedDataset, UnsupervisedDataset]
]:
"""
Load a continuous dataset.
All the datasets are preprocessed as in the original MAF paper repository.
See https://github.com/gpapamak/maf/tree/master/datasets for details.
:param root: The datasets root directory.
:param name: The name of the dataset.
:param raw: Whether to return unpreprocessed Numpy arrays instead of Torch Datasets.
Torch Datasets will have standardization as data transformation.
:param random_state: The random state to use for shuffling and transforming the data.
It can be either None, a seed integer or a Numpy RandomState.
:return: The train, validation and test dataset splits.
:raise ValueError: If the continuous dataset name is not known.
"""
# Check the random state
random_state = check_random_state(random_state)
directory = os.path.join(root, name)
if name == 'power':
# Load the dataset
data = np.load(os.path.join(directory, 'data.npy'))
random_state.shuffle(data)
n_samples = len(data)
data = np.delete(data, [1, 3], axis=1)
# Add noise as in original datasets preprocessing (MAF paper)
voltage_noise = 0.01 * random_state.rand(n_samples, 1)
gap_noise = 0.001 * random_state.rand(n_samples, 1)
sm_noise = random_state.rand(n_samples, 3)
time_noise = np.zeros(shape=(n_samples, 1))
data = data + np.hstack([gap_noise, voltage_noise, sm_noise, time_noise])
# Split the dataset
n_test = int(0.1 * len(data))
data_test = data[-n_test:]
data = data[:-n_test]
n_valid = int(0.1 * len(data))
data_valid = data[-n_valid:]
data_train = data[:-n_valid]
elif name == 'gas':
# Load the dataset
data = pd.read_pickle(os.path.join(directory, 'ethylene_CO.pickle'))
data.drop(['Meth', 'Eth', 'Time'], axis=1, inplace=True)
# Remove uninformative features
uninformative_idx = (data.corr() > 0.98).to_numpy().sum(axis=1)
while np.any(uninformative_idx > 1):
col_to_remove = np.where(uninformative_idx > 1)[0][0]
data.drop(data.columns[col_to_remove], axis=1, inplace=True)
uninformative_idx = (data.corr() > 0.98).to_numpy().sum(axis=1)
data = data.to_numpy()
random_state.shuffle(data)
# Split the dataset
n_test = int(0.1 * len(data))
data_test = data[-n_test:]
data = data[:-n_test]
n_valid = int(0.1 * len(data))
data_valid = data[-n_valid:]
data_train = data[:-n_valid]
elif name == 'hepmass':
# Load the dataset
data_train = pd.read_csv(os.path.join(directory, "1000_train.csv"), index_col=False)
data_test = pd.read_csv(os.path.join(directory, "1000_test.csv"), index_col=False)
# Gets rid of any background noise examples i.e. class label 0.
data_train = data_train[data_train[data_train.columns[0]] == 1]
data_train = data_train.drop(data_train.columns[0], axis=1)
data_test = data_test[data_test[data_test.columns[0]] == 1]
data_test = data_test.drop(data_test.columns[0], axis=1)
data_test = data_test.drop(data_test.columns[-1], axis=1)
data_train, data_test = data_train.to_numpy(), data_test.to_numpy()
# Remove any features that have too many re-occurring real values.
features_to_remove = []
for i, feature in enumerate(data_train.T):
c = Counter(feature)
max_count = next(v for k, v in sorted(c.items()))
if max_count > 5:
features_to_remove.append(i)
features_to_keep = [i for i in range(data_train.shape[1]) if i not in features_to_remove]
data_train = data_train[:, features_to_keep]
data_test = data_test[:, features_to_keep]
random_state.shuffle(data_train)
# Split the train dataset
n_valid = int(len(data_train) * 0.1)
data_valid = data_train[-n_valid:]
data_train = data_train[:-n_valid]
elif name == 'miniboone':
# Load the dataset
data = np.load(os.path.join(directory, 'data.npy'))
random_state.shuffle(data)
# Split the dataset
n_test = int(0.1 * len(data))
data_test = data[-n_test:]
data = data[:-n_test]
n_valid = int(0.1 * len(data))
data_valid = data[-n_valid:]
data_train = data[:-n_valid]
elif name == 'BSDS300':
# Load the dataset
with h5py.File(os.path.join(directory, 'BSDS300.hdf5'), 'r') as file:
data_train = file['train'][:]
data_valid = file['validation'][:]
data_test = file['test'][:]
else:
raise ValueError("Unknown continuous dataset called {}".format(name))
# Return raw Numpy arrays, if specified
if raw:
return data_train, data_valid, data_test
# Instantiate the standardize transform
mean = torch.tensor(np.mean(data_train, axis=0), dtype=torch.float32)
std = torch.tensor(np.std(data_train, axis=0), dtype=torch.float32)
transform = Normalize(mean, std)
# Wrap and return the datasets
data_train = UnsupervisedDataset(data_train, transform)
data_valid = UnsupervisedDataset(data_valid, transform)
data_test = UnsupervisedDataset(data_test, transform)
return data_train, data_valid, data_test | 853 |
def plot_emo_num(emo_num, server = "local"):
"""plot distribution of emotion categoriy.
Args:
emo_num(list): list of unique emotion
server(str): local - plot emotion distribution; ssh - without plotting.
Return:
save (or plot) emotion categoriy plot.
"""
fig, ax = plt.subplots(figsize=(10,7))
index = np.arange(len(emo_num))
bar_width = 0.5
rects = ax.bar(index, list(emo_num.values()), bar_width)
ax.set_xlabel('Emotion')
ax.set_ylabel('Count')
ax.set_title('Distribution of Emotions')
ax.set_xticks(index)
ax.set_xticklabels(list(emo_num.keys()))
labels = list(emo_num.values())
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height, label,
ha='center', va='bottom')
fig.tight_layout()
fig.savefig("./figures/emotion_distribution.png")
if server =="local":
plt.show()
elif server =="ssh":
pass
else:
raise ValueError("server not defined.") | 854 |
def display_generation_hit_results(hit_info, hit_results):
"""Displays the results of a generation HIT
Parameters
----------
hit_info : GenerationHITInfo
HITInfo object storing information regarding the HIT
hit_results : GenerationResults
HIT results object storing the results of the relevant HIT
Returns
-------
bool
returns True
"""
dec_string = format_decomposition_string(hit_results.decomposition)
print(
'HIT ID: {hit_id}'
'\nAssignment ID: {assignment_id}'
'\nHIT Type: Generation'
'\n'
'\nResults'
'\n======='
'\nAnnotation ID: {annotation_id}'
'\nQuestion ID: {question_id}'
'\nQuestion Text: {question_text}'
'\nDecomposition: {decomposition}'.format(
hit_id=hit_results.hit_id,
assignment_id=hit_results.assignment_id,
annotation_id=hit_info.annotation_id,
question_id=hit_info.question_id,
question_text=hit_info.question_text,
decomposition=dec_string))
return True | 855 |
def sub_command_one():
"""Example Sub Command"""
click.echo("{{cookiecutter.project_slug}} sub_command_one") | 856 |
def check_help_all_output(pkg, subcommand=None):
"""test that `python -m PKG --help-all` works"""
cmd = [sys.executable, '-m', pkg]
if subcommand:
cmd.extend(subcommand)
cmd.append('--help-all')
out, err, rc = get_output_error_code(cmd)
nt.assert_equal(rc, 0, err)
nt.assert_not_in("Traceback", err)
nt.assert_in("Options", out)
nt.assert_in("Class parameters", out)
return out, err | 857 |
def install_consul():
""" install Consul """
if env.host == env.consul_host and not exists("/usr/bin/consul"):
run("rm -f consul.zip consul")
run("curl -L --silent {} -o consul.zip".format(CONSUL_URL))
run("unzip consul.zip")
run("chmod +x consul")
sudo("mv consul /usr/bin/consul")
run("rm -f consul.zip")
ipv4_address = get_addressv4_address()
ctx = {
"consul_dir": '/usr/bin',
"ip_address": ipv4_address
}
upload_template(filename='consul.conf', destination='/etc/init/consul.conf',
template_dir=TEMPLATES, context=ctx, use_sudo=True, use_jinja=True)
sudo("service consul start")
time.sleep(2)
consul_address = env.cluster_addresss[env.consul_host]
append("/etc/default/docker",
'DOCKER_OPTS="--kv-store=consul:{}:8500"'.format(consul_address),
use_sudo=True)
sudo("service docker restart")
time.sleep(5) | 858 |
def XOR(v1, v2):
"""
XOR operation element by element from 2 lists
:param v1: [1, 0, 1, 0, 0, 1]
:param v2: [1, 1, 0, 0, 1, 1]
:return: [0, 1, 1, 0, 1, 0]
"""
return [a ^ b for a, b in zip(v1, v2)] | 859 |
def linearly_spaced_combinations(bounds, num_samples):
"""
    Return a 2-D array with all linearly spaced combinations within the bounds.
Parameters
----------
bounds : sequence of tuples
The bounds for the variables, [(x1_min, x1_max), (x2_min, x2_max), ...]
    num_samples : integer or array_like
Number of samples to use for every dimension. Can be a constant if
the same number should be used for all, or an array to fine-tune
precision. Total number of data points is num_samples ** len(bounds).
Returns
-------
combinations : 2-d array
        A 2-d array. If d = len(bounds) and l = prod(num_samples) then it
is of size l x d, that is, every row contains one combination of
inputs.
"""
bounds = np.atleast_2d(bounds)
num_vars = len(bounds)
num_samples = np.broadcast_to(num_samples, num_vars)
# Create linearly spaced test inputs
inputs = [np.linspace(b[0], b[1], n) for b, n in zip(bounds,
num_samples)]
# Convert to 2-D array
return combinations(inputs) | 860 |
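# Illustrative sketch of the intended output for bounds [(0, 1), (0, 2)] with 3
# samples per dimension. The `combinations` helper called above is defined
# elsewhere; this reproduces the expected grid with np.meshgrid, which is an
# assumption about its behaviour (row order may differ).
import numpy as np
inputs = [np.linspace(0, 1, 3), np.linspace(0, 2, 3)]
grid = np.array(np.meshgrid(*inputs)).T.reshape(-1, 2)
print(grid.shape)  # (9, 2): every combination of the two linspaces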
def flatten_expressions_tree(
expression: Optional[Expression]) -> Tuple[Expression, ...]:
"""
Flatten expressions tree into a list.
"""
if not expression:
return tuple()
expressions = [expression]
for arg in expression.arguments:
if is_expression(arg):
expressions.extend(flatten_expressions_tree(arg))
return tuple(expressions) | 861 |
def workorder(
ctx: typer.Context,
path_in: Path = typer.Argument(
...,
help="Path to the job file(s).",
),
path_out: Path = typer.Argument(
"./tmp",
help="Parent path for saving the new workorder, will be prepended to --file-name.",
),
format_id: FormatChoices = typer.Option(
FormatChoices.json,
"-f",
"--format-id",
show_choices=True,
help="Output file format.",
),
file_name: Path = typer.Option(
"workorders/${ewo_iso_date_time}/workorder-${ewo_uid}",
"-n",
"--file-name",
help=(
"File name for the new workorder. Can include directories, "
"and the file type suffix will be added based on --format-id if necessary."
),
),
ewo_path: Optional[Path] = typer.Option(
None,
"-w",
"--existing-work-order",
help="Path to an existing workorder.",
),
):
"""Create a workorder, and add existing jobs to it.
Can also add jobs to an existing Workorder.
"""
runner: EveEsiJobs = ctx.obj["runner"]
if not path_in.exists():
raise typer.BadParameter(f"{path_in} does not exist.")
if path_in.is_file():
maybe_jobs = [path_in]
else:
maybe_jobs = [*path_in.glob("*.json"), *path_in.glob("*.yaml")]
loaded_jobs = []
for maybe_job in maybe_jobs:
try:
loaded_job = runner.deserialize_job(file_path=maybe_job)
except Exception as ex:
raise typer.BadParameter(f"Error decoding job at {maybe_job}, msg:{ex}")
loaded_jobs.append(loaded_job)
if not loaded_jobs:
raise typer.BadParameter(f"No jobs found at {path_in}")
if ewo_path is None:
ewo = get_default_workorder()
else:
try:
ewo = runner.deserialize_workorder(file_path=ewo_path)
except Exception as ex:
raise typer.BadParameter(
f"Error decoding workorder string. {ex.__class__.__name__}, {ex}"
)
ewo.jobs.extend(loaded_jobs)
output_path = path_out / file_name
out_template = Template(str(output_path))
file_path = Path(out_template.substitute(ewo.attributes()))
try:
saved_path, _ = runner.serialize_workorder(
workorder=ewo, file_path=file_path, data_format=format_id
)
typer.echo(f"Workorder saved to {saved_path}")
report_finished_task(ctx)
except Exception as ex:
raise typer.BadParameter(
f"Error saving workorder to {path_out}. {ex.__class__.__name__}, {ex}"
) | 862 |
def prepare_trans_list_update():
""" Prepares the translation_stats table for a translation list update.
For all translations, available is set to false. The translations that are
currently available will later be set to true. """
cur.execute("UPDATE translation_stats SET available = 0;")
_conn.commit() | 863 |
def main():
"""Main script function."""
info = collect_info()
sort_info(info, "Участник:NapalmBot/файлы") | 864 |
def add_tags(recipe_data, recipe_id):
"""Add entries for Tag and RecipeTags and delete removed tags."""
newTags = recipe_data.get("newTags", {})
for tagname, category in newTags.items():
tagname = tagname.lower().strip()
tag = Tag(tagname=tagname, parent=TagCategory.get(TagCategory.categoryname == category))
tag.save()
# Get existing tags for recipe
existing_tags_rows = RecipeTags.select().join(Tag, pw.JOIN.LEFT_OUTER).where((RecipeTags.recipeID == recipe_id))
existing_tags = []
for row in existing_tags_rows:
row_dict = model_to_dict(row)
if row_dict.get("tagID", {}).get("tagname"):
existing_tags.append(row_dict.get("tagID", {}).get("tagname"))
# Add tags if they don't exist already
tags = recipe_data.get("tags", [])
for tagname in tags:
tagname = tagname.lower().strip()
if tagname not in existing_tags:
recipetags = RecipeTags(recipeID=Recipe.get(Recipe.id == recipe_id), tagID=Tag.get(Tag.tagname == tagname))
recipetags.save()
# Delete removed tags
for tagname in existing_tags:
if tagname not in tags:
recipetags = RecipeTags.get(RecipeTags.recipeID == recipe_id, RecipeTags.tagID == Tag.get(Tag.tagname == tagname).id)
recipetags.delete_instance()
# Remove this tag from Tag table if no other recipe uses it
delete_abandoned_tag(in_tagname=tagname) | 865 |
def policy_head(x, mode, params):
"""
The policy head attached after the residual blocks as described by DeepMind:
1. A convolution of 8 filters of kernel size 3 × 3 with stride 1
2. Batch normalisation
3. A rectifier non-linearity
4. A fully connected linear layer that outputs a vector of size 19²+1 = 362
corresponding to logit probabilities for all intersections and the pass
move
"""
num_channels = params['num_channels']
num_samples = params['num_samples']
def _forward(x, is_recomputing=False):
""" Returns the result of the forward inference pass on `x` """
y = batch_norm_conv2d(x, 'conv_1', (3, 3, num_channels, num_samples), mode, params, is_recomputing=is_recomputing)
y = tf.nn.relu(y)
y = tf.reshape(y, (-1, 361 * num_samples))
y = dense(y, 'linear_1', (361 * num_samples, 362), policy_offset_op, mode, params, is_recomputing=is_recomputing)
return tf.cast(y, tf.float32)
return recompute_grad(_forward)(x) | 866 |
def _get_doors_gt_nsrts() -> Set[NSRT]:
"""Create ground truth NSRTs for DoorsEnv."""
robot_type, door_type, room_type = _get_types_by_names(
CFG.env, ["robot", "door", "room"])
InRoom, InDoorway, InMainRoom, TouchingDoor, DoorIsOpen, DoorInRoom, \
DoorsShareRoom = _get_predicates_by_names(CFG.env, ["InRoom",
"InDoorway", "InMainRoom", "TouchingDoor", "DoorIsOpen",
"DoorInRoom", "DoorsShareRoom"])
MoveToDoor, OpenDoor, MoveThroughDoor = _get_options_by_names(
CFG.env, ["MoveToDoor", "OpenDoor", "MoveThroughDoor"])
nsrts = set()
# MoveToDoorFromMainRoom
# This operator should only be used on the first step of a plan.
robot = Variable("?robot", robot_type)
room = Variable("?room", room_type)
door = Variable("?door", door_type)
parameters = [robot, room, door]
option_vars = [robot, door]
option = MoveToDoor
preconditions = {
LiftedAtom(InRoom, [robot, room]),
LiftedAtom(InMainRoom, [robot, room]),
LiftedAtom(DoorInRoom, [door, room]),
}
add_effects = {
LiftedAtom(TouchingDoor, [robot, door]),
LiftedAtom(InDoorway, [robot, door])
}
delete_effects = {LiftedAtom(InMainRoom, [robot, room])}
side_predicates: Set[Predicate] = set()
move_to_door_nsrt = NSRT("MoveToDoorFromMainRoom", parameters,
preconditions, add_effects, delete_effects,
side_predicates, option, option_vars,
null_sampler)
nsrts.add(move_to_door_nsrt)
# MoveToDoorFromDoorWay
robot = Variable("?robot", robot_type)
start_door = Variable("?start_door", door_type)
end_door = Variable("?end_door", door_type)
parameters = [robot, start_door, end_door]
option_vars = [robot, end_door]
option = MoveToDoor
preconditions = {
LiftedAtom(InDoorway, [robot, start_door]),
LiftedAtom(DoorsShareRoom, [start_door, end_door]),
}
add_effects = {
LiftedAtom(TouchingDoor, [robot, end_door]),
LiftedAtom(InDoorway, [robot, end_door])
}
delete_effects = {LiftedAtom(InDoorway, [robot, start_door])}
side_predicates = set()
move_to_door_nsrt = NSRT("MoveToDoorFromDoorWay", parameters,
preconditions, add_effects, delete_effects,
side_predicates, option, option_vars,
null_sampler)
nsrts.add(move_to_door_nsrt)
# OpenDoor
robot = Variable("?robot", robot_type)
door = Variable("?door", door_type)
parameters = [door, robot]
option_vars = [door, robot]
option = OpenDoor
preconditions = {
LiftedAtom(TouchingDoor, [robot, door]),
LiftedAtom(InDoorway, [robot, door]),
}
add_effects = {LiftedAtom(DoorIsOpen, [door])}
delete_effects = {
LiftedAtom(TouchingDoor, [robot, door]),
}
side_predicates = set()
# Allow protected access because this is an oracle. Used in the sampler.
env = get_or_create_env(CFG.env)
assert isinstance(env, DoorsEnv)
get_open_door_target_value = env._get_open_door_target_value # pylint: disable=protected-access
# Even though this option does not need to be parameterized, we make it so,
# because we want to match the parameter space of the option that will
# get learned during option learning. This is useful for when we want
# to use sampler_learner = "oracle" too.
def open_door_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del rng, goal # unused
door, _ = objs
assert door.is_instance(door_type)
# Calculate the desired change in the doors "rotation" feature.
# Allow protected access because this is an oracle.
mass = state.get(door, "mass")
friction = state.get(door, "friction")
target_rot = state.get(door, "target_rot")
target_val = get_open_door_target_value(mass=mass,
friction=friction,
target_rot=target_rot)
current_val = state.get(door, "rot")
delta_rot = target_val - current_val
# The door always changes from closed to open.
delta_open = 1.0
return np.array([delta_rot, delta_open], dtype=np.float32)
open_door_nsrt = NSRT("OpenDoor", parameters, preconditions, add_effects,
delete_effects, side_predicates, option, option_vars,
open_door_sampler)
nsrts.add(open_door_nsrt)
# MoveThroughDoor
robot = Variable("?robot", robot_type)
start_room = Variable("?start_room", room_type)
end_room = Variable("?end_room", room_type)
door = Variable("?door", door_type)
parameters = [robot, start_room, door, end_room]
option_vars = [robot, door]
option = MoveThroughDoor
preconditions = {
LiftedAtom(InRoom, [robot, start_room]),
LiftedAtom(InDoorway, [robot, door]),
LiftedAtom(DoorIsOpen, [door]),
LiftedAtom(DoorInRoom, [door, start_room]),
LiftedAtom(DoorInRoom, [door, end_room]),
}
add_effects = {
LiftedAtom(InRoom, [robot, end_room]),
}
delete_effects = {
LiftedAtom(InRoom, [robot, start_room]),
}
side_predicates = set()
move_through_door_nsrt = NSRT("MoveThroughDoor", parameters, preconditions,
add_effects, delete_effects, side_predicates,
option, option_vars, null_sampler)
nsrts.add(move_through_door_nsrt)
return nsrts | 867 |
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
words = sum(sentences, [])
return _get_ngrams(n, words) | 868 |
def untag_domain(
domain_arn: str,
tags: t.List[str],
client: "botocore.client.BaseClient" = None,
) -> None:
"""Remove tags from a domain.
Args:
domain_arn: domain AWS ARN
tags: tags (keys) to remove
client: SWF client
"""
client = _common.ensure_client(client)
client.untag_resource(resourceArn=domain_arn, tagKeys=tags) | 869 |
def override_list(base_list: List, dynamic_key: str, val):
"""
Customize the base list by updating with the
dynamic_key and val.
Parameters
----------
    base_list: list
        List to be customized with dynamic args
dynamic_key: str
Key to identify the location the value should be updated.
Nested with DOT like "custom.key_0.key_1.key_2.0.0.key_4"
val: str or float or int or dict or list
Value to be set
Returns
-------
    list
Updated base_list based on the key-value pairs in dynamic_args
Notes
-----
This will be called recursively with override_dict.
If dynamic_key is not a number, then we try to match on `name` field
in the list of dictionaries.
"""
def find_root_key_index(base_list, root_key):
if root_key.isdigit():
# If array index
root_key = int(root_key)
else:
# If string, then match on `name`
for root_key_i in range(len(base_list)):
if root_key == base_list[root_key_i][NAME]:
root_key = root_key_i
break
if not isinstance(root_key, int):
raise KeyError("{} not found in List".format(root_key))
return root_key
if DOT in dynamic_key:
# Compute root and subtree keys
root_key = find_root_key_index(base_list, dynamic_key.split(DOT)[0])
subtree_key = DOT.join(dynamic_key.split(DOT)[1:])
# Extract subtree
subtree = base_list[root_key]
if isinstance(subtree, dict):
root_val = override_dict(base_dict=subtree,
dynamic_key=subtree_key,
val=val)
elif isinstance(subtree, list):
root_val = override_list(base_list=subtree,
dynamic_key=subtree_key,
val=val)
else:
raise ValueError(
"Unsupported subtree type. Must be one of list or dict")
else:
# End of nested dynamic key
root_key = find_root_key_index(base_list, dynamic_key)
root_val = val
base_list[root_key] = root_val
return base_list | 870 |
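# Minimal usage sketch; assumes the module-level constants DOT = "." and
# NAME = "name". A flat (non-nested) key is used so the example does not
# depend on the companion override_dict helper defined elsewhere.
base = [{'name': 'encoder', 'units': 64}, {'name': 'decoder', 'units': 64}]
updated = override_list(base, 'decoder', {'name': 'decoder', 'units': 128})
print(updated[1]['units'])  # 128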
def c_components(DAG):
"""Return a list of the maximal c-component node sets in DAG."""
    G = nx.Graph()
G.add_nodes_from(observable_nodes(DAG))
G.add_edges_from([(u,v) for u,v in observable_pairs(DAG) if
has_confounded_path(DAG, u, v)])
return list(nx.connected_components(G)) | 871 |
def wss_over_number_of_clusters(data, algorithm='kmeans',
max_iter=100, num_repeats = 5, max_num_clusters = 12,
plot_file = None):
"""
Calculates the within-sum-of-squares (WSS) for different numbers of clusters,
averaged over several iterations.
Parameters
----------
data : float array
Trajectory data [frames,frame_data]
algorithm : string
The algorithm to use for the clustering.
Options: kmeans, rspace.
Default: kmeans
max_iter : int, optional
Maximum number of iterations.
Default: 100.
num_repeats : int, optional
Number of times to run the clustering for each number of clusters.
Default: 5.
max_num_clusters : int, optional
Maximum number of clusters for k-means clustering.
Default: 12.
plot_file : str, optional
Name of the file to save the plot.
Returns
-------
all_wss : float array
WSS values for each number of clusters (starting at 2).
std_wss : float array
Standard deviations of the WSS.
"""
# Initialize lists
all_wss = []
std_wss = []
# Loop over the number of clusters
    for nc in range(2, max_num_clusters + 1):  # cluster counts 2..max, matching the docstring and plot labels
rep_wss = []
# Run each clustering several times.
for repeat in range(num_repeats):
# Get clusters and WSS for this repetition.
cc = obtain_clusters(data, algorithm=algorithm, max_iter=max_iter,
num_clusters=nc, plot=False)
cidx, wss, centroids = cc
rep_wss.append(wss)
# Calculate mean and standard deviation for this number of clusters.
all_wss.append(np.mean(rep_wss))
std_wss.append(np.std(rep_wss))
# Plot the WSS over the number of clusters
fig, ax = plt.subplots(1,1, figsize=[4,3], dpi=300)
ax.errorbar(np.arange(len(all_wss))+2,np.array(all_wss),yerr=np.array(std_wss)/np.sqrt(num_repeats))
ax.set_xlabel('number of clusters')
ax.set_ylabel('total WSS')
fig.tight_layout()
# Save the plot to file.
if plot_file: fig.savefig(plot_file)
return all_wss, std_wss | 872 |
def AptInstall(vm):
"""Installs the mysql package on the VM."""
vm.RemoteCommand('echo "mysql-server-5.5 mysql-server/root_password password '
'%s" | sudo debconf-set-selections' % MYSQL_PSWD)
vm.RemoteCommand('echo "mysql-server-5.5 mysql-server/root_password_again '
'password %s" | sudo debconf-set-selections' % MYSQL_PSWD)
vm.InstallPackages('mysql-server') | 873 |
def plot_original_image(filepath):
"""Plot full-color version of original image"""
    # cv2.imread returns BGR; convert to RGB so matplotlib shows true colors
    plt.imshow(cv2.cvtColor(cv2.imread(filepath), cv2.COLOR_BGR2RGB))
plt.title("Original Image")
plt.xticks([])
plt.yticks([])
plt.show() | 874 |
def __main__():
"""Parse the cmd lne options"""
parser = optparse.OptionParser()
parser.add_option("-i", "--input", default=None, dest="input",
help="The input file")
parser.add_option("-p", "--ptt", default=None, dest="ptt",
help="The output ptt file")
parser.add_option("-r", "--rnt", default=None, dest="rnt",
help="The output rnt file")
(options, args) = parser.parse_args()
if not options.input:
parser.error("Need to specify the input genbank file")
if not options.ptt:
parser.error("Need to specify the output ptt file")
if not options.rnt:
parser.error("Need to specify the output rnt file")
proteins = []
rnas = []
header = {}
coldetail={"Location":"",
"Strand":"",
"Length":"",
"PID":"",
"Gene":"",
"Synonym":"",
"Code":"",
"COG":"",
"Product":""}
with open(options.input, "r") as handle:
for entry in SeqIO.parse(handle, "genbank"):
#the header
header["description"] = entry.description
header["proteins"] = 0
header["rna"] = 0
for feature in entry.features:
if "translation" in feature.qualifiers.keys():
#print(feature.__dict__)
header["proteins"] = header["proteins"] + 1
#Figure out the strandedness and save the location(position)
if feature.location.strand > 0:
coldetail["Location"] = "%i..%i" % (feature.location._start,
feature.location._end)
coldetail["Strand"] = "+"
else:
coldetail["Location"] = "%i..%i" % (feature.location._end,
feature.location._start)
coldetail["Strand"] = "-"
#Calculate the feature length
coldetail["Length"] = str(int(feature.location._end) -
int(feature.location._start))
#PID
try:
coldetail["PID"] = feature.qualifiers["protein_id"][0]
except KeyError:
coldetail["PID"] = feature.qualifiers["locus_tag"][0]
#Gene
try:
coldetail["Gene"] = feature.qualifiers["gene"][0]
except KeyError:
coldetail["Gene"] = "-"
#Synonym
coldetail["Synonym"] = feature.qualifiers["locus_tag"][0]
#Code
coldetail["Code"] = "-"
#COG
coldetail["COG"] = "-"
#Product
coldetail["Product"] = feature.qualifiers["product"][0]
proteins.append(copy.deepcopy(coldetail))
#print("\n")
if feature.type == "tRNA" or feature.type == "rRNA":
#working with RNA here
header["rna"] = header["rna"] + 1
#Figure out the strandedness and save the location(position)
if feature.location.strand > 0:
coldetail["Location"] = "%i..%i" % (feature.location._start,
feature.location._end)
coldetail["Strand"] = "+"
else:
coldetail["Location"] = "%i..%i" % (feature.location._end,
feature.location._start)
coldetail["Strand"] = "-"
#Calculate the feature length
coldetail["Length"] = str(int(feature.location._end) -
int(feature.location._start))
#PID
try:
coldetail["PID"] = feature.qualifiers["protein_id"][0]
except KeyError:
coldetail["PID"] = feature.qualifiers["locus_tag"][0]
#Gene
try:
coldetail["Gene"] = feature.qualifiers["gene"][0]
except KeyError:
coldetail["Gene"] = "-"
#Synonym
coldetail["Synonym"] = feature.qualifiers["locus_tag"][0]
#Code
coldetail["Code"] = "-"
#COG
coldetail["COG"] = "-"
#Product
coldetail["Product"] = feature.qualifiers["product"][0]
#print("\n")
rnas.append(copy.deepcopy(coldetail))
with open(options.ptt, "w") as handle:
handle.write(header["description"] + "\n")
handle.write("%s proteins\n" % header["proteins"])
#header_line = [key for key in entry.keys()]
handle.write("Location\tStrand\tLength\tPID\tGene\tSynonym\tCode\tCOG\tProduct\n")
for entry in proteins:
handle.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (entry["Location"],
entry["Strand"],
entry["Length"],
entry["PID"],
entry["Gene"],
entry["Synonym"],
entry["Code"],
entry["COG"],
entry["Product"]))
with open(options.rnt, "w") as handle:
handle.write(header["description"] + "\n")
handle.write("%s RNAs\n" % header["rna"])
#header_line = [key for key in entry.keys()]
handle.write("Location\tStrand\tLength\tPID\tGene\tSynonym\tCode\tCOG\tProduct\n")
for entry in rnas:
handle.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (entry["Location"],
entry["Strand"],
entry["Length"],
entry["PID"],
entry["Gene"],
entry["Synonym"],
entry["Code"],
entry["COG"],
entry["Product"])) | 875 |
def test_toy_example_collapse_points():
"""Test on a toy example of three points that should collapse
We build a simple example: two points from the same class and a point from
a different class in the middle of them. On this simple example, the new
(transformed) points should all collapse into one single point. Indeed, the
objective is 2/(1 + exp(d/2)), with d the euclidean distance between the
two samples from the same class. This is maximized for d=0 (because d>=0),
with an objective equal to 1 (loss=-1.).
"""
rng = np.random.RandomState(42)
input_dim = 5
two_points = rng.randn(2, input_dim)
X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
y = [0, 0, 1]
class LossStorer:
def __init__(self, X, y):
self.loss = np.inf # initialize the loss to very high
# Initialize a fake NCA and variables needed to compute the loss:
self.fake_nca = NeighborhoodComponentsAnalysis()
self.fake_nca.n_iter_ = np.inf
self.X, y, _ = self.fake_nca._validate_params(X, y)
self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
def callback(self, transformation, n_iter):
"""Stores the last value of the loss function"""
self.loss, _ = self.fake_nca._loss_grad_lbfgs(
transformation, self.X, self.same_class_mask, -1.0
)
loss_storer = LossStorer(X, y)
nca = NeighborhoodComponentsAnalysis(random_state=42, callback=loss_storer.callback)
X_t = nca.fit_transform(X, y)
print(X_t)
# test that points are collapsed into one point
assert_array_almost_equal(X_t - X_t[0], 0.0)
assert abs(loss_storer.loss + 1) < 1e-10 | 876 |
def separate_classes(x: np.ndarray, y: np.ndarray) -> List[Tuple[int, np.ndarray]]:
"""Separate samples by classes into a list.
Args:
x (np.ndarray): Samples.
y (np.ndarray): Target labels (classes).
Returns:
List[Tuple[int, np.ndarray]]: List in the format [(class, samples),...]
"""
classes = np.unique(y)
l = []
for clss in classes:
l.append((clss, x[y==clss]))
return l | 877 |
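# Quick usage sketch:
import numpy as np
x = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([0, 1, 0, 1])
for clss, samples in separate_classes(x, y):
    print(clss, samples.ravel())
# 0 [1. 3.]
# 1 [2. 4.]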
def monitor_threads(threads, arguments):
"""
Monitor the threads.
Parameters
----------
threads: dict
The threads to monitor.
arguments: namespace
The parsed command line.
# --GT-- not used, kept to avoid to break the function call.
Returns
-------
int
0
"""
try:
# exit and let systemd restart the process to avoid issues with
# potential memory leaks
time.sleep(60 * 60 * 2)
except Exception:
# the sleep was interrupted
pass
for th in threads.keys():
threads[th].stop()
# give up to 30 seconds for threads to exit cleanly
timeout = datetime.now() + timedelta(seconds=30)
while timeout > datetime.now():
thread_running = False
for th in threads.keys():
if threads[th].is_alive():
thread_running = True
if not thread_running:
break
return 0 | 878 |
def fetch_cfr_parts(notice_xml):
""" Sometimes we need to read the CFR part numbers from the notice
XML itself. This would need to happen when we've broken up a
multiple-effective-date notice that has multiple CFR parts that
may not be included in each date. """
parts = []
for cfr_elm in notice_xml.xpath('//CFR'):
parts.extend(notice_cfr_p.parseString(cfr_elm.text).cfr_parts)
return list(sorted(set(parts))) | 879 |
def get_dayofweek(date):
"""
Returns day of week in string format from date parameter (in datetime format).
"""
return date.strftime("%A") | 880 |
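# Usage sketch:
from datetime import datetime
print(get_dayofweek(datetime(2021, 1, 1)))  # Friday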
def add_model_output(modelIn, mode=None, num_add=None, activation=None):
""" This function modifies the last dense layer in the passed keras model. The modification includes adding units and optionally changing the activation function.
Parameters
----------
modelIn : keras model
Keras model to be modified.
mode : string
Mode to modify the layer. It could be:
'abstain' for adding an arbitrary number of units for the abstention optimization strategy.
'qtl' for quantile regression which needs the outputs to be tripled.
'het' for heteroscedastic regression which needs the outputs to be doubled. (current implicit default: 'het')
num_add : integer
Number of units to add. This only applies to the 'abstain' mode.
activation : string
        String with keras specification of activation function (e.g. 'relu', 'sigmoid', 'softmax', etc.)
Return
----------
modelOut : keras model
Keras model after last dense layer has been modified as specified. If there is no mode specified it returns the same model.
"""
if mode is None:
return modelIn
numlayers = len(modelIn.layers)
# Find last dense layer
i = -1
while 'dense' not in (modelIn.layers[i].name) and ((i+numlayers) > 0):
i -= 1
# Minimal verification about the validity of the layer found
assert ((i + numlayers) >= 0)
assert ('dense' in modelIn.layers[i].name)
# Compute new output size
    if mode == 'abstain':
assert num_add is not None
new_output_size = modelIn.layers[i].output_shape[-1] + num_add
    elif mode == 'qtl':  # for quantile UQ
new_output_size = 3 * modelIn.layers[i].output_shape[-1]
else: # for heteroscedastic UQ
new_output_size = 2 * modelIn.layers[i].output_shape[-1]
# Recover current layer options
config = modelIn.layers[i].get_config()
# Update number of units
config['units'] = new_output_size
# Update activation function if requested
if activation is not None:
config['activation'] = activation
# Create new Dense layer
reconstructed_layer = Dense.from_config(config)
# Connect new Dense last layer to previous one-before-last layer
additional = reconstructed_layer(modelIn.layers[i-1].output)
# If the layer to replace is not the last layer, add the remainder layers
if i < -1:
for j in range(i+1, 0):
config_j = modelIn.layers[j].get_config()
aux_j = layers.deserialize({'class_name': modelIn.layers[j].__class__.__name__,
'config': config_j})
reconstructed_layer = aux_j.from_config(config_j)
additional = reconstructed_layer(additional)
modelOut = Model(modelIn.input, additional)
return modelOut | 881 |
def parse(f):
"""Parse ASDL from the given file and return a Module node describing it."""
parser = ASDLParser()
return parser.parse(f) | 882 |
def _assert_put_and_patch(usage_examples: UsageExamples, web_app: WebApp):
"""Test PUT and PATH requests."""
info_name = usage_examples.__class__.__name__
test_params = [
('PUT', usage_examples.put_requests),
('PATCH', usage_examples.patch_requests),
]
for http_method, examples_method in test_params:
send = PutPatchRequestsTester(web_app, usage_examples, http_method.lower())
if examples_method:
with usage_examples.send_function(send):
examples_method()
if http_method not in usage_examples.allowed_methods:
assert send.calls_count == 0, '{} sends {} requests to resource'.format(info_name, http_method)
continue
assert send.calls_count > 0, '{} has not any {} requests'.format(info_name, http_method)
etag = usage_examples.resource.get_etag()
if etag:
# if 'HEAD' in resource_examples.allowed_methods:
# params, headers = resource_examples.authorize_request(None, None, None)
# head_res = web_app.head(resource_examples.resource_url, params=params, headers=headers)
# etag = head_res.headers['ETag']
resource = usage_examples.resource
parent = resource.__parent__
if parent and 'GET' in usage_examples.allowed_methods:
# Get a new resource instance with refreshed internal state
etag = parent[resource.__name__].get_etag().serialize()
else:
# WARNING: This value of etag may be obsolete
etag = etag.serialize()
if not send.was_if_match:
send(
headers={'If-Match': '"__bad_etag__"'},
exception=HTTPPreconditionFailed({'etag': etag}),
)
if not send.was_if_none_match:
send(
headers={'If-None-Match': etag},
exception=HTTPPreconditionFailed({'etag': etag}),
) | 883 |
def distance(xyz, lattice, PBC=[1,2,3]):
"""
Returns the Euclidean distance from the origin for a fractional
displacement vector. Takes into account the lattice metric and periodic
boundary conditions, including up to one non-periodic axis.
Args:
xyz: a fractional 3d displacement vector. Can be obtained by
subtracting one fractional vector from another
lattice: a 3x3 matrix describing a unit cell's lattice vectors
PBC: the axes, if any, which are periodic. 1, 2, and 3 correspond
to x, y, and z respectively.
Returns:
a scalar for the distance of the point from the origin
"""
xyz = filtered_coords(xyz, PBC=PBC)
matrix = create_matrix(PBC=PBC)
matrix += xyz
matrix = np.dot(matrix, lattice)
return np.min(cdist(matrix,[[0,0,0]])) | 884 |
def main():
"""Do the main thing here"""
print("\n** Jamf computer group upload script")
print("** Creates a computer group in Jamf Pro.")
# parse the command line arguments
args, cli_custom_keys = get_args()
verbosity = args.verbose
# grab values from a prefs file if supplied
jamf_url, _, _, _, enc_creds = api_connect.get_creds_from_args(args)
# import computer group from file and replace any keys in the XML
with open(args.template, "r") as file:
template_contents = file.read()
# substitute user-assignable keys
template_contents = actions.substitute_assignable_keys(
template_contents, cli_custom_keys, verbosity, xml_escape=True
)
# set a list of names either from the CLI args or from the template if no arg provided
if args.names:
names = args.names
else:
names = [get_computergroup_name(template_contents, verbosity)]
# now process the list of names
for computergroup_name in names:
# where a group name was supplied via CLI arg, replace this in the template
if args.names:
template_contents = replace_computergroup_name(
computergroup_name, template_contents, verbosity
)
# check for existing group
print("\nChecking '{}' on {}".format(computergroup_name, jamf_url))
obj_id = api_get.get_api_obj_id_from_name(
jamf_url, "computer_group", computergroup_name, enc_creds, verbosity
)
if obj_id:
print(
"Computer Group '{}' already exists: ID {}".format(
computergroup_name, obj_id
)
)
if args.replace:
upload_computergroup(
jamf_url,
enc_creds,
computergroup_name,
template_contents,
cli_custom_keys,
verbosity,
obj_id,
)
else:
print(
"Not replacing existing Computer Group. Use --replace to enforce."
)
else:
print(
"Computer Group '{}' not found - will create".format(computergroup_name)
)
upload_computergroup(
jamf_url,
enc_creds,
computergroup_name,
template_contents,
cli_custom_keys,
verbosity,
)
print() | 885 |
def dobro(n=0, formato=False):
"""
Dobrar número
:param n: número a ser dobrado
:param formato: (opicional) mostrar o moeda
:return: resultado
"""
n = float(n)
n += n
return moeda(n) if formato else n | 886 |
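# Usage sketch; `moeda` is a companion currency-formatting helper defined
# elsewhere in the module, so only the unformatted path is shown here:
print(dobro(21.5))  # 43.0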
def f_q2d(n, m):
"""Lowercase f term for 2D-Q polynomials. oe-20-3-2483 Eq. (A.18b).
Parameters
----------
n : int
radial order
m : int
azimuthal order
Returns
-------
float
f
"""
if n == 0:
return np.sqrt(F_q2d(n=0, m=m))
else:
return np.sqrt(F_q2d(n, m) - g_q2d(n-1, m) ** 2) | 887 |
def binary_search(data, target, low, high):
"""Return position if target is found in indicated portion of a python list and -1 if target is not found.
"""
if low > high:
return -1
mid = (low + high) // 2
if target == data[mid]:
return mid
elif target < data[mid]:
# recur on the portion left of the middle
return binary_search(data, target, low, mid - 1)
else:
# recur on the portion right of the middle
return binary_search(data, target, mid + 1, high) | 888 |
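# Usage sketch on a sorted list:
data = [1, 3, 5, 7, 9, 11]
print(binary_search(data, 7, 0, len(data) - 1))  # 3
print(binary_search(data, 4, 0, len(data) - 1))  # -1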
def delete_position(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
db = get_db()
db.execute('DELETE FROM gatekeeping WHERE id = ?', (id,))
db.commit()
return jsonify(status='ok') | 889 |
def read_string(stream, length):
"""read data from the file and return as a text string
"""
text = struct.unpack('{}s'.format(length), stream.read(length))
try:
result = str(text[0], encoding='utf-8')
except UnicodeDecodeError:
result = str(text[0], encoding='latin-1')
return result.rstrip('\x00') | 890 |
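# Usage sketch with an in-memory stream standing in for a file handle
# (assumes `struct` is imported at module level, as the function requires):
import io
stream = io.BytesIO(b'hello\x00\x00\x00')
print(read_string(stream, 8))  # 'hello'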
def generate_gate_y_hamiltonian_vec() -> np.ndarray:
"""Return the vector representation for the Hamiltonian of a Y gate with respect to the orthonormal Hermitian matrix basis with the normalized identity matrix as the 0th element.
The result is a real vector with size 4.
Parameters
----------
Returns
----------
np.ndarray
The real vector representation of the Hamiltonian of the gate.
"""
dim = 2
coeff = 0.5 * math.pi * np.sqrt(2)
vec = np.zeros(dim * dim, dtype=np.float64)
vec[0] = -coeff
vec[2] = coeff
return vec | 891 |
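# Numerical sanity check of the vector above. Conventions assumed here:
# basis ordering {I, X, Y, Z} / sqrt(2) and gate matrix U = expm(1j * H).
import numpy as np
from scipy.linalg import expm

vec = generate_gate_y_hamiltonian_vec()
paulis = [np.eye(2),
          np.array([[0, 1], [1, 0]]),
          np.array([[0, -1j], [1j, 0]]),
          np.array([[1, 0], [0, -1]])]
H = sum(c * p / np.sqrt(2) for c, p in zip(vec, paulis))
Y = np.array([[0, -1j], [1j, 0]])
print(np.allclose(expm(1j * H), Y))  # True (up to numerical precision)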
def _semi_implicit_midpoint(ode_fun, jac_fun, y_olds, t_old, f_old, dt, args,
solver_parameters, J00, I):
"""
Calculate solution at t_old+dt using the semi-implicit midpoint
formula. Based on equations IV.9.16a-b of Ref II.
"""
y_older, y_old = y_olds
je_tot=0
if(y_older is None): # Use Euler to get starting value
return _semi_implicit_euler(ode_fun, jac_fun, y_olds, t_old,
f_old, dt, args, solver_parameters,
J00, I)
if(f_old is None):
f_yj = ode_fun(*(y_old,t_old)+args)
fe_tot = 1
else: # We already computed it and can just reuse it
f_yj = f_old
fe_tot=0
b = np.dot(-(I+dt*J00),(y_old-y_older)) + 2*dt*f_yj
A = I-dt*J00
if(solver_parameters['initialGuess']): # Use Euler for initial guess
x0, f_yj, fe_tot_,je_tot=_explicit_euler(ode_fun, jac_fun, y_olds,
t_old, f_yj, dt,
args, solver_parameters)
fe_tot += fe_tot_
else:
x0=None
dy = linear_solve(A, b, iterative=solver_parameters['iterative'],
tol=solver_parameters['min_tol'], x0=x0)
y_new = y_old + dy
return (y_new, f_yj, fe_tot, je_tot) | 892 |
def test_create_property_etters_inner_fns():
""" Test BaseStyle.create_domain_mode_etters Method """
## Creates a MockFeature and a BaseStyle Object
f = MockFeature()
s = pmk.BaseStyle(f)
## Set up MagicMock for clear_cache method
s.clear_cache = MagicMock()
## Setup style tree for no domain
domain_name = None
mode_name = 'mode'
prop_name = 'color'
prop_value1 = 'orange'
prop_value2 = 'green'
s.add_domain(domain_name)
s.add_mode(mode_name, domain_name)
s.add_property(prop_name, prop_value1, mode_name, domain_name)
s.set_mode(mode_name, domain_name)
## Call target function
f.set_color(prop_value2)
## Make assertions
assert s.managed_properties[prop_name] == prop_value2
s.clear_cache.assert_called_once()
s.clear_cache.reset_mock()
## Setup style tree for bingo domain
domain_name = 'bingo'
mode_name = 'mode1'
prop_name = 'myprop'
prop_value1 = 'val1'
prop_value2 = 'val2'
s.add_domain(domain_name)
s.add_mode(mode_name, domain_name)
s.add_property(prop_name, prop_value1, mode_name, domain_name)
s.set_mode(mode_name, domain_name)
## Call target function
f.set_bingo_myprop(prop_value2)
## Make assertions
assert s.managed_properties[domain_name + '_' + prop_name] == prop_value2
s.clear_cache.assert_called_once()
'''
Test Getters
'''
assert s.get_color() == s.managed_properties['color']
assert f.get_color() == s.managed_properties['color']
assert s.get_bingo_myprop() == s.managed_properties['bingo_myprop']
assert f.get_bingo_myprop() == s.managed_properties['bingo_myprop'] | 893 |
async def test_list_failure(mock_datagram_client):
"""Test the wifi_list command failing."""
with mock_datagram_client:
with pytest.raises(CommandError) as err:
async with Client("192.168.1.100") as client:
_ = await client.wifi.list()
assert str(err.value) == (
"wifi_list command failed (response: {'command': 38, 'status': 'error'})"
) | 894 |
def freeze_session(session,
keep_var_names=None,
output_names=None,
clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
from tensorflow.python.framework.graph_util import (
convert_variables_to_constants,
remove_training_nodes,
)
graph = session.graph
with graph.as_default():
freeze_var_names = list(
set(v.op.name for v in tf.global_variables()).difference(
keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
# Graph -> GraphDef ProtoBuf
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
frozen_graph = remove_training_nodes(frozen_graph)
return frozen_graph | 895 |
def test_regular_method_used(
assert_errors, parse_ast_tree, method, default_options,
):
"""Testing that other methods are working fine."""
tree = parse_ast_tree(magic_method.format(method))
visitor = WrongClassVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, []) | 896 |
def get_system_cpu_times():
"""Return system CPU times as a namedtuple."""
user, nice, system, idle = _psutil_osx.get_system_cpu_times()
return _cputimes_ntuple(user, nice, system, idle) | 897 |
def Mce1(m, q, ξ, *, p=0):
"""
v = Mce1(m, q, ξ, *, p=0)
Compute the value of the even Radial Mathieu function of the first kind
Mce⁽¹⁾ₘ(q, ξ).
Parameters
----------
m : array_like
        integer order of the Mathieu function
q : array_like
positive parameter in the Mathieu differential equation
ξ : array like
``radial'' coordinate in the Elliptic coordinates
p : 0 or 1 or 2 (default 0)
0 for the function,
1 for the first derivative
2 for the second derivative
Returns
-------
v : array like
value of Mce⁽¹⁾ₘ(q, ξ) or Mce⁽¹⁾ₘ′(q, ξ) or Mce⁽¹⁾ₘ′´(q, ξ)
"""
if p == 0:
return mathieu_modcem1(m, q, ξ)[0]
if p == 1:
return mathieu_modcem1(m, q, ξ)[1]
if p == 2:
return (mathieu_a(m, q) - (2 * q) * cosh(2 * ξ)) * mathieu_modcem1(m, q, ξ)[0]
raise ValueError("The value p must be 0, 1, or 2.") | 898 |
def exec_sync( cmd ):
""" exec command line.
"""
print( cmd )
p = subprocess.Popen(cmd, shell=True)
ret = p.wait()
print('') | 899 |