| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def list_ga4_entities(admin_api):
"""Get a dictionary of GA4 entity settings based on type.
Args:
admin_api: The Admin API object.
Returns:
A dictionary of GA4 entity setting lists.
"""
entities = {
'ga4_account_summaries': [],
'ga4_accounts': [],
'ga4_properties': [],
'ga4_data_streams': [],
'ga4_measurement_protocol_secrets': [],
'ga4_conversion_events': [],
'ga4_custom_dimensions': [],
'ga4_custom_metrics': [],
'ga4_dv360_link_proposals': [],
'ga4_dv360_links': [],
'ga4_firebase_links': [],
'ga4_google_ads_links': []
}
for account_summary in admin_api.list_account_summaries():
a_dict = {
'name': account_summary.name,
'display_name': account_summary.display_name,
'account': account_summary.account,
'property_summaries': []
}
for property_summary in account_summary.property_summaries:
p_dict = {
'property': property_summary.property,
'display_name': property_summary.display_name
}
a_dict['property_summaries'].append(p_dict)
entities['ga4_account_summaries'].append(a_dict)
time.sleep(REQUEST_DELAY)
for account in admin_api.list_accounts():
account_dict = {
'name': account.name,
'display_name': account.display_name,
'create_time': account.create_time,
'update_time': account.update_time,
'region_code': account.region_code,
'deleted': account.deleted
}
entities['ga4_accounts'].append(account_dict)
time.sleep(REQUEST_DELAY)
for account_summary in entities['ga4_account_summaries']:
prop_request = ListPropertiesRequest(
filter=f"parent:{account_summary['account']}")
for prop in admin_api.list_properties(prop_request):
time.sleep(REQUEST_DELAY)
data_retention_settings = admin_api.get_data_retention_settings(
name=(prop.name + '/dataRetentionSettings'))
time.sleep(REQUEST_DELAY)
google_signals_settings = admin_api.get_google_signals_settings(
name=(prop.name + '/googleSignalsSettings'))
ic_enum = prop.industry_category
sl_enum = prop.service_level
gss_state_enum = google_signals_settings.state
gss_consent_enum = google_signals_settings.consent
edr_enum = data_retention_settings.event_data_retention
prop_dict = {
'name': prop.name,
'create_time': prop.create_time,
'update_time': prop.update_time,
'parent': prop.parent,
'display_name': prop.display_name,
'industry_category': IndustryCategory(ic_enum).name,
'time_zone': prop.time_zone,
'currency_code': prop.currency_code,
'service_level': ServiceLevel(sl_enum).name,
'delete_time': prop.delete_time,
'expire_time': prop.expire_time,
'account': account_summary['account'],
'data_sharing_settings': {
'name': data_retention_settings.name,
'event_data_retention': (DataRetentionSettings
.RetentionDuration(edr_enum).name),
'reset_user_data_on_new_activity':
data_retention_settings.reset_user_data_on_new_activity
},
'google_signals_settings': {
'name': google_signals_settings.name,
'state': GoogleSignalsState(gss_state_enum).name,
'consent': GoogleSignalsConsent(gss_consent_enum).name
}
}
entities['ga4_properties'].append(prop_dict)
for property_summary in account_summary['property_summaries']:
time.sleep(REQUEST_DELAY)
for data_stream in admin_api.list_data_streams(
parent=property_summary['property']):
data_stream_dict = {
'name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'display_name': data_stream.display_name,
'create_time': data_stream.create_time,
'update_time': data_stream.update_time,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
if data_stream.web_stream_data is not None:
data_stream_dict['web_stream_data'] = {
'measurement_id': data_stream.web_stream_data.measurement_id,
'firebase_app_id': data_stream.web_stream_data.firebase_app_id,
'default_uri': data_stream.web_stream_data.default_uri
}
time.sleep(REQUEST_DELAY)
for mps in admin_api.list_measurement_protocol_secrets(
parent=data_stream.name):
mps_dict = {
'name': mps.name,
'display_name': mps.display_name,
'secret_value': mps.secret_value,
'stream_name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_measurement_protocol_secrets'].append(mps_dict)
if data_stream.android_app_stream_data is not None:
data_stream_dict['android_app_stream_data'] = {
'firebase_app_id': (data_stream
.android_app_stream_data.firebase_app_id),
'package_name': data_stream.android_app_stream_data.package_name
}
time.sleep(REQUEST_DELAY)
for mps in admin_api.list_measurement_protocol_secrets(
parent=data_stream.name):
mps_dict = {
'name': mps.name,
'display_name': mps.display_name,
'secret_value': mps.secret_value,
'stream_name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_measurement_protocol_secrets'].append(mps_dict)
if data_stream.ios_app_stream_data is not None:
data_stream_dict['ios_app_stream_data'] = {
'firebase_app_id': data_stream.ios_app_stream_data.firebase_app_id,
'bundle_id': data_stream.ios_app_stream_data.bundle_id
}
time.sleep(REQUEST_DELAY)
for mps in admin_api.list_measurement_protocol_secrets(
parent=data_stream.name):
mps_dict = {
'name': mps.name,
'display_name': mps.display_name,
'secret_value': mps.secret_value,
'stream_name': data_stream.name,
'type': DataStream.DataStreamType(data_stream.type_).name,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_measurement_protocol_secrets'].append(mps_dict)
entities['ga4_data_streams'].append(data_stream_dict)
time.sleep(REQUEST_DELAY)
for event in admin_api.list_conversion_events(
parent=property_summary['property']):
event_dict = {
'name': event.name,
'event_name': event.event_name,
'create_time': event.create_time,
'deletable': event.deletable,
'custom': event.custom,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_conversion_events'].append(event_dict)
time.sleep(REQUEST_DELAY)
for cd in admin_api.list_custom_dimensions(
parent=property_summary['property']):
cd_dict = {
'name': cd.name,
'parameter_name': cd.parameter_name,
'display_name': cd.display_name,
'description': cd.description,
'scope': cd.scope,
'disallow_ads_personalization': cd.disallow_ads_personalization,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_custom_dimensions'].append(cd_dict)
time.sleep(REQUEST_DELAY)
for cm in admin_api.list_custom_metrics(
parent=property_summary['property']):
cm_dict = {
'name': cm.name,
'parameter_name': cm.parameter_name,
'display_name': cm.display_name,
'description': cm.description,
'scope': cm.scope,
'measurement_unit': cm.measurement_unit,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_custom_metrics'].append(cm_dict)
time.sleep(REQUEST_DELAY)
for link in admin_api.list_google_ads_links(
parent=property_summary['property']):
link_dict = {
'name': link.name,
'customer_id': link.customer_id,
'can_manage_clients': link.can_manage_clients,
'ads_personalization_enabled': link.ads_personalization_enabled,
'create_time': link.create_time,
'update_time': link.update_time,
'creator_email_address': link.creator_email_address,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_google_ads_links'].append(link_dict)
time.sleep(REQUEST_DELAY)
for link in admin_api.list_firebase_links(
parent=property_summary['property']):
link_dict = {
'name': link.name,
'project': link.project,
'create_time': link.create_time,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_firebase_links'].append(link_dict)
time.sleep(REQUEST_DELAY)
for link in admin_api.list_display_video360_advertiser_links(
parent=property_summary['property']):
link_dict = {
'name': link.name,
'advertiser_id': link.advertiser_id,
'advertiser_display_name': link.advertiser_display_name,
'ads_personalization_enabled': link.ads_personalization_enabled,
'campaign_data_sharing_enabled': link.campaign_data_sharing_enabled,
'cost_data_sharing_enabled': link.cost_data_sharing_enabled,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_dv360_links'].append(link_dict)
time.sleep(REQUEST_DELAY)
for proposal in (
admin_api.list_display_video360_advertiser_link_proposals(
parent=property_summary['property'])):
lpip_enum = (proposal.link_proposal_status_details
.link_proposal_initiating_product)
lps_enum = (proposal.link_proposal_status_details
.link_proposal_state)
proposal_dict = {
'name':
proposal.name,
'advertiser_id':
proposal.advertiser_id,
'link_proposal_status_details': {
'link_proposal_initiating_product':
LinkProposalInitiatingProduct(lpip_enum).name,
'requestor_email':
proposal.link_proposal_status_details.requestor_email,
'link_proposal_state': LinkProposalState(lps_enum).name
},
'advertiser_display_name':
proposal.advertiser_display_name,
'validation_email':
proposal.validation_email,
'ads_personalization_enabled':
proposal.ads_personalization_enabled,
'campaign_data_sharing_enabled':
proposal.campaign_data_sharing_enabled,
'cost_data_sharing_enabled':
proposal.cost_data_sharing_enabled,
'property': property_summary['property'],
'property_display_name': property_summary['display_name']
}
entities['ga4_dv360_link_proposals'].append(proposal_dict)
return entities | 5,358,900 |
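A minimal usage sketch for the collector above, assuming list_ga4_entities, its enum imports and REQUEST_DELAY are already defined in the module; the credentials path is a placeholder.
# Sketch: build an Admin API client and summarise how many of each GA4 entity
# type were collected. Assumes the google-analytics-admin package is installed.
import json
from google.analytics.admin import AnalyticsAdminServiceClient

admin_api = AnalyticsAdminServiceClient.from_service_account_file(
    "service-account.json")  # hypothetical credentials file
entities = list_ga4_entities(admin_api)
print(json.dumps({kind: len(items) for kind, items in entities.items()}, indent=2))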
def text_detection_background(text, background=None, hsize=15, vsize=15):
""" Given a string TEXT, generate a picture with text in it
arguments:
TEXT -- a string to be displayed
HSIZE -- maximum number of characters in one line
VSIZE -- maximum number of lines
background -- a file for background. If None, then use white
return: a single-element list containing the path to the generated image
"""
import textwrap
img_hsize = int(hsize * 640 / 15)
img_vsize = int(vsize * 480 / 15)
if not background:
img = Image.new('RGB', (img_hsize, img_vsize), color = "white")
color = (0,0,0)
else:
img = Image.open(background)
# width, height = img.size
# left, top, right, bottom = (0, 0, min(width, img_hsize), min(height, img_vsize))
# img = img.crop((left, top, right, bottom))
img = img.resize((img_hsize, img_vsize))
color = decide_font_color(background, pos=(30,30))
# Uses this font, can change to others if needed
fnt = ImageFont.truetype(os.path.join(FONT_SRC, "Times_CE_Regular.ttf"), 30)
d = ImageDraw.Draw(img)
para = textwrap.wrap(text, width=3*hsize)
if len(para)<=0:
para = [""]
try:
d.multiline_text((30, 30), '\n'.join(para), fill=color, font=fnt)
except:
d.multiline_text((30, 30), '\n'.join(para), fill=(0), font=fnt)
# d.text((30,30), text, font=fnt, fill=(0,0,0))
filename = os.path.join(TEXT_SRC, text+strftime("-%Y-%m-%d-%H-%M-%S.png", gmtime()))
img.save(filename)
return [filename] | 5,358,901 |
def print_sig(expr):
"""
Arguments:
- `expr`:
"""
return "{0!s} × {1!s}".format(expr.dom, expr.body) | 5,358,902 |
def is_grounded_concept(c: Concept) -> bool:
""" Check if a concept is grounded """
return (
"UN" in c.db_refs
and c.db_refs["UN"][0][0].split("/")[1] != "properties"
) | 5,358,903 |
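A small illustrative check of the grounding rule above, using a hypothetical stand-in for Concept that only carries a db_refs dict.
# Illustrative sketch with a hypothetical Concept stand-in: the rule accepts a
# UN grounding whose first entry is not under the "properties" branch.
from types import SimpleNamespace

grounded = SimpleNamespace(db_refs={"UN": [("UN/entities/natural_resources/food", 0.9)]})
ungrounded = SimpleNamespace(db_refs={"UN": [("UN/properties/price", 0.8)]})

print(is_grounded_concept(grounded))    # True
print(is_grounded_concept(ungrounded))  # False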
def _get_form(app, parent_form, factory_method, force_disable_csrf=False):
"""Create and fill a form."""
class AForm(parent_form):
pass
with app.test_request_context():
extra = _update_with_csrf_disabled() if force_disable_csrf else {}
RF = factory_method(AForm)
rf = RF(**extra)
rf.profile.username.data = "my username"
rf.profile.full_name.data = "My full name"
rf.validate()
return rf | 5,358,904 |
def get_root_disk_size():
""" Get size of the root disk """
context = pyudev.Context()
rootfs_node = get_rootfs_node()
size_gib = 0
for device in context.list_devices(DEVTYPE='disk'):
# /dev/nvmeXn1 259 are for NVME devices
major = device['MAJOR']
if (major == '8' or major == '3' or major == '253' or
major == '259'):
devname = device['DEVNAME']
if devname == rootfs_node:
try:
size_gib = parse_fdisk(devname)
except Exception as e:
LOG.error("Could not retrieve disk size - %s " % e)
# Do not break config script, just return size 0
break
break
return size_gib | 5,358,905 |
def convert_to_premultiplied_png(file):
"""
http://stackoverflow.com/questions/6591361/method-for-converting-pngs-to-premultiplied-alpha
"""
logger.info("converting to premultiplied alpha")
im = Img.open(file).convert('RGBA')
a = numpy.frombuffer(im.tobytes(), dtype=numpy.uint8)
a = a.astype(numpy.float64)
alpha_layer = a[3::4] / 255.0
a[::4] *= alpha_layer
a[1::4] *= alpha_layer
a[2::4] *= alpha_layer
im = Img.frombytes("RGBA", im.size, a.astype(numpy.uint8).tobytes())
f = BytesIO()
im.save(f, 'png')
f.seek(0)
return f | 5,358,906 |
def DSQuery(dstype, objectname, attribute=None):
"""DirectoryServices query.
Args:
dstype: The type of objects to query. user, group.
objectname: the object to query.
attribute: the optional attribute to query.
Returns:
If an attribute is specified, the value of the attribute. Otherwise, the
entire plist.
Raises:
DSException: Cannot query DirectoryServices.
"""
ds_path = '/%ss/%s' % (dstype.capitalize(), objectname)
cmd = [_DSCL, '-plist', '.', '-read', ds_path]
if attribute:
cmd.append(attribute)
(stdout, stderr, returncode) = RunProcess(cmd)
if returncode:
raise DSException('Cannot query %s for %s: %s' % (ds_path,
attribute,
stderr))
plist = NSString.stringWithString_(stdout).propertyList()
if attribute:
value = None
if 'dsAttrTypeStandard:%s' % attribute in plist:
value = plist['dsAttrTypeStandard:%s' % attribute]
elif attribute in plist:
value = plist[attribute]
try:
# We're copying to a new list to convert from NSCFArray
return value[:]
except TypeError:
# ... unless we can't
return value
else:
return plist | 5,358,907 |
def main(cfg: DictConfig) -> None:
"""
This is a main function for `arachne.driver.cli`.
"""
logger.info(OmegaConf.to_yaml(cfg))
# Check the specified tool is valid
tools = list(cfg.tools.keys())
try:
assert len(tools) == 1
except AssertionError as err:
logger.exception("You must specify only one tool")
raise err
tool = tools[0]
# Setup the input DNN model
if not cfg.model_file and not cfg.model_dir:
raise RuntimeError("User must specify either model_file or model_dir.")
if cfg.model_file and cfg.model_dir:
raise RuntimeError("User must specify either model_file or model_dir.")
input_model: Model
if cfg.model_file:
input_model = init_from_file(to_absolute_path(cfg.model_file))
else:
input_model = init_from_dir(to_absolute_path(cfg.model_dir))
if cfg.model_spec_file:
# if a YAML file describing the model specification is provided, overwrite input_model.spec
input_model.spec = load_model_spec(to_absolute_path(cfg.model_spec_file))
output_model = ToolFactory.get(tool).run(input=input_model, cfg=cfg.tools.get(tool))
save_model(
model=output_model,
output_path=to_absolute_path(cfg.output_path),
tvm_cfg=cfg.tools.get(tool),
) | 5,358,908 |
def calculate_offset(lon, first_element_value):
"""
Calculate the number of elements to roll the dataset by in order to have
longitude from within requested bounds.
:param lon: longitude coordinate of xarray dataset.
:param first_element_value: the value of the first element of the longitude array to roll to.
"""
# get resolution of data
res = lon.values[1] - lon.values[0]
# calculate how many degrees to move by to have lon[0] of rolled subset as lower bound of request
diff = lon.values[0] - first_element_value
# work out how many elements to roll by to roll data by 1 degree
index = 1 / res
# calculate the corresponding offset needed to change data by diff
offset = int(round(diff * index))
return offset | 5,358,909 |
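A short usage sketch for the offset calculation, pairing it with numpy.roll; the longitude coordinate is mocked with a SimpleNamespace carrying a .values array.
# Sketch: roll a 0..359 degree longitude axis so it starts at -180.
import numpy as np
from types import SimpleNamespace

lon = SimpleNamespace(values=np.arange(0.0, 360.0, 1.0))  # stand-in for ds.lon
offset = calculate_offset(lon, first_element_value=-180.0)
rolled = np.roll(lon.values, offset)
rolled = np.where(rolled >= 180.0, rolled - 360.0, rolled)  # relabel to [-180, 180)
print(offset, rolled[0], rolled[-1])  # 180 -180.0 179.0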
def plot_instcat_dists(phosim_file, figsize=(12, 12)):
"""
Create a multipanel plot of histograms of various columns in the
phosim instance catalog.
Parameters
----------
phosim_file: str
Instance catalog file containing includeobj references for each
object type.
figsize: tuple [(9, 12)]
Figure dimensions in x, y inches
"""
instcat_dir = os.path.split(phosim_file)[0]
hist_array = HistArray(title=phosim_file, figsize=figsize)
object_files = []
with open(phosim_file) as phosim_input:
for line in phosim_input:
if line.startswith('includeobj'):
object_files.append(line.strip().split()[-1])
ra = defaultdict(list)
dec = defaultdict(list)
magnorm = defaultdict(list)
major_axis = defaultdict(list)
minor_axis = defaultdict(list)
pa = defaultdict(list)
sersic_index = defaultdict(list)
num_zero_major = 0
num_zero_minor = 0
axis_ratio = defaultdict(list)
redshift = defaultdict(list)
gamma1 = defaultdict(list)
gamma2 = defaultdict(list)
kappa = defaultdict(list)
for item in object_files:
with gzip.open(os.path.join(instcat_dir, item), 'r') as objects:
for line in objects:
tokens = line.split()
ra[item].append(float(tokens[2]))
dec[item].append(float(tokens[3]))
if float(tokens[4]) < 1000:
magnorm[item].append(float(tokens[4]))
if float(tokens[6]) > 0:
redshift[item].append(float(tokens[6]))
if 'sersic2d' in str(line):
major_axis[item].append(float(tokens[13]))
minor_axis[item].append(float(tokens[14]))
if major_axis[item][-1] <= 0:
num_zero_major += 1
if minor_axis[item][-1] <= 0:
num_zero_minor += 1
else:
axis_ratio[item].append(major_axis[item][-1]/
minor_axis[item][-1])
if axis_ratio[item][-1] > 1000:
print(line.strip())
pa[item].append(float(tokens[15]))
sersic_index[item].append(float(tokens[16]))
gamma1[item].append(float(tokens[7]))
gamma2[item].append(float(tokens[8]))
kappa[item].append(float(tokens[9]))
hist_array.plot_hists(ra, xlabel='RA (degrees)')
hist_array.plot_hists(dec, xlabel='Dec (degrees)')
hist_array.plot_hists(magnorm, xlabel='magnorm')
hist_array.plot_hists(redshift, xlabel='redshift')
hist_array.plot_hists(major_axis, xlabel='sersic2d major axis (arcsec)')
hist_array.plot_hists(minor_axis, xlabel='sersic2d minor axis (arcsec)')
hist_array.plot_hists(axis_ratio, xlabel='major/minor (#<=0:{} major, {} minor)'.format(num_zero_major, num_zero_minor))
hist_array.plot_hists(pa, xlabel='sersic2d position angle (degrees)')
hist_array.plot_hists(sersic_index, xlabel='sersic index')
hist_array.plot_hists(gamma1, xlabel='gamma1')
hist_array.plot_hists(gamma2, xlabel='gamma2')
hist_array.plot_hists(kappa, xlabel='kappa')
plt.tight_layout() | 5,358,910 |
def min_mean_col(m: ma.MaskedArray) -> int:
"""Calculate the index of the column with the smallest mean.
"""
if ma.count_masked(m) == m.size:
return -1
col_mean = np.nanmean(m, axis=0)
return np.argmin(col_mean) | 5,358,911 |
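A quick usage sketch for min_mean_col; the fully-masked case returns -1 by design.
# Sketch: column 0 has the smaller mean, and a fully masked array yields -1.
import numpy as np
import numpy.ma as ma

m = ma.array([[1.0, 5.0],
              [3.0, 7.0]])
print(min_mean_col(m))                      # 0  (column means are 2.0 and 6.0)
print(min_mean_col(ma.masked_all((2, 2))))  # -1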
def load_simclrv2(init_args):
"""
Load pretrained SimCLR-v2 model.
"""
ckpt_file = init_args["ckpt_file"]
model_dir = init_args["model_dir"]
# Load the resnet.py that comes with the SimCLR-v2's PyTorch converter
sys.path.insert(
0,
os.path.join(
model_dir,
"SimCLRv2-Pytorch",
),
)
import resnet
backbone, _ = resnet.get_resnet(depth=50, width_multiplier=1, sk_ratio=0)
backbone.load_state_dict(torch.load(ckpt_file, "cpu")["resnet"])
def forward(x):
# return the tensor obtained at the end of the network
# prior to global average pooling
return backbone(x, apply_fc=False)
return backbone, forward | 5,358,912 |
def minekey_read(request, mk_hmac, mk_fid, mk_fversion, mk_iid, mk_depth, mk_type, mk_ext, **kwargs):
"""
arguments: request, mk_hmac, mk_fid, mk_fversion, mk_iid, mk_depth, mk_type, mk_ext, **kwargs
implements: GET /key/(MK_HMAC)/(MK_FID)/(MK_FVERSION)/(MK_IID)/(MK_DEPTH)/(MK_TYPE).(MK_EXT)
returns: a suitable HttpResponse object
"""
if mk_type not in ('data', 'icon'):
diag = 'bad minekey method for GET'
Event.alert(request, 'minekey_read', diag=diag)
return HttpResponseNotFound(diag)
try:
mk = MineKey(request,
hmac=mk_hmac,
fid=mk_fid,
fversion=mk_fversion,
iid=mk_iid,
depth=mk_depth,
type=mk_type,
ext=mk_ext,
enforce_hmac_check=True)
except:
diag = 'bad minekey validation'
Event.alert(request, 'minekey_read', diag=diag)
if settings.DEBUG: raise
return HttpResponseNotFound(diag)
try:
Event.log(request, 'minekey_read', feed=mk.get_feed(), item=mk.get_item())
return mk.response()
except Exception as e:
Event.alert(request, 'minekey_read', diag=str(e))
raise | 5,358,913 |
def get_file_metadata(folder, video_relative_path):
""" """
# SAMPLE FILENAME: XXCam_01_20180517203949574.mp4
# XXXXX_XX_YYYYMMDDHHMMSSmmm.mp4
# 2019/01/01/XXCam-20180502-1727-34996.mp4
# XXCam-01-20180502-1727-34996.mp4
video_filename = os.path.basename(video_relative_path)
sub_folder = os.path.dirname(video_relative_path)
basename, extension = os.path.splitext(video_filename)
filename_parts_u = basename.split('_')
filename_parts_d = basename.split('-')
if len(filename_parts_u) == 3 and filename_parts_u[2].isdigit() and len(filename_parts_u[2]) == 17:
file_date = filename_parts_u[2][0:8]
file_time1 = filename_parts_u[2][8:12]
file_time2 = filename_parts_u[2][12:17]
basename_new = '%s-%s-%s-%s' % (filename_parts_u[0], file_date, file_time1, file_time2)
elif len(filename_parts_u) == 3 and filename_parts_u[2].isdigit() and len(filename_parts_u[2]) == 14:
# July2019 firmware update on Reolink camera changed filename format, therefore simplify mine!
file_date = filename_parts_u[2][0:8]
file_time1 = filename_parts_u[2][8:14]
# file_time2 = filename_parts_u[2][12:14]
basename_new = '%s-%s-%s' % (filename_parts_u[0], file_date, file_time1) # ,file_time2)
elif (len(filename_parts_d) == 4 and filename_parts_d[1].isdigit() and len(filename_parts_d[1]) == 8
and filename_parts_d[2].isdigit() and len(filename_parts_d[2]) == 4
and filename_parts_d[3].isdigit() and len(filename_parts_d[3]) == 5):
basename_new = basename
file_date = filename_parts_d[1]
elif (len(filename_parts_d) == 5 and filename_parts_d[2].isdigit() and len(filename_parts_d[2]) == 8
and filename_parts_d[3].isdigit() and len(filename_parts_d[3]) == 4
and filename_parts_d[4].isdigit() and len(filename_parts_d[4]) == 5):
basename_new = basename
file_date = filename_parts_d[2]
else:
basename_new = basename
file_date = 'NO_DATE'
return {'original': video_filename,
'sub_folder': sub_folder,
'source_fullpath': os.path.join(folder, video_relative_path),
'filename_new': '%s%s' % (basename_new, extension),
'basename_new': basename_new,
'basename_original': basename,
'file_date': file_date
} | 5,358,914 |
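A usage sketch showing how the filename patterns above are normalised, assuming the function and its os import are in scope; the folder path is a placeholder.
# Sketch: the underscore-separated pattern is rewritten into the
# dash-separated form, and the date component is pulled out for sorting.
meta = get_file_metadata("/srv/cctv", "XXCam_01_20180517203949574.mp4")
print(meta["filename_new"])   # XXCam-20180517-2039-49574.mp4
print(meta["file_date"])      # 20180517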
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.ndarray, decimal: int):
"""
usage.matplotlib: 4
usage.scipy: 260
usage.skimage: 14
usage.sklearn: 226
usage.statsmodels: 38
"""
... | 5,358,915 |
def test_data_alignment(role_value, should_pass, check_model):
"""Test a custom model which returns a good and alignments from data().
qtmodeltest should capture this problem and fail when that happens.
"""
class MyModel(qt_api.QAbstractListModel):
def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
return 1 if parent == qt_api.QtCore.QModelIndex() else 0
def data(
self, index=qt_api.QtCore.QModelIndex(), role=qt_api.QtCore.Qt.DisplayRole
):
if role == qt_api.QtCore.Qt.TextAlignmentRole:
return role_value
elif role == qt_api.QtCore.Qt.DisplayRole:
if index == self.index(0, 0):
return "Hello"
return None
check_model(MyModel(), should_pass=should_pass) | 5,358,916 |
def cov(a, b):
"""Return the sample covariance of vectors a and b"""
a = flex.double(a)
b = flex.double(b)
n = len(a)
assert n == len(b)
resid_a = a - flex.mean(a)
resid_b = b - flex.mean(b)
return flex.sum(resid_a*resid_b) / (n - 1) | 5,358,917 |
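The flex-based sample covariance above can be cross-checked against NumPy; a minimal sketch assuming plain Python lists as input.
# Sketch: the same (n - 1)-normalised sample covariance via NumPy, for comparison
# with the flex-based implementation above.
import numpy as np

a = [1.0, 2.0, 3.0, 4.0]
b = [2.0, 4.0, 5.0, 9.0]
np_cov = np.cov(a, b, ddof=1)[0, 1]   # off-diagonal element of the 2x2 matrix
print(np_cov)  # 3.666... ; should match cov(a, b) when cctbx's flex is available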
def beautify(soup: BeautifulSoup, rich_terminal: bool = True) -> str:
"""
Cleans up the raw HTML so it's more presentable.
Parse BeautifulSoup HTML and return prettified string
"""
beautifiedText = str()
for i in soup:
if rich_terminal:
term = Terminal()
span_sub = r"{t.italic}\1{t.normal}".format(t=term)
strong_sub = r"{t.bold}\1{t.normal}".format(t=term)
else:
span_sub = r"\1"
strong_sub = r"\1"
i = re.sub(r'<span class="\w+">(.+)</span>', span_sub, str(i),)
i = re.sub(r"<strong>(.+)</strong>", strong_sub, str(i))
beautifiedText += " " + i
# Remove leading whitespace.
beautifiedText = re.sub(r"^\s+", "", beautifiedText)
# Compress all whitespace to a single space.
beautifiedText = re.sub(r"\s{2,}", " ", beautifiedText)
# Trim whitespace immediately preceding common punctuation.
beautifiedText = re.sub(r"\s+([,\)\].;:])", r"\g<1>", beautifiedText)
# Trim whitespace immediately following common punctuation.
beautifiedText = re.sub(r"([\(])\s+", r"\g<1>", beautifiedText)
return beautifiedText | 5,358,918 |
def design_complexity(design: Design) -> int:
"""Returns an approximation of the design's complexity to create."""
diversity = 3 * len(design.required)
abundance = 2 * sum(design.required.values())
return diversity + abundance + design.additional | 5,358,919 |
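A worked example of the weighting, using a hypothetical Design stand-in with required counts and an additional term.
# Worked example with a hypothetical Design stand-in:
# diversity = 3 * 2 distinct parts, abundance = 2 * (2 + 1) pieces, plus 4 extra.
from types import SimpleNamespace

design = SimpleNamespace(required={"gear": 2, "axle": 1}, additional=4)
print(design_complexity(design))  # 3*2 + 2*3 + 4 = 16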
def approx_q_y(q_z, mu_lookup, logvar_lookup, k=10):
"""
refer to eq.13 in the paper
"""
q_z_shape = list(q_z.size()) # (b, z_dim)
mu_lookup_shape = [mu_lookup.num_embeddings, mu_lookup.embedding_dim] # (k, z_dim)
logvar_lookup_shape = [logvar_lookup.num_embeddings, logvar_lookup.embedding_dim] # (k, z_dim)
if not mu_lookup_shape[0] == k:
raise ValueError("mu_lookup_shape (%s) does not match the given k (%s)" % (
mu_lookup_shape, k))
if not logvar_lookup_shape[0] == k:
raise ValueError("logvar_lookup_shape (%s) does not match the given k (%s)" % (
logvar_lookup_shape, k))
if not q_z_shape[1] == mu_lookup_shape[1]:
raise ValueError("q_z_shape (%s) does not match mu_lookup_shape (%s) in dimension of z" % (
q_z_shape, mu_lookup_shape))
if not q_z_shape[1] == logvar_lookup_shape[1]:
raise ValueError("q_z_shape (%s) does not match logvar_lookup_shape (%s) in dimension of z" % (
q_z_shape, logvar_lookup_shape))
# TODO: vectorization and don't use for loop
batch_size = q_z_shape[0]
log_q_y_logit = torch.zeros(batch_size, k).type(q_z.type())
for k_i in torch.arange(0, k):
mu_k, logvar_k = mu_lookup(k_i), logvar_lookup(k_i)
log_q_y_logit[:, k_i] = log_gauss(q_z, mu_k, logvar_k) + np.log(1 / k)
q_y = torch.nn.functional.softmax(log_q_y_logit, dim=1)
return log_q_y_logit, q_y | 5,358,920 |
def check_net_numpy(net_ds, num_ds, currents):
"""
Check that an ambient.Profile object is created correctly and that the
methods operate as expected.
"""
chem_names = net_ds.f_names
chem_units = net_ds.f_units
# Check the chemical names and units are correct
for i in range(3):
assert num_ds.f_names[i] == chem_names[i]
assert num_ds.f_units[i] == chem_units[i]
assert num_ds.nchems == 2
# Check the error criteria on the interpolator
assert num_ds.err == 0.01
# Check the get_units method
name_list = ['temperature', 'salinity', 'pressure'] + chem_names[0:3]
unit_list = ['K', 'psu', 'Pa'] + chem_units[0:3]
for i in range(3):
assert num_ds.get_units(name_list[i])[0] == unit_list[i]
units = num_ds.get_units(name_list)
for i in range(3):
assert units[i] == unit_list[i]
# Check the interpolator function ...
z = np.linspace(num_ds.z_min, num_ds.z_max, 100)
# Next, check that the variables returned by the get_values function are
# the variables we expect
for depth in z:
assert num_ds.get_values(depth, 'temperature') == \
net_ds.get_values(depth, 'temperature')
assert num_ds.get_values(depth, 'salinity') == \
net_ds.get_values(depth, 'salinity')
assert num_ds.get_values(depth, 'pressure') == \
net_ds.get_values(depth, 'pressure')
# Test the append() method by inserting the temperature data as a new
# profile, this time in degrees celsius using the variable name temp
n0 = num_ds.nchems
z = num_ds.data[:,0]
T = num_ds.data[:,1]
T_degC = T - 273.15
data = np.vstack((z, T_degC)).transpose()
symbols = ['z', 'temp']
units = ['m', 'deg C']
comments = ['measured', 'identical to temperature, but in deg C']
num_ds.append(data, symbols, units, comments, 0)
# Check that the data were inserted correctly
Tnc = num_ds.data[:,num_ds.chem_names.index('temp')+7]
assert_array_almost_equal(Tnc, T_degC, decimal = 6)
assert num_ds.get_units('temp')[0] == 'deg C'
# Check that get_values works correctly with vector inputs for depth
Temps = num_ds.get_values(z, ['temperature', 'temp'])
for i in range(len(z)):
assert_approx_equal(Temps[i,0], Temps[i,1] + 273.15, significant = 6)
# Make sure the units are returned correctly
assert num_ds.get_units('temp')[0] == 'deg C'
# Check that temp is now listed as a chemical
assert num_ds.nchems == n0 + 1
assert num_ds.chem_names[-1] == 'temp'
# Test the API for calculating the buoyancy frequency (note that we do
# not check the result, just that the function call does not raise an
# error)
N_num = num_ds.buoyancy_frequency(z)
N_net = net_ds.buoyancy_frequency(z)
assert_array_almost_equal(N_num, N_net, decimal=6) | 5,358,921 |
def svn_repos_get_logs2(*args):
"""
svn_repos_get_logs2(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start,
svn_revnum_t end, svn_boolean_t discover_changed_paths,
svn_boolean_t strict_node_history,
svn_repos_authz_func_t authz_read_func,
svn_log_message_receiver_t receiver,
apr_pool_t pool) -> svn_error_t
"""
return apply(_repos.svn_repos_get_logs2, args) | 5,358,922 |
def error_function(theta, X, y):
"""Error function J definition"""
diff = np.dot(X, theta) - y
return (1. / (2 * m)) * np.dot(np.transpose(diff), diff) | 5,358,923 |
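A small worked example of the least-squares cost above; m is assumed to be the number of training rows (a module-level value in the original snippet), derived here from X.
# Worked example: J(theta) = 1/(2m) * (X.theta - y)^T (X.theta - y).
import numpy as np

X = np.array([[1.0, 1.0],
              [1.0, 2.0],
              [1.0, 3.0]])          # bias column plus one feature
y = np.array([[1.0], [2.0], [4.0]])
theta = np.array([[0.0], [1.0]])
m = X.shape[0]                      # assumed global used by error_function

J = error_function(theta, X, y)
print(J[0, 0])                      # (0^2 + 0^2 + 1^2) / (2*3) = 0.1666...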
def register(operation_name):
"""
Registers the decorated class as an Operation with the supplied operation name
:param operation_name: The identifying name for the Operation
"""
def wrapper(clazz):
if operation_name not in OPERATIONS:
OPERATIONS[operation_name] = clazz
return clazz
return wrapper | 5,358,924 |
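A usage sketch for the registry decorator, assuming it runs in the same module as register and that OPERATIONS is the module-level dict the decorator closes over.
# Sketch: registering two operations and looking one up by name.
OPERATIONS = {}

@register("uppercase")
class UppercaseOperation:
    def apply(self, value):
        return value.upper()

@register("reverse")
class ReverseOperation:
    def apply(self, value):
        return value[::-1]

print(OPERATIONS["uppercase"]().apply("hello"))  # HELLO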
def url_exists(video):
"""
check each source for a url for this video; return True as soon as one is found. If none are found, return False
"""
max_timeout = int(kodi.get_setting('source_timeout'))
logger.log('Checking for Url Existence: |%s|' % (video), log_utils.LOGDEBUG)
for cls in relevant_scrapers(video.video_type):
if kodi.get_setting('%s-sub_check' % (cls.get_name())) == 'true':
scraper_instance = cls(max_timeout)
url = scraper_instance.get_url(video)
if url:
logger.log('Found url for |%s| @ %s: %s' % (video, cls.get_name(), url), log_utils.LOGDEBUG)
return True
logger.log('No url found for: |%s|' % (video), log_utils.LOGDEBUG)
return False | 5,358,925 |
def test(loader):
"""Evaluate images with best weights."""
net.eval()
try:
filename = osp.join(cfg.OUTPUT_DIR, 'best_val_acc_weights.pth')
net.load_state_dict(torch.load(filename))
except FileNotFoundError:
net.load_state_dict(torch.load(cfg.OPTIM_SNAPSHOT))
bar = tqdm(enumerate(loader))
for _, sample in bar:
start = time.time()
# TODO: Call the test routine for the net
# outputs, id = routines.test_routine(sample)
total_time = time.time() - start
# TODO: Save porosity value here | 5,358,926 |
def test_actions_explicit_get_collector_action_for_unexisting_terminal():
"""
Test for situation when `get_collector` has an action for un-existing
terminal.
"""
action = get_collector()
@action
def INT(context, value):
return int(value)
@action
def STRING(context, value):
return "#{}#".format(value)
@action
def STRING2(context, value):
return "#{}#".format(value)
grammar = Grammar.from_file(os.path.join(THIS_FOLDER, 'grammar.pg'))
Parser(grammar, actions=action.all) | 5,358,927 |
def log2_grad(orig, grad):
"""Returns [grad * 1 / (log(2) * x)]"""
x = orig.args[0]
ones = ones_like(x)
two = const(2.0, dtype=x.checked_type.dtype)
return [grad * ones / (log(two) * x)] | 5,358,928 |
def test_anim_pairwise_maxmatch():
"""Test generation of NUCmer pairwise comparison command with maxmatch.
"""
cmd = anim.construct_nucmer_cmdline("file1.fna", "file2.fna",
maxmatch=True)
assert_equal(cmd, "nucmer -maxmatch -p ./nucmer_output/file1_vs_file2 " +
"file1.fna file2.fna")
print(cmd) | 5,358,929 |
def _stream_annotation(file_name, pn_dir):
"""
Stream an entire remote annotation file from Physionet.
Parameters
----------
file_name : str
The name of the annotation file to be read.
pn_dir : str
The PhysioNet directory where the annotation file is located.
Returns
-------
ann_data : ndarray
The resulting data stream in numpy array format.
"""
# Full url of annotation file
url = posixpath.join(config.db_index_url, pn_dir, file_name)
# Get the content
response = requests.get(url)
# Raise HTTPError if invalid url
response.raise_for_status()
# Convert to numpy array
ann_data = np.frombuffer(response.content, dtype=np.dtype('<u1'))
return ann_data | 5,358,930 |
def get_scenarios():
"""
Return a list scenarios and values for parameters in each of them
:return:
"""
# Recover InteractiveSession
isess = deserialize_isession_and_prepare_db_session()
if isess and isinstance(isess, Response):
return isess
scenarios = get_scenarios_in_state(isess.state)
return build_json_response(scenarios, 200) | 5,358,931 |
def download() -> str:
""" Returns a download of the active files.
:return: the zip file to be downloaded.
"""
file_manager = utility.load_file_manager()
response = make_response(file_manager.zip_active_files(
"scrubbed_documents.zip"))
# Disable download caching
response.headers["Cache-Control"] = \
"max-age=0, no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response | 5,358,932 |
def evaluate_test_arff(model_path, test_arff_path, out_path):
"""
Obtain predictions of test_file using the trained model in model_path
:param output_folder:
:param output_name:
:param model_path:
:param test_file:
"""
# PREDICTIONS FILE HEADERS: INSTANCE, ACTUAL, PREDICTED, ERROR
bash_file_path = "../../data/bash_scripts/explorer_test_model.sh "
with open(out_path, 'w') as fi:
fi.close()
command = "".join([bash_file_path, test_arff_path, " ", model_path, " ", out_path])
print(command)
subprocess.call(command, shell=True)
remove_lines(out_path) # remove headers of prediction file
df_participant = pd.read_csv(out_path, header=0, sep=",")
return df_participant | 5,358,933 |
def main(root_dir: str,
train_dir: str,
model_info_dir: str) -> None:
"""
Main function for receiving args, and passing them through to form recognizer training function
Parameters
----------
root_dir: str
Root datastore being used
train_dir: str
Path on blob containing training images and asset files
model_info_dir: str
Path to folder containing trained model information
"""
log.info("Form Recognizer Training Step")
# get context of current run
run = Run.get_context()
model_info_dir = join(root_dir, model_info_dir)
# set form recognizer credentials
form_credentials = {"key": run.get_secret("formkey"),
"endpoint": run.get_secret("formendpoint")}
log.info("Training Custom Form Recognizer Model...")
train_results = train_model(
form_credentials=form_credentials,
root_dir=root_dir,
source_directory=train_dir,
sas_uri=run.get_secret("formtrainsasuri")
)
# get model info and save results to JSON
model_info = train_results["modelInfo"]
if model_info is not None:
filename = "model.json"
write_model_info(model_info_dir, filename, model_info)
log.info("Model info saved to model.json")
else:
log.info("Model information empty. Skipping write model information.")
# Log metrics
if "trainResult" in train_results:
if "errors" in train_results["trainResult"] and len(train_results["trainResult"]["errors"]) > 0:
log.error("Error training the model")
log.error(f"train_results: \n{json.dumps(train_results, indent=4)}")
return
log.info("Logging metrics")
avg_accuracy = train_results["trainResult"]["averageModelAccuracy"]
run.parent.log(name="avg_accuracy", value=avg_accuracy)
metrics = train_results["trainResult"]["fields"]
for element in metrics:
target = element["fieldName"]
accuracy = element["accuracy"]
run.parent.log(name=f"{target}_accuracy", value=accuracy)
else:
log.error("Error, could not find any metrics to log") | 5,358,934 |
def load_from_pickle_file(filepath):
""" Loads a pickle file into a python variable """
with open(filepath, "rb") as f:
python_obj = pickle.load(f)
return python_obj | 5,358,935 |
def download_y(id:str, table):
"""
Download data; mainly meant to run inside a thread, one thread per stock.
:param id: stock code
:param table: the collection in MongoDB
:return:
"""
date = datetime.datetime.now().strftime("%Y%m%d")
# date = "20200228"
ndate = datetime.datetime.now().strftime("%Y-%m-%d")
# ndate = "2020-02-28"
if id.startswith("0"):
nid = id+".SZ"
elif id.startswith("3"):
nid = id +".SZ"
else:
nid = id +".SH"
df = pro.daily(ts_code=nid, start_date=date, end_date=date)
# df = ts.get_hist_data(id, start=date, end=date)
y = df["pct_chg"].values
if y >= 0.00:
data = 1
else:
data = 0
dic = {"today_updown": data}
values = {"$set": dic}
myquery = {"code": id, "date": ndate}
table.update_many(myquery, values) | 5,358,936 |
def generate_inputs_pw(fixture_code, generate_structure, generate_kpoints_mesh, generate_upf_data):
"""Generate default inputs for a `PwCalculation."""
def _generate_inputs_pw():
"""Generate default inputs for a `PwCalculation."""
from aiida.orm import Dict
from aiida_quantumespresso.utils.resources import get_default_options
inputs = {
'code': fixture_code('quantumespresso.pw'),
'structure': generate_structure(),
'kpoints': generate_kpoints_mesh(2),
'parameters': Dict(dict={
'CONTROL': {
'calculation': 'scf'
},
'SYSTEM': {
'ecutrho': 240.0,
'ecutwfc': 30.0
}
}),
'pseudos': {
'Si': generate_upf_data('Si')
},
'metadata': {
'options': get_default_options()
}
}
return inputs
return _generate_inputs_pw | 5,358,937 |
def test_private_access_through_caller_object(enable_accessify):
"""
Case: access to private member, which do not follow naming conv., through member's class object in another class.
Expect: inaccessible due to its protection level error message.
"""
tesla = Tesla()
expected_error_message = INACCESSIBLE_DUE_TO_ITS_PROTECTION_LEVEL_EXCEPTION_MESSAGE.format(
class_name=CarWithPrivateEngine.__name__, class_method_name='start_engine',
)
with pytest.raises(InaccessibleDueToItsProtectionLevelException) as error:
tesla.run()
assert expected_error_message == error.value.message | 5,358,938 |
def connected_components(edge_index, num_nodes=None):
"""Find the connected components of a given graph.
Args:
edge_index (LongTensor): Edge coordinate matrix.
num_nodes (int, optional): Number of nodes. Defaults to None.
Returns:
LongTensor: Vector assigning each node to its component index.
"""
if num_nodes is None:
num_nodes = edge_index.max().item() + 1
device = edge_index.device
row, col = edge_index.cpu()
out = cc_cpu.connected_components(row, col, num_nodes)
return out.to(device) | 5,358,939 |
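The snippet relies on a compiled cc_cpu extension; as a cross-check, the same component labels can be computed with SciPy on a small graph (a sketch, not the original kernel).
# Cross-check sketch: component labels for a 5-node graph with edges
# 0-1, 1-2 and 3-4, computed with SciPy instead of the cc_cpu extension.
import torch
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components as scipy_cc

edge_index = torch.tensor([[0, 1, 3],
                           [1, 2, 4]], dtype=torch.long)
num_nodes = 5
row, col = edge_index.numpy()
adj = coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(num_nodes, num_nodes))
n_comp, labels = scipy_cc(adj, directed=False)
print(n_comp, labels)  # 2 [0 0 0 1 1]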
def assembly2graph(path=DATA_PATH):
"""Convert assemblies (assembly.json) to graph format"""
"""Return a list of NetworkX graphs"""
graphs = []
input_files = get_input_files(path)
for input_file in tqdm(input_files, desc="Generating Graphs"):
ag = AssemblyGraph(input_file)
graph = ag.get_graph_networkx()
graphs.append(graph)
return graphs, input_files | 5,358,940 |
def test_check_metadata_italic_style():
""" METADATA.pb font.style "italic" matches font internals ? """
from fontbakery.constants import MacStyle
from fontbakery.profiles.googlefonts import (com_google_fonts_check_metadata_italic_style as check,
family_metadata,
font_metadata)
# Our reference Merriweather Italic is known to good
fontfile = TEST_FILE("merriweather/Merriweather-Italic.ttf")
ttFont = TTFont(fontfile)
family_directory = os.path.dirname(fontfile)
family_meta = family_metadata(family_directory)
font_meta = font_metadata(family_meta, fontfile)
# So it must PASS:
print ("Test PASS with a good font...")
status, message = list(check(ttFont, font_meta))[-1]
assert status == PASS
# now let's introduce issues on the FULL_FONT_NAME entries
# to test the "bad-fullfont-name" codepath:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.FULL_FONT_NAME:
backup = name.string
ttFont['name'].names[i].string = "BAD VALUE".encode(name.getEncoding())
print ("Test FAIL with a bad NameID.FULL_FONT_NAME entry...")
status, message = list(check(ttFont, font_meta))[-1]
assert status == FAIL and message.code == "bad-fullfont-name"
# and restore the good value:
ttFont['name'].names[i].string = backup
# And, finally, let's flip off that italic bit
# and get a "bad-macstyle" FAIL (so much fun!):
print ("Test FAIL with bad macstyle bit value...")
ttFont['head'].macStyle &= ~MacStyle.ITALIC
status, message = list(check(ttFont, font_meta))[-1]
assert status == FAIL and message.code == "bad-macstyle" | 5,358,941 |
def _get_default_backing(backing):
"""
_get_default_backing(backing)
Returns the prefered backing store
- if user provides a valid Backing object, use it
- if there is a default_backing object instantiated, use it
- if the user provided a configuration dict, use it to create
a new default_backing object
- otherwise, create a default_backing object using our defaults.
"""
# Probably they didn't mean to do this...
global default_backing, default_backing_config
if isinstance(backing, Backing):
return backing
if default_backing:
return default_backing
elif type(backing) is dict:
default_backing = Backing(**backing)
else:
# create a new default backing
default_backing = Backing(**default_backing_config)
return default_backing | 5,358,942 |
def slice(request, response, start, end=None):
"""Send a byte range of the response body
:param start: The starting offset. Follows python semantics including
negative numbers.
:param end: The ending offset, again with python semantics and None
(spelled "null" in a query string) to indicate the end of
the file.
"""
content = resolve_content(response)
response.content = content[start:end]
return response | 5,358,943 |
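The byte-range behaviour follows ordinary Python slicing; a small sketch of the three common cases.
# Sketch of the slicing semantics the handler relies on (plain bytes, no server).
content = b"0123456789"
print(content[2:5])      # b'234'   -> start=2, end=5
print(content[-4:None])  # b'6789'  -> negative start, open end ("null" end)
print(content[5:None])   # b'56789' -> everything from offset 5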
def pie(args):
"""Plots populational pie charts for desired groups"""
# get basename of file for writing outputs
name = [os.path.splitext(os.path.basename(args.file))[0]]
# read file into anndata obj
if args.verbose:
print("Reading {}".format(args.file), end="")
a = sc.read(args.file)
if args.verbose:
print(" - {} cells and {} genes".format(a.shape[0], a.shape[1]))
# generate cluster_pie plot
os.chdir(args.outdir) # set output directory for scanpy figures
_ = cluster_pie(a, pie_by=args.pieby, groupby=args.groupby)
if args.verbose:
print(
"Saving cluster pie charts to {}/{}_pie.png".format(
args.outdir, "_".join(name)
)
)
plt.savefig("{}/{}_pie.png".format(args.outdir, "_".join(name))) | 5,358,944 |
def get_ps(sdfits, scan, ifnum=0, intnum=None, plnum=0, fdnum=0, method='vector', avgf_min=256):
"""
Parameters
----------
sdfits :
scan : int
Scan number.
plnum : int
Polarization number.
method : {'vector', 'classic'}, optional
Method used to compute the source temperature.
If set to ``'vector'`` it will use Eq. (16) of
Winkel et al. (2012). If set to ``'classic'`` it
will use the same method as GBTIDL.
The default is ``'vector'``.
Returns
-------
"""
ps_scan = sdfits.get_scans(scan, ifnum=ifnum, intnum=intnum, plnum=plnum)
rows = ps_scan.table
obsmode = rows["OBSMODE"]
last_on = rows["LASTON"]
last_off = rows["LASTOFF"]
procnum = rows["PROCSEQN"]
source = np.unique(rows['OBJECT'])[0]
tcal = np.average(rows['TCAL'], axis=0)
procname, swstate, swtchsig = obsmode[0].split(':')
if procname not in ["OffOn", "OnOff"]:
warnings.warn(f"Selected scan is not OnOff or OffOn, it is: {procname}."
f"Cannot get Tcal from this scan.")
return None
scan_on, scan_off = utils.get_ps_scan_pair(scan, procnum, procname)
sou_on = sdfits.get_scans(scan_on, sig="T", cal="T", ifnum=ifnum, intnum=intnum, plnum=plnum)
sou_off = sdfits.get_scans(scan_on, sig="T", cal="F", ifnum=ifnum, intnum=intnum, plnum=plnum)
off_on = sdfits.get_scans(scan_off, sig="T", cal="T", ifnum=ifnum, intnum=intnum, plnum=plnum)
off_off = sdfits.get_scans(scan_off, sig="T", cal="F", ifnum=ifnum, intnum=intnum, plnum=plnum)
if method == 'vector':
sou_on.average()
sou_off.average()
off_on.average()
off_off.average()
off_freq = off_off.freq
sou_freq = sou_on.freq
nchan = off_on.data.shape[0]
facs = utils.factors(nchan)
avgf = np.min(facs[facs >= avgf_min])
kappa_off = get_kappa(off_on.data, off_off.data, avgf=avgf)
kappa_freq = off_freq.reshape(nchan//avgf, avgf).mean(axis=1)
# Interpolate back to high frequency resolution.
pt = np.argsort(kappa_freq)
pi = np.argsort(sou_freq)
kappa_interp = np.interp(sou_freq.to('Hz').value[pi], kappa_freq.to('Hz').value[pt], kappa_off)
# Compute the source temperature (Eq. (16) in Winkel et al. 2012).
tsou_on = (kappa_interp + 1.)*tcal*(sou_on.data - off_on.data)/off_on.data
tsou_off = kappa_interp*tcal*(sou_off.data - off_off.data)/off_off.data
# Average.
tsou = 0.5*(tsou_on + tsou_off)
elif method == 'gbtidl':
# Eqs. (1) and (2) from Braatz (2009, GBTIDL calibration guide)
# https://www.gb.nrao.edu/GBT/DA/gbtidl/gbtidl_calibration.pdf
tsys = gbtidl_tsys(off_on.data, off_off.data, tcal)
sig = 0.5*(sou_on.data + sou_off.data)
ref = 0.5*(off_on.data + off_off.data)
ta = gbtidl_sigref2ta(sig, ref, tsys)
tint_sou = 0.5*(sou_on.table["EXPOSURE"] + sou_off.table["EXPOSURE"])
tint_off = 0.5*(off_on.table["EXPOSURE"] + off_off.table["EXPOSURE"])
tint = 0.5*(tint_sou + tint_off)
dnu = np.mean(sou_on.table["CDELT1"])
tsou = np.average(ta, axis=0, weights=dnu*tint*np.power(tsys, -2.))
elif method == 'classic':
tsys = classic_tsys(off_on.data, off_off.data, tcal)
ta_on = (sou_on.data - off_on.data)/off_on.data*(tsys[:,np.newaxis] + tcal)
ta_off = (sou_off.data - off_off.data)/off_off.data*(tsys[:,np.newaxis])
tint_sou = 0.5*(sou_on.table["EXPOSURE"] + sou_off.table["EXPOSURE"])
tint_off = 0.5*(off_on.table["EXPOSURE"] + off_off.table["EXPOSURE"])
tint = 0.5*(tint_sou + tint_off)
dnu = np.mean(sou_on.table["CDELT1"])
ta_on = np.average(ta_on, axis=0, weights=dnu*tint_sou*np.power(tsys, -2.))
ta_off = np.average(ta_off, axis=0, weights=dnu*tint_off*np.power(tsys, -2.))
tsou = 0.5*(ta_on + ta_off)
return tsou | 5,358,945 |
def choiceprompt(variable: Variable) -> Binding:
"""Prompt to choose from several values for the given name."""
if not variable.choices:
raise ValueError("variable with empty choices")
choices = {str(number): value for number, value in enumerate(variable.choices, 1)}
lines = [
f"Select {variable.name}:",
*[f"{number} - {value}" for number, value in choices.items()],
"Choose from {}".format(", ".join(choices.keys())),
]
choice = click.prompt(
"\n".join(lines),
type=click.Choice(list(choices)),
default="1",
show_choices=False,
)
return bind(variable, choices[choice]) | 5,358,946 |
def test_edge_betweenness_centrality_k_full(
graph_file,
directed,
subset_size,
normalized,
weight,
subset_seed,
result_dtype,
use_k_full,
edgevals
):
"""Tests full edge betweenness centrality by using k = G.number_of_vertices()
instead of k=None, checks that k scales properly"""
sorted_df = calc_edge_betweenness_centrality(
graph_file,
directed=directed,
normalized=normalized,
k=subset_size,
weight=weight,
seed=subset_seed,
result_dtype=result_dtype,
use_k_full=use_k_full,
edgevals=edgevals
)
compare_scores(sorted_df, first_key="cu_bc", second_key="ref_bc") | 5,358,947 |
def GetCLInfo(review_host, change_id, auth_cookie='', include_messages=False,
include_detailed_accounts=False):
"""Get the info of the specified CL by querying the Gerrit API.
Args:
review_host: Base URL to the API endpoint.
change_id: Identity of the CL to query.
auth_cookie: Auth cookie if the API is not public.
include_messages: Whether to pull and return the CL messages.
include_detailed_accounts: Whether to pull and return the email of users
in CL messages.
Returns:
An instance of `CLInfo`. Optional fields might be `None`.
Raises:
GitUtilException if error occurs while querying the Gerrit API.
"""
url = f'{review_host}/changes/{change_id}'
params = []
if include_messages:
params.append(('o', 'MESSAGES'))
if include_detailed_accounts:
params.append(('o', 'DETAILED_ACCOUNTS'))
if params:
url = url + '?' + urllib.parse.urlencode(params)
pool_manager = PoolManager(ca_certs=certifi.where())
pool_manager.headers['Cookie'] = auth_cookie
pool_manager.headers['Content-Type'] = 'application/json'
pool_manager.headers['Connection'] = 'close'
try:
r = pool_manager.urlopen('GET', url)
except urllib3.exceptions.HTTPError:
raise GitUtilException(f'invalid url {url}')
if r.status != http.client.OK:
raise GitUtilException(f'request unsuccessfully with code {r.status}')
try:
# the response starts with a magic prefix line for preventing XSSI which
# should be stripped.
stripped_json = r.data.split(b'\n', 1)[1]
json_data = json_utils.LoadStr(stripped_json)
except Exception:
raise GitUtilException('Response format Error: %r' % (r.data, ))
def _ConvertGerritCLMessage(json_data):
return CLMessage(
json_data['message'],
json_data['author']['email'] if include_detailed_accounts else None)
try:
return CLInfo(json_data['change_id'], json_data['_number'],
_GERRIT_CL_STATUS_TO_CL_STATUS[json_data['status']],
[_ConvertGerritCLMessage(x) for x in json_data['messages']]
if include_messages else None)
except Exception as ex:
logging.debug('Unexpected Gerrit API response for CL info: %r', json_data)
raise GitUtilException('failed to parse the Gerrit API response') from ex | 5,358,948 |
def revoke_jti(jti):
"""Revoke the given jti"""
revoked_token = RevokedToken(jti=jti)
DB.session.add(revoked_token)
DB.session.commit() | 5,358,949 |
def rehash(file_path):
"""Return (hash, size) for a file with path file_path. The hash and size
are used by pip to verify the integrity of the contents of a wheel."""
with open(file_path, 'rb') as file:
contents = file.read()
hash = base64.urlsafe_b64encode(hashlib.sha256(contents).digest()).decode('latin1').rstrip('=')
size = len(contents)
return hash, size | 5,358,950 |
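A usage sketch showing how the (hash, size) pair typically feeds a wheel RECORD line, assuming rehash and its base64/hashlib imports are in scope; the temporary file stands in for a wheel member.
# Sketch: hash a small file and format a RECORD-style line
# ("path,sha256=<digest>,<size>") as pip expects for wheel contents.
import tempfile, os

with tempfile.NamedTemporaryFile("wb", suffix=".py", delete=False) as tmp:
    tmp.write(b"print('hello')\n")
digest, size = rehash(tmp.name)
print(f"{os.path.basename(tmp.name)},sha256={digest},{size}")
os.unlink(tmp.name)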
def set_token_auth():
"""Set authorisation header for JWT token using the Bearer schema."""
global AUTH
if not JWT_DISABLED:
api_jwt = get_token(AUTH_API_ENDP, URI_API_USER, URI_API_PASS)
AUTH = f'Bearer {api_jwt}' | 5,358,951 |
def _magpie_update_services_conflict(conflict_services, services_dict, request_cookies):
# type: (List[Str], ServicesSettings, AnyCookiesType) -> Dict[Str, int]
"""
Resolve conflicting services by name during registration by updating them only if pointing to different URL.
"""
magpie_url = get_magpie_url()
statuses = dict()
for svc_name in conflict_services:
statuses[svc_name] = 409
svc_url_new = services_dict[svc_name]["url"]
svc_url_db = "{magpie}/services/{svc}".format(magpie=magpie_url, svc=svc_name)
svc_resp = requests.get(svc_url_db, cookies=request_cookies)
svc_info = get_json(svc_resp).get(svc_name)
svc_url_old = svc_info["service_url"]
if svc_url_old != svc_url_new:
svc_info["service_url"] = svc_url_new
res_svc_put = requests.patch(svc_url_db, data=svc_info, cookies=request_cookies)
statuses[svc_name] = res_svc_put.status_code
print_log("[{url_old}] => [{url_new}] Service URL update ({svc}): {resp}"
.format(svc=svc_name, url_old=svc_url_old, url_new=svc_url_new, resp=res_svc_put.status_code),
logger=LOGGER)
return statuses | 5,358,952 |
def export_inference_model(
model: TinyImageNetModel, out_path: str, tmpdir: str
) -> None:
"""
export_inference_model uses TorchScript JIT to serialize the
TinyImageNetModel into a standalone file that can be used during inference.
TorchServe can also handle interpreted models with just the model.py file if
your model can't be JITed.
"""
print("exporting inference model")
jit_path = os.path.join(tmpdir, "model_jit.pt")
jitted = torch.jit.script(model)
print(f"saving JIT model to {jit_path}")
torch.jit.save(jitted, jit_path)
model_name = "tiny_image_net"
mar_path = os.path.join(tmpdir, f"{model_name}.mar")
print(f"creating model archive at {mar_path}")
subprocess.run(
[
"torch-model-archiver",
"--model-name",
"tiny_image_net",
"--handler",
"torchx/examples/apps/lightning_classy_vision/handler/handler.py",
"--version",
"1",
"--serialized-file",
jit_path,
"--export-path",
tmpdir,
],
check=True,
)
remote_path = os.path.join(out_path, "model.mar")
print(f"uploading to {remote_path}")
fs, _, rpaths = fsspec.get_fs_token_paths(remote_path)
assert len(rpaths) == 1, "must have single path"
fs.put(mar_path, rpaths[0]) | 5,358,953 |
def configure_app(app, config):
"""read configuration"""
app.config.from_object(DefaultConfig())
if config is not None:
app.config.from_object(config)
app.config.from_envvar('APP_CONFIG', silent=True) | 5,358,954 |
def rtc_runner(rtc):
"""Resolved tool contract runner."""
return run_main(polish_chunks_pickle_file=rtc.task.input_files[0],
sentinel_file=rtc.task.input_files[1],
subreads_file=rtc.task.input_files[2],
output_json_file=rtc.task.output_files[0],
max_nchunks=rtc.task.max_nchunks) | 5,358,955 |
def dict_to_json_str(o: Any) -> str:
"""
Converts a python object into json.
"""
json_str = json.dumps(o, cls=EnhancedJSONEncoder, sort_keys=True)
return json_str | 5,358,956 |
def paramid_to_paramname(paramid):
"""Turn a parameter id number into a parameter name"""
try:
return param_info[paramid]['n']
except KeyError:
return "UNKNOWN_%s" % str(hex(paramid)) | 5,358,957 |
def write_left_aligned_text(text: str,
parent_surface,
font_size: int=FONT_SIZE,
font_color: Color=FONT_COLOR) -> None:
"""Draw the given text at the left border of the parent surface."""
font = pygame.font.Font(pygame.font.match_font(FONT_NAME), font_size)
text_surface = font.render(text, True, font_color)
text_pos = text_surface.get_rect(center=(
0.5 * parent_surface.get_width(), 0.5 * parent_surface.get_height()))
text_pos.left = 0 # Align text with left border
parent_surface.blit(text_surface, text_pos) | 5,358,958 |
def max_dist_comp(G, cc0, cc1):
""" Maximum distance between components
Parameters
----------
G : nx.graph
Graph
cc0 : list
Component 0
cc1 : list
Compoennt 1
Returns
-------
threshold : float
Maximum distance
"""
# Assertions
assert isinstance(G, nx.Graph), "G is not a NetworkX graph"
# Calculation
threshold = 0
for n0 in cc0:
for n1 in cc1:
distance = metrics.distance_between_nodes(G, n0, n1)
if distance > threshold:
threshold = distance
return threshold | 5,358,959 |
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)
)
return features | 5,358,960 |
def upload(images, host):
"""Upload an image file or rehost an image URL."""
uploaded_urls = []
for img_url, del_url in upload_images(images, host):
uploaded_urls.append(img_url)
click.echo(f'{img_url}', nl=False)
click.echo(f' | Delete: {del_url}' if del_url else '')
if conf.copy_to_clipboard:
pyperclip.copy(' '.join(uploaded_urls)) | 5,358,961 |
def sync_instrument(target_session, instruments):
"""
Inserts / updates all supplied instruments into the target database.
Parameters
----------
target_session : sqlalchemy session
The sqlalchemy session into which we will insert instruments.
    instruments : list
        The list of instruments that we wish to insert / update in the target
        database.

    Returns
    -------
    None
"""
for i in instruments:
target_session.merge(i)
target_session.commit() | 5,358,962 |
def exp(var):
"""
Returns variable representing exp applied to the input variable var
"""
result = Var(np.exp(var.val))
result.parents[var] = var.children[result] = np.exp(var.val)
return result | 5,358,963 |
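# --- Editor's sketch (not part of the original source) ---------------------
# exp() above assumes a reverse-mode autodiff `Var` node that stores a value
# plus `parents` / `children` dicts mapping related nodes to the local
# partial derivative. A minimal sketch consistent with that usage:
#
# import numpy as np
#
# class Var:
#     def __init__(self, val):
#         self.val = val
#         self.parents = {}   # {input Var: d(self)/d(input)}
#         self.children = {}  # {output Var: d(output)/d(self)}
#
# Since d/dx exp(x) = exp(x), exp() records np.exp(var.val) on both sides of
# the edge between `var` and the new result node.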
def format_now(frmt):
""" Formats the current time according to the frmt string """
print(datetime.datetime.now().strftime(frmt))
return | 5,358,964 |
def invalidate_cache(
key: str = None,
keys: List = [],
obj: Any = None,
obj_attr: str = None,
namespace: str = None,
):
"""Invalidates a specific cache key"""
if not namespace:
namespace = HTTPCache.namespace
if key:
keys = [key]
def wrapper(func: Callable):
@wraps(func)
async def inner(*args, **kwargs):
try:
                # extract the object passed via the `obj` keyword argument; its `obj_attr` attribute is used to build the cache keys
_obj = kwargs.get(f"{obj}", None)
_keys = await HTTPKeys.generate_keys(
keys=keys, config=HTTPCache, obj=_obj, obj_attr=obj_attr
)
_cache = HTTPCacheBackend(
redis=HTTPCache.redis_client, namespace=namespace
)
await _cache.invalidate_all(keys=_keys)
_computed_response = await func(*args, **kwargs)
return _computed_response
except Exception as e:
log_error(msg=f"Cache Error: {e}", e=e, method="cache")
return await func(*args, **kwargs)
return inner
return wrapper | 5,358,965 |
def read_dataset(filename):
"""Reads in the TD events contained in the N-MNIST/N-CALTECH101 dataset file specified by 'filename'"""
    # N-MNIST frames are 34×34 pixels
f = open(filename, 'rb')
raw_data = np.fromfile(f, dtype=np.uint8)
f.close()
raw_data = np.uint32(raw_data)
all_y = raw_data[1::5]
all_x = raw_data[0::5]
all_p = (raw_data[2::5] & 128) >> 7 #bit 7
all_ts = ((raw_data[2::5] & 127) << 16) | (raw_data[3::5] << 8) | (raw_data[4::5])
#Process time stamp overflow events
time_increment = 2 ** 13
overflow_indices = np.where(all_y == 240)[0]
for overflow_index in overflow_indices:
all_ts[overflow_index:] += time_increment
#Everything else is a proper td spike
td_indices = np.where(all_y != 240)[0]
events = np.stack([all_x[td_indices], all_y[td_indices], all_ts[td_indices], all_p[td_indices]], axis=1).astype(np.float32)
# events[:,3] = 2*events[:,3]-1
return events | 5,358,966 |
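# --- Editor's worked example (not part of the original source) -------------
# Each N-MNIST event is 5 bytes: x, y, then a 23-bit microsecond timestamp
# with the polarity packed into bit 7 of the third byte. The round trip below
# uses the same bit operations as read_dataset() on one synthetic event.
x, y, polarity, ts_us = 12, 7, 1, 123_456
raw = [x, y, (polarity << 7) | ((ts_us >> 16) & 0x7F), (ts_us >> 8) & 0xFF, ts_us & 0xFF]
decoded_p = (raw[2] & 128) >> 7
decoded_ts = ((raw[2] & 127) << 16) | (raw[3] << 8) | raw[4]
assert (decoded_p, decoded_ts) == (polarity, ts_us)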
def prge_annotation():
"""Returns an annotation with protein/gene entities (PRGE) identified.
"""
annotation = {"ents": [{"text": "p53", "label": "PRGE", "start": 0, "end": 0},
{"text": "MK2", "label": "PRGE", "start": 0, "end": 0}],
"text": "p53 and MK2",
"title": ""}
return annotation | 5,358,967 |
def inner_points_mask(points):
"""Mask array into `points` where ``points[msk]`` are all "inner" points,
i.e. `points` with one level of edge points removed. For 1D, this is simply
points[1:-1,:] (assuming ordered points). For ND, we calculate and remove
the convex hull.
Parameters
----------
points : nd array (npoints, ndim)
Returns
-------
    msk : (npoints,)
        Bool array.
"""
msk = np.ones((points.shape[0],), dtype=bool)
if points.shape[1] == 1:
assert (np.diff(points[:,0]) >= 0.0).all(), ("points not monotonic")
msk[0] = False
msk[-1] = False
else:
from scipy.spatial import Delaunay
tri = Delaunay(points)
edge_idx = np.unique(tri.convex_hull)
msk.put(edge_idx, False)
return msk | 5,358,968 |
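# --- Editor's usage sketch (not part of the original source) ---------------
# For a regular 2-D grid, the mask removes the outer layer of points (the
# convex-hull vertices) and keeps the interior ones, as the docstring says.
import numpy as np

xx, yy = np.meshgrid(np.arange(4.0), np.arange(4.0))
grid_pts = np.column_stack([xx.ravel(), yy.ravel()])  # shape (16, 2)
inner_pts = grid_pts[inner_points_mask(grid_pts)]     # boundary layer removed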
def array2list(X_train: Union[np.ndarray, torch.Tensor],
y_train: Union[np.ndarray, torch.Tensor],
X_test: Union[np.ndarray, torch.Tensor],
y_test: Union[np.ndarray, torch.Tensor],
batch_size: int, memory_alloc: float = 4
) -> Union[Tuple[List[np.ndarray]], Tuple[List[torch.Tensor]]]:
"""
Splits train and test numpy arrays or torch tensors into lists of
arrays/tensors of a specified size. The remainders are not included.
"""
all_data = [X_train, y_train, X_test, y_test]
arrsize = sum([get_array_memsize(x) for x in all_data])
store_on_cpu = (arrsize / 1e9) > memory_alloc
X_train = array2list_(X_train, batch_size, store_on_cpu)
y_train = array2list_(y_train, batch_size, store_on_cpu)
X_test = array2list_(X_test, batch_size, store_on_cpu)
y_test = array2list_(y_test, batch_size, store_on_cpu)
return X_train, y_train, X_test, y_test | 5,358,969 |
def livecoding_redirect_view(request):
"""
livecoding oath2 fetch access token after permission dialog
"""
code = request.GET.get('code')
if code is None:
return HttpResponse("code param is empty/not found")
try:
url = "https://www.livecoding.tv/o/token/"
data = dict(code=code, grant_type='authorization_code', redirect_uri=LIVECODING_REDIRECT_URI,
client_id=LIVECODING_KEY, client_secret=LIVECODING_SECRET)
response = requests.post(url, data=data)
    except requests.exceptions.RequestException as e:
        print(e)
        return HttpResponse("Failed to make POST request to fetch token")
    res = json.loads(response.content)
    print(res)
    access_token = res['access_token']
    print(access_token)
    user = User.objects.get(username='admin')
    print(user)
    a, created = AccessToken.objects.get_or_create(user=user)
    print(a, created)
    a.access_token = access_token
    a.save()
    print(a)
redirect = request.GET.get('redirect')
if redirect is None:
return HttpResponse(response.content)
else:
return HttpResponseRedirect(redirect) | 5,358,970 |
def test_nonKeywordAfterKeywordSyntaxError():
"""Source which has a non-keyword argument after a keyword argument
should include the line number of the syntax error
However these exceptions do not include an offset
"""
source = """\
foo(bar=baz, bax)
"""
sourcePath = make_temp_file(source)
last_line = ' ^\n' if sys.version_info >= (3, 2) else ''
column = '12:' if sys.version_info >= (3, 2) else ''
assert_contains_errors(
sourcePath,
["""\
%s:1:%s non-keyword arg after keyword arg
foo(bar=baz, bax)
%s""" % (sourcePath, column, last_line)]) | 5,358,971 |
def test_list_time_min_length_1_nistxml_sv_iv_list_time_min_length_2_5(mode, save_output, output_format):
"""
Type list/time is restricted by facet minLength with value 6.
"""
assert_bindings(
schema="nistData/list/time/Schema+Instance/NISTSchema-SV-IV-list-time-minLength-2.xsd",
instance="nistData/list/time/Schema+Instance/NISTXML-SV-IV-list-time-minLength-2-5.xml",
class_name="NistschemaSvIvListTimeMinLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 5,358,972 |
def sort_dict(value):
"""Sort a dictionary."""
return OrderedDict((key, value[key]) for key in sorted(value)) | 5,358,973 |
def withdraw_worker():
"""
Checks every address in database for withdrawals and executes them.
Afterward burns assets
"""
while True:
try:
data = json.dumps({"password":publicserverpassword})
r = post(url + "get/withdrawdata", data).json()
address_data = r["data"]
for pair in address_data:
raddress = pair[1]
vaddress = pair[0]
value_list = versum.getaddressbalances(vaddress)
for asst in value_list:
if asst["name"] == "BTC":
value = float(asst["qty"])
if value > 0:
data = json.dumps({"vaddress": vaddress,\
"password":publicserverpassword})
r = post(url + "del/withdrawdata", data).text
if r == "Success":
                                print(btc.sendtoaddress(raddress, (value*0.995)-0.001))
versum.sendassetfrom(vaddress, \
"1XXXXXXXKhXXXXXXTzXXXXXXY6XXXXXXX5UtyF",\
"BTC", value)
        except Exception:
            # swallow transient errors so the worker keeps polling
            pass
time.sleep(60) | 5,358,974 |
def hyb_stor_capacity_rule(mod, prj, prd):
"""
Power capacity of a hybrid project's storage component.
"""
return 0 | 5,358,975 |
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass | 5,358,976 |
def gilr_layer_cpu(X, hidden_size, nonlin=tf.nn.elu,
name='gilr'):
"""
g_t = sigmoid(Ux_t + b)
h_t = g_t h_{t-1} + (1-g_t) f(Vx_t + c)
"""
with vscope(name):
n_dims = X.get_shape()[-1].value
act = fc_layer(X, 2 * hidden_size, nonlin=tf.identity)
gate, impulse = tf.split(act, 2, len(act.shape) - 1)
gate = tf.sigmoid(gate)
impulse = nonlin(impulse)
return s_linear_recurrence_cpu(gate, (1-gate) * impulse) | 5,358,977 |
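# --- Editor's reference sketch (not part of the original source) -----------
# `s_linear_recurrence_cpu` above is assumed to evaluate the first-order
# linear recurrence h_t = decay_t * h_{t-1} + impulse_t with h_0 = 0; in
# gilr_layer_cpu, decay_t = g_t and impulse_t = (1 - g_t) * f(V x_t + c).
# A plain sequential NumPy version of that recurrence, for comparison:
import numpy as np

def linear_recurrence_reference(decay, impulse):
    """O(T) loop; `decay` and `impulse` both have shape (T, hidden)."""
    h = np.zeros_like(impulse[0])
    out = np.empty_like(impulse)
    for t in range(impulse.shape[0]):
        h = decay[t] * h + impulse[t]
        out[t] = h
    return out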
def append_freecad_path():
"""Append the FreeCAD path."""
global path_to_freecad
if os.path.exists(path_to_freecad):
if os.path.isfile(path_to_freecad):
path_to_freecad = os.path.dirname(path_to_freecad)
print("Configured FreeCAD path:", path_to_freecad)
if path_to_freecad not in sys.path:
sys.path.append(path_to_freecad)
else:
print("FreeCAD path is not correct.") | 5,358,978 |
async def retry(f: Callable[..., Awaitable[A]], schedule: Schedule[Exception, Tuple[OpinionT, float]]):
"""
Run an awaitable computation,
retrying on failures according to a schedule.
"""
while True:
try:
result = await f()
except Exception as ex:
try:
opinion, delay = schedule.update(ex)
except ScheduleConcluded:
raise ex from None
else:
await asyncio.sleep(delay)
# TODO: do something with opinion
yield (ex, opinion)
        else:
            # NOTE: `return <value>` is a SyntaxError inside an async generator,
            # so the successful result is yielded and the generator then exits.
            yield result
            return | 5,358,979
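# --- Editor's sketch (not part of the original source) ---------------------
# retry() only requires that `schedule.update(ex)` return an (opinion, delay)
# pair or raise ScheduleConcluded to give up. The class below is an
# illustrative capped exponential backoff; only the `update` method and the
# ScheduleConcluded exception are assumed from the surrounding module.
class ExponentialBackoff:
    def __init__(self, base: float = 0.5, factor: float = 2.0, max_tries: int = 5):
        self.delay = base
        self.factor = factor
        self.remaining = max_tries

    def update(self, ex: Exception):
        if self.remaining <= 0:
            raise ScheduleConcluded()  # retry() will re-raise the last error
        self.remaining -= 1
        delay, self.delay = self.delay, self.delay * self.factor
        return f"retrying after {type(ex).__name__}", delay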
def save_layer_mask_debug(gia, out_file, bitmap_data, img_rect):
""" Similar to save_rect_mask(), but for an entire layer. Since layer-mask might be a rectangle smaller
than the full image, expand to full size. Also for debugging (in case the function names aren't a clue...)
"""
pic_width = gia['width']
pic_height = gia['height']
bitmap_bytes = string_to_bytes(bitmap_data)
img_back = Image.new('L', (pic_width, pic_height))
img_mask = Image.frombytes('L', (img_rect.width, img_rect.height), bitmap_bytes)
img_back.paste(img_mask, (img_rect.tl_x, img_rect.tl_y))
img_back.save(out_file, 'bmp')
img_back.close()
img_mask.close() | 5,358,980 |
def dual_solve_u(v, s, alpha, eps, verbose=False, n_iters=100, gtol=0):
"""
min_{u>=0} max_pi L(pi, u, v)
= E_xy [ u(x)alpha(x) + v(y)beta(y) + Softplus(1/eps)(s-u-v) ],
where u = min{u>=0 : E_y[pi(x,y)] <= alpha(x)}
find exact u s.t. E_y[pi(x,y)] == alpha(x)
"""
alpha = torch.as_tensor(alpha, device=s.device).clip(0, 1)
eps = torch.as_tensor(eps, device=s.device)
z = alpha.log() - (1 - alpha).log()
if alpha.amax() <= 0 or alpha.amin() >= 1: # z = +-infinity
u = -z * torch.ones_like(s[:, 0])
return u, 0
v_inp = torch.as_tensor(v, device=s.device).reshape((1, -1))
if 'CVX_STABLE' in os.environ and int(os.environ['CVX_STABLE']):
v = v_inp
else:
s = s_u_v(s, None, v)
v = None
u_min = s_u_v(s, None, v).amin(1) - z * eps - 1e-3
u_max = s_u_v(s, None, v).amax(1) - z * eps + 1e-3
    u_guess = [  # avoids large negative prior_score when s >= 0 in most valid cases
torch.zeros_like(u_min) + (0 - v_inp).amin() - z * eps - 1e-3,
]
# u_guess.extend(
# s_u_v(s, None, v).topk(
# (alpha * s.shape[1] + 1).clip(None, s.shape[1]).int()
# ).values[:, -3:].T
# )
assert (grad_u(u_min, v, s, alpha, eps) <= 0).all()
assert (grad_u(u_max, v, s, alpha, eps) >= 0).all()
for i in range(n_iters):
if i < len(u_guess):
u = u_guess[i]
else:
u = (u_min + u_max) / 2
g = grad_u(u, v, s, alpha, eps)
assert not u.isnan().any()
if g.abs().max() < gtol:
break
u_min = torch.where(g < 0, u, u_min)
u_max = torch.where(g > 0, u, u_max)
return u, (i + 1) | 5,358,981 |
def activation(formula=None, instrument=None,
flux=None, cdratio=0, fastratio=0,
mass=None, exposure=24, getdata=False):
"""Calculate sample activation using the FRM II activation web services.
``formula``:
the chemical formula, see below for possible formats
The *flux* can be specified either by:
``instrument``:
the instrument name to select flux data
or:
``flux``:
The thermal flux (for cold instruments use the equivalent
thermal flux)
``cdratio``:
The ratio between full flux and flux with 1mm Cd in the beam,
0 to deactivate
``fastratio``:
Thermal/fast neutron ratio, 0 to deactivate
``mass``:
the sample mass in g
``exposure``:
exposure time in h, default 24h
``getdata``:
In addition to printing the result table,
return a dict with the full results for further
processing
**Formula input format**
Formula:
``CaCO3``
Formula with fragments:
``CaCO3+6H2O``
Formula with parentheses:
``HO ((CH2)2O)6 H``
Formula with isotope:
``CaCO[18]3+6H2O``
Counts can be integer or decimal:
``CaCO3+(3HO1.5)2``
Mass fractions use %wt, with the final portion adding to 100%:
``10%wt Fe // 15% Co // Ni``
Volume fractions use %vol, with the final portion adding to 100%:
``10%vol [email protected] // [email protected]``
For volume fractions you have to specify the density using
``@<density>``!
Mixtures can nest. The following is a 10% salt solution by weight \
mixed 20:80 by volume with D2O:
``20%vol (10%wt [email protected] // H2O@1) // D2O@1``
"""
if formula is None:
try:
# preparation for a future enhanced sample class
formula = session.experiment.sample.formula
except (ConfigurationError, AttributeError):
# ConfigurationError is raised if no experiment is in session
pass
if formula is None:
raise UsageError('Please give a formula')
if flux:
instrument = 'Manual'
if instrument is None:
try:
instrument = session.instrument.instrument or None
except ConfigurationError:
pass
if instrument is None:
        raise UsageError('Please specify an instrument or flux')
if mass is None:
try:
            mass = session.experiment.sample.mass
except (ConfigurationError, AttributeError):
pass
if mass is None:
raise UsageError('Please specify the sample mass')
qs = '?json=1&formula=%(formula)s&instrument=%(instrument)s&mass=%(mass)g' \
% locals()
if flux:
qs += '&fluence=%(flux)f&cdratio=%(cdratio)f&fastratio=%(fastratio)f' \
% locals()
qs = ACTIVATIONURL + qs
try:
with urllib.request.urlopen(qs) as response:
data = json.load(response)
except urllib.error.HTTPError as e:
session.log.warning('Error opening: %s', qs)
session.log.warning(e)
return None
if data['ecode'] == 'unknown instrument' and flux is None:
session.log.warning('Instrument %s unknown to calculator, '
'specify flux manually', instrument)
session.log.info('Known instruments')
printTable(['instrument'], [(d, ) for d in data['instruments']],
session.log.info)
if data['result']['activation']:
h = data['result']['activation']['headers']
th = [h['isotope'], h['daughter'], h['reaction'], h['Thalf_str']]
for ha in h['activities']:
th.append(ha)
rows = []
for r in data['result']['activation']['rows']:
rd = [r['isotope'], r['daughter'], r['reaction'], r['Thalf_str']]
for a in r['activities']:
rd.append('%.3g' % a if a > 1e-6 else '<1e-6')
rows.append(rd)
dr = ['', '', '', 'Dose (uSv/h)']
for d in data['result']['activation']['doses']:
dr.append('%.3g' % d)
rows.append(dr)
printTable(th, rows, session.log.info)
else:
session.log.info('No activation')
if getdata:
return data
return | 5,358,982 |
def displayToolTips():
"""force display tool tips in maya as these are turned off by default"""
cmds.help(popupMode=True) | 5,358,983 |
def set_dashboard_conf(config):
"""
Write to configuration
@param config: Input configuration
"""
with open(DASHBOARD_CONF, "w", encoding='utf-8') as conf_object:
config.write(conf_object) | 5,358,984 |
def wrap_parfor_blocks(parfor, entry_label = None):
"""wrap parfor blocks for analysis/optimization like CFG"""
blocks = parfor.loop_body.copy() # shallow copy is enough
    if entry_label is None:
entry_label = min(blocks.keys())
assert entry_label > 0 # we are using 0 for init block here
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(entry_label, blocks[0].loc))
for block in blocks.values():
if len(block.body) == 0 or (not block.body[-1].is_terminator):
block.body.append(ir.Jump(entry_label, block.loc))
return blocks | 5,358,985 |
def crawl_lyrics(art_id):
"""抓取一整个歌手的所有歌词"""
html = get_html(start_url.format(art_id)) # 先抓该歌手的专辑列表
soup = BeautifulSoup(html, 'lxml')
artist = soup.find('h2', id='artist-name').text.strip().replace(' ', '_')
artist_dir = 'data/' + artist
if not os.path.exists(artist_dir): # 歌手目录
os.mkdir(artist_dir)
print("歌手名:", artist)
albums = soup.find('ul', class_='m-cvrlst').find_all('a', class_='msk') # 专辑列表
for album in albums:
html = get_html(base_url + album.get('href')) # 再抓取该专辑下歌曲列表
soup = BeautifulSoup(html, 'lxml')
album_title = soup.find('h2', class_='f-ff2').text.strip().replace(' ', '_').replace('/', '_') # '/'会影响目录
album_dir = os.path.join(artist_dir, album_title)
if not os.path.exists(album_dir): # 专辑目录
os.mkdir(album_dir)
print(" " + artist + "---" + album_title)
links = soup.find('ul', class_='f-hide').find_all('a') # 歌曲列表
for link in links:
song_name = link.text.strip().replace(' ', '_').replace('/', '_')
song_id = link.get('href').split('=')[1]
html = get_html(song_url.format(song_id)) # 抓取歌词
try: # 存在无歌词的歌曲,直接忽略
lyric_json = json.loads(html)
lyric_text = lyric_json['lrc']['lyric']
open(os.path.join(album_dir, song_name + '.txt'), 'w', encoding='utf-8').write(lyric_text)
print(" " + song_name + ", URL: " + song_url.format(song_id))
except:
print(" " + song_name + ": 无歌词, URL: " + song_url.format(song_id))
print() | 5,358,986 |
def frequency_of_occurrence(words, specific_words=None):
"""
    Returns a list of (word, count) tuples, sorted alphabetically and then
    from most to least common.
    If a specific_words list is given, the SUM of the frequencies of those
    words is returned instead (as a float).
"""
freq = sorted(sorted(Counter(words).items(), key=itemgetter(0)), key=itemgetter(1), reverse=True)
    if not specific_words:
return freq
else:
frequencies = 0
for (inst, count) in freq:
if inst in specific_words:
frequencies += count
return float(frequencies) | 5,358,987 |
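# --- Editor's usage example (not part of the original source) --------------
# Ties in count are broken alphabetically because of the double sort above.
words = ["to", "be", "or", "not", "to", "be"]
assert frequency_of_occurrence(words) == [("be", 2), ("to", 2), ("not", 1), ("or", 1)]
assert frequency_of_occurrence(words, ["to"]) == 2.0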
def cache_remove_all(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Removes all entries from cache, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_ALL,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
) | 5,358,988 |
def transform_bundle(bundle_uuid, bundle_version, bundle_path, bundle_manifest_path, extractor=None):
"""
This function is used with the ETL interface in dcplib.etl.DSSExtractor.extract.
    Given a bundle ID and a directory containing its metadata JSON files, it produces an intermediate representation
of the bundle and its files ready to be inserted into the database by BundleLoader.
"""
result = dict(uuid=bundle_uuid,
version=bundle_version,
manifest=json.load(open(bundle_manifest_path)),
aggregate_metadata={},
files=OrderedDict())
# Load and process all the metadata files; construct the "aggregate_metadata" doc:
# - Singleton metadata files get inserted under their name minus the extension (project.json => {"project": {...}})
# - Numbered metadata files are put in an array (assay_0.json, assay_1.json => {"assay": [{...0...}, {...1...}]})
bundle_fetched_files = sorted(os.listdir(bundle_path)) if os.path.exists(bundle_path) else []
for f in bundle_fetched_files:
if re.match(r"(.+)_(\d+).json", f):
metadata_key, index = re.match(r"(.+)_(\d+).json", f).groups()
elif re.match(r"(.+).json", f):
metadata_key, index = re.match(r"(.+).json", f).group(1), None
else:
metadata_key, index = f, None
with open(os.path.join(bundle_path, f)) as fh:
file_doc = json.load(fh)
if index is None:
result["aggregate_metadata"][metadata_key] = file_doc
else:
result["aggregate_metadata"].setdefault(metadata_key, [])
result["aggregate_metadata"][metadata_key].append(file_doc)
for fm in result["manifest"]["files"]:
if f == fm["name"] and "schema_type" in file_doc:
result["files"][fm["name"]] = dict(fm,
body=file_doc,
schema_type=file_doc['schema_type'])
# For all other (non-metadata) files from the bundle manifest, insert them with a default body
# indicating an empty schema type.
for fm in result["manifest"]["files"]:
if fm["name"] not in result["files"]:
result["files"][fm["name"]] = dict(fm,
body=None,
schema_type=None)
# Flatten the file list while preserving order.
result["files"] = list(result["files"].values())
return result | 5,358,989 |
def visualize(o, r, p):
"""visualize the camera trajction by picture
Args:
o ([array]): [f,2]
r ([array]): [f]
p ([array]): [f,2]
"""
import matplotlib.pyplot as plt
i = 5
o = o[::i]
r = r[::i]
plt.plot(p[:,0],p[:,1])
plt.plot(o[:,0],o[:,1])
plt.show() | 5,358,990 |
def choices_function() -> List[str]:
"""Choices functions are useful when the choice list is dynamically generated (e.g. from data in a database)"""
return ['a', 'dynamic', 'list', 'goes', 'here'] | 5,358,991 |
def validate_resource_identifier(rid):
"""Check that the input is a valid resource identifier."""
raise ValidationError | 5,358,992 |
def GetInfraPythonPath(hermetic=True, master_dir=None):
"""Returns (PythonPath): The full working Chrome Infra utility path.
This path is consistent for master, slave, and tool usage. It includes (in
this order):
- Any environment PYTHONPATH overrides.
- If 'master_dir' is supplied, the master's python path component.
- The Chrome Infra build path.
- The system python path.
Args:
hermetic (bool): True, prune any non-system path from the system path.
master_dir (str): If not None, include a master path component.
"""
path = PythonPath()
if master_dir:
path += GetMasterPythonPath(master_dir)
path += GetBuildPythonPath()
path += GetSysPythonPath(hermetic=hermetic)
return path | 5,358,993 |
def dl(outdir: Path = Path("data"), version: str = "v1.0"):
"""Checks that the segments in the given batch are valid."""
metadata_dir = f"https://dl.fbaipublicfiles.com/laser/CCMatrix/{version}"
file_list = [l.strip() for l in open_remote_file(metadata_dir + "/list.txt")]
outdir.mkdir(exist_ok=True)
outdir = outdir / version
outdir.mkdir(exist_ok=True)
for file in file_list:
dl_file(metadata_dir, file, outdir) | 5,358,994 |
def nodeid():
"""nodeid() -> UUID
Generate a new node id
>>> nodeid()
UUID('...')
:returns: node id
:rtype: :class:`uuid.UUID`
"""
return uuid.uuid4() | 5,358,995 |
def stackedensemble_validation_frame_test():
"""This test checks the following:
1) That passing in a validation_frame to h2o.stackedEnsemble does something (validation metrics exist).
2) It should hopefully produce a better model (in the metalearning step).
"""
# Import training set
df = h2o.import_file(path=pyunit_utils.locate("smalldata/higgs/higgs_train_5k.csv"),
destination_frame="higgs_train_5k")
test = h2o.import_file(path=pyunit_utils.locate("smalldata/higgs/higgs_test_5k.csv"),
destination_frame="higgs_test_5k")
# Identify predictors and response
x = df.columns
y = "response"
x.remove(y)
# Convert response to a factor
df[y] = df[y].asfactor()
test[y] = test[y].asfactor()
# Split off a validation_frame
ss = df.split_frame(seed = 1)
train = ss[0]
valid = ss[1]
# Set number of folds
nfolds = 5
# Train and cross-validate a GBM
my_gbm = H2OGradientBoostingEstimator(distribution="bernoulli",
ntrees=10,
nfolds=nfolds,
fold_assignment="Modulo",
keep_cross_validation_predictions=True,
seed=1)
my_gbm.train(x=x, y=y, training_frame=train)
# Train and cross-validate a RF
my_rf = H2ORandomForestEstimator(ntrees=10,
nfolds=nfolds,
fold_assignment="Modulo",
keep_cross_validation_predictions=True,
seed=1)
my_rf.train(x=x, y=y, training_frame=train)
# Train a stacked ensemble & check that validation metrics are missing
stack1 = H2OStackedEnsembleEstimator(base_models=[my_gbm.model_id, my_rf.model_id])
stack1.train(x=x, y=y, training_frame=train)
assert(stack1.model_performance(valid=True) is None)
# Train a stacked ensemble with a validation_frame & check that validation metrics exist & are correct type
stack2 = H2OStackedEnsembleEstimator(base_models=[my_gbm.model_id, my_rf.model_id])
stack2.train(x=x, y=y, training_frame=train, validation_frame=valid)
assert(type(stack2.model_performance(valid=True)) == h2o.model.metrics_base.H2OBinomialModelMetrics)
assert(type(stack2.auc(valid=True)) == float)
# Compare test AUC (ensemble with validation_frame should not be worse)
perf1 = stack1.model_performance(test_data=test)
perf2 = stack2.model_performance(test_data=test)
assert perf2.auc() >= perf1.auc() | 5,358,996 |
def test_config_groups(api):
""" Verify the ``config_groups`` method call """
PROJECT_ID = 15
api._session.request.return_value = [CG1, CG2, CG3]
cg_list = list(api.config_groups(PROJECT_ID))
exp_call = mock.call(method=GET, path=AP['get_configs'].format(project_id=PROJECT_ID))
assert all(map(lambda c: isinstance(c, dict), cg_list))
assert len(cg_list) == 3
assert api._session.request.call_args == exp_call | 5,358,997 |
def drop_redundant_cols(movies_df):
"""
Drop the following redundant columns:
    1. `release_date_wiki` - after dropping the outlier
2. `revenue` - after using it to fill `box_office` missing values
3. `budget_kaggle` - after using it to fill `budget_wiki` missing values
4. `duration` - after using it to fill `runtime` missing values
Parameters
----------
movies_df : Pandas dataframe
Joined movie data
Returns
-------
Pandas dataframe
Movie data with redundant columns dropped
"""
# Drop record with `release_date` outlier and `release_date_wiki` column
outlier_index = movies_df.loc[(movies_df['release_date_wiki'] > '2000') &
(movies_df['release_date_kaggle'] < '1960')].index
movies_df.drop(outlier_index, inplace=True)
movies_df.drop('release_date_wiki', axis=1, inplace=True)
# Pairs of redundant columns
redundant_pairs = [
['box_office', 'revenue'],
['budget_wiki', 'budget_kaggle'],
['runtime', 'duration']
]
# Fill the first column and drop the second column for each pair
for a, b in redundant_pairs:
movies_df = filla_dropb(a, b, movies_df)
return movies_df | 5,358,998 |
def print_default_settings():
"""
Print ``default_settings.py``.
"""
path = join(dirname(default_settings.__file__), 'default_settings.py')
print(open(path).read()) | 5,358,999 |