| content (string, length 22–815k) | id (int64, 0–4.91M) |
| --- | --- |
def wait(timeout):
"""
    Just wait for the timeout passed as argument.
    Can be used during test execution.
    @param timeout: timeout in seconds
    @type timeout: float
"""
try:
timeout = float(timeout)
except Exception:
raise TestWaitException("ERR_TE_002: wait initialization failed, "
"wrong type: %s" % str(timeout))
TLX.instance().log_testsuite_info(message='waiting for %s sec ...' %
str(timeout),
component=TS,
fromlevel=LEVEL_TE,
tolevel=LEVEL_USER)
time.sleep(timeout) | 5,358,700 |
def update_minor_ver_in_trunk(ver, revnum):
"""Change the minor version in trunk to the next (future) minor version.
"""
trunk_wc = get_trunk_wc_path()
trunk_url = get_trunk_url()
svn_checkout(trunk_url + '@' + (str(revnum) if revnum else ''),
trunk_wc)
prev_ver = Version('1.%d.0' % (ver.minor - 1,))
next_ver = Version('1.%d.0' % (ver.minor + 1,))
relpaths = []
relpath = 'subversion/include/svn_version.h'
relpaths.append(relpath)
edit_file(get_trunk_wc_path(relpath),
r'(#define SVN_VER_MINOR *)%s' % (ver.minor,),
r'\g<1>%s' % (next_ver.minor,))
relpath = 'subversion/tests/cmdline/svntest/main.py'
relpaths.append(relpath)
edit_file(get_trunk_wc_path(relpath),
r'(SVN_VER_MINOR = )%s' % (ver.minor,),
r'\g<1>%s' % (next_ver.minor,))
relpath = 'subversion/bindings/javahl/src/org/apache/subversion/javahl/NativeResources.java'
relpaths.append(relpath)
try:
# since r1817921 (just after branching 1.10)
edit_file(get_trunk_wc_path(relpath),
r'SVN_VER_MINOR = %s;' % (ver.minor,),
r'SVN_VER_MINOR = %s;' % (next_ver.minor,))
    except Exception:
# before r1817921: two separate places
edit_file(get_trunk_wc_path(relpath),
r'version.isAtLeast\(1, %s, 0\)' % (ver.minor,),
r'version.isAtLeast\(1, %s, 0\)' % (next_ver.minor,))
edit_file(get_trunk_wc_path(relpath),
r'1.%s.0, but' % (ver.minor,),
r'1.%s.0, but' % (next_ver.minor,))
relpath = 'CHANGES'
relpaths.append(relpath)
# insert at beginning of CHANGES file
prepend_file(get_trunk_wc_path(relpath),
'Version ' + next_ver.base + '\n'
+ '(?? ??? 20XX, from /branches/' + next_ver.branch + '.x)\n'
+ get_tag_url(next_ver) + '\n'
+ '\n')
log_msg = '''\
Increment the trunk version number to %s, and introduce a new CHANGES
section, following the creation of the %s.x release branch.
* subversion/include/svn_version.h,
subversion/bindings/javahl/src/org/apache/subversion/javahl/NativeResources.java,
subversion/tests/cmdline/svntest/main.py
(SVN_VER_MINOR): Increment to %s.
* CHANGES: New section for %s.0.
''' % (next_ver.branch, ver.branch, next_ver.minor, next_ver.branch)
commit_paths = [get_trunk_wc_path(p) for p in relpaths]
svn_commit(commit_paths + ['-m', log_msg]) | 5,358,701 |
def visual_landmarks_cca_heatmap(visual_cca, ax, title='CC'):
""" Plots the visual cca loadings as landmarks
:param ax:
:param visual_cca:
:param title:
:return:
"""
color_map = plt.get_cmap('Reds')
mapcolors = [color_map(int(x * color_map.N / 100)) for x in range(100)]
# Normalize the loadings to sum to one.
if len(visual_cca.shape) == 2:
visual_cca = visual_cca.squeeze()
visual_cca = abs(visual_cca)
if len(visual_cca) == 204:
visual_cca = np.array([visual_cca[::3], visual_cca[1::3], visual_cca[2::3]]).T
visual_cca = visual_cca / np.sum(visual_cca)
# Load the normalized landmarks
landmarks = load_normalized_face_landmarks()
landmarks -= np.mean(landmarks, axis=0)
max_landmarks = np.max(landmarks, axis=0)
min_landmarks = np.min(landmarks, axis=0)
max_landmarks += 0.1
min_landmarks -= 0.1
landmarks[:, 1] = -landmarks[:, 1]
# Define ellipses based on the importance in the loadings
ells = [Ellipse(xy=landmarks[i, :], width=0.04, height=0.04, angle=0) for i in range(len(landmarks))]
ells_center = [Ellipse(xy=landmarks[i, :], width=0.005, height=0.005, angle=0) for i in range(len(landmarks))]
if len(visual_cca.shape) == 2:
mean_visual_cca = np.mean(visual_cca, axis=1)
color_sort = np.round((mean_visual_cca / max(mean_visual_cca)) * len(mapcolors)) - 1
else:
color_sort = np.round((visual_cca / max(visual_cca)) * len(mapcolors)) - 1
# Plots the ellipses
for e, color_idx in zip(ells, color_sort):
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5) # how transparent or pastel the color should be
e.set_facecolor(mapcolors[int(color_idx)])
# Plots the center of ellipses
for e in ells_center:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(1) # how transparent or pastel the color should be
e.set_facecolor(mapcolors[-1])
ax.set_xlim(min_landmarks[0], max_landmarks[0])
    ax.set_ylim(-max_landmarks[1], -min_landmarks[1])
# With frame around plot
ax.set(xticks=[], yticks=[])
if title:
ax.text(0.5, 1.05, title, horizontalalignment='center', transform=ax.transAxes) | 5,358,702 |
def test_many2many_through_ext():
"""
>>> db = get_connection('sqlite://')
>>> db.echo = False
>>> db.metadata.drop_all()
>>> db.metadata.clear()
>>> class User(Model):
... username = Field(CHAR, max_length=20)
>>> def _save1(x):
... x['flag'] = '1'
>>> def _save2(x):
... x['flag'] = '2'
>>> def _default1():
... R = Relation
... return R.c.flag == '1'
>>> def _default2():
... R = Relation
... return R.c.flag == '2'
>>> class Group(Model):
... name = Field(str, max_length=20)
... users1 = ManyToMany(User, through='relation', before_save=_save1, default_condition=_default1)
... users2 = ManyToMany(User, through='relation', before_save=_save2, default_condition=_default2)
>>> class Relation(Model):
... user = Reference(User)
... group = Reference(Group)
... flag = Field(CHAR, max_length=1)
>>> a = User(username='a')
>>> a.save()
True
>>> b = User(username='b')
>>> b.save()
True
>>> c = User(username='c')
>>> c.save()
True
>>> d = User(username='d')
>>> d.save()
True
>>> g1 = Group(name='G1')
>>> g1.save()
True
>>> g2 = Group(name='G2')
>>> g2.save()
True
>>> g1.users1.add(a)
True
>>> g1.users1.add(b)
True
>>> g1.users2.add(c)
True
>>> g1.users2.add(d)
True
>>> print list(g1.users1.all())
[<User {'username':u'a','id':1}>, <User {'username':u'b','id':2}>]
>>> print list(g1.users2.all())
[<User {'username':u'c','id':3}>, <User {'username':u'd','id':4}>]
>>> print g1.users1.has(a)
True
""" | 5,358,703 |
def waitAndLocate(btn_img, params):
"""
Function to locate a button in the window
    :param btn_img: path to the image of the button to look for
    :param params: dict of runtime options (e.g. the 'no_fullscreen' flag)
    :return: coordinates + dimensions of the button
"""
start = time.time()
while True:
if time.time() - start > (3*60):
print("Timeout Error")
raise TimeoutError(f"wait and locate exceeded {str(time.time()-start)}")
# Find window and maximize
if 'no_fullscreen' not in params or params['no_fullscreen'] == False:
maximizeWindows(params)
# Make foreground window full screen - replaced with exact window name lookup
# win32gui.ShowWindow(win32gui.GetForegroundWindow(), win32con.SW_MAXIMIZE)
# Look for the button on the screen
res = pyautogui.locateOnScreen(btn_img, confidence=0.75)
# If button is found, return the location
if (res):
return res
# Wait 0.5 seconds before retrying to keep CPU usage low
time.sleep(0.5) | 5,358,704 |
def make_preds_batch(classifier: nn.Module,
batch_elements: List[SentenceEvidence],
device: str=None,
criterion: nn.Module=None,
tensorize_model_inputs: bool=True) -> Tuple[float, List[float], List[int], List[int]]:
"""Batch predictions
Args:
classifier: a module that looks like an AttentiveClassifier
batch_elements: a list of elements to make predictions over. These must be SentenceEvidence objects.
device: Optional; what compute device this should run on
criterion: Optional; a loss function
tensorize_model_inputs: should we convert our data to tensors before passing it to the model? Useful if we have a model that performs its own tokenization
"""
    # delete any "None" padding, if any (imposed by the use of the "grouper");
    # materialize as a list so it can be iterated more than once below
    batch_elements = [x for x in batch_elements if x is not None]
targets, queries, sentences = zip(*[(s.kls, s.query, s.sentence) for s in batch_elements])
ids = [(s.ann_id, s.docid, s.index) for s in batch_elements]
targets = torch.tensor(targets, dtype=torch.long, device=device)
if tensorize_model_inputs:
queries = [torch.tensor(q, dtype=torch.long) for q in queries]
sentences = [torch.tensor(s, dtype=torch.long) for s in sentences]
#queries = PaddedSequence.autopad(queries, device=device, batch_first=batch_first)
#sentences = PaddedSequence.autopad(sentences, device=device, batch_first=batch_first)
preds = classifier(queries, ids, sentences)
targets = targets.to(device=preds.device)
if criterion:
loss = criterion(preds, targets)
else:
loss = None
# .float() because pytorch 1.3 introduces a bug where argmax is unsupported for float16
hard_preds = torch.argmax(preds.float(), dim=-1)
return loss, preds, hard_preds, targets | 5,358,705 |
def extract_validation(filename):
"""Extract certificate validation data from input file."""
with open(filename, "rt") as input_file:
event = json.load(input_file)
for participante in event.get("participantes", []):
fingerprint = participante["fingerprint"]
certificate = {
"validation_code": fingerprint,
"name": participante["nome"],
"date": event["data"],
"city": event["cidade"],
"institution": event["instituicao"],
"hours": 0,
"organization": 0,
"presentations": [],
}
if "roles" in participante:
if "participante" in participante["roles"]:
certificate["hours"] = event["horas"]
if "organizador" in participante["roles"]:
certificate["organization"] = event["horas_organizacao"]
if "palestrante" in participante["roles"]:
certificate["presentations"] = participante["palestras"]
else:
certificate["hours"] = event["horas"]
# save certificate validation data
dir = "%s/certificates/%s" % (__cert_dir, fingerprint[0:2])
if not os.path.isdir(dir):
os.makedirs(dir)
with open("%s/%s" % (dir, fingerprint), "wt") as cert_file:
json.dump(certificate, cert_file, indent=4) | 5,358,706 |
def quick_sort(array):
"""
    Not in-place; standard functional version.
    """
    if not array:
return []
else:
pivot = array[-1]
smaller = quick_sort([x for x in array[0:-1] if x <= pivot])
larger = quick_sort([x for x in array[0:-1] if x > pivot])
return smaller + [pivot] + larger | 5,358,707 |
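
A minimal usage sketch for the function above (assumes only the standard library; the sample data is made up):

```python
import random

data = random.sample(range(20), 10)          # a small shuffled list
assert quick_sort(data) == sorted(data)      # agrees with the built-in sort
print(quick_sort([3, 1, 2]))                 # [1, 2, 3]
```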
def calc_laplacian_matrix(D, W):
"""
    Given a graph's degree matrix and similarity matrix, compute the Laplacian matrix.
    :param W: similarity (affinity) matrix
    :param D: degree matrix of the graph
    :return: Laplacian matrix
"""
return D - W | 5,358,708 |
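
A small worked example of the unnormalized graph Laplacian L = D - W (a sketch using NumPy; the 3-node similarity matrix is invented for illustration):

```python
import numpy as np

# Toy symmetric similarity (adjacency) matrix for a 3-node graph.
W = np.array([[0.0, 0.5, 0.2],
              [0.5, 0.0, 0.3],
              [0.2, 0.3, 0.0]])
D = np.diag(W.sum(axis=1))        # degree matrix: diagonal of row sums
L = calc_laplacian_matrix(D, W)
print(L)                          # degrees on the diagonal, -W off-diagonal
print(L.sum(axis=1))              # each row of an unnormalized Laplacian sums to 0
```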
def on_fire(client, userdata, message):
"""
*Callback function parses a FireStarted message and switches FireState from "undefined" to "started"*
.. literalinclude:: /../examples/firesat/fires/main_fire.py
:lines: 68-73
"""
for index, observer in enumerate(app.simulator._observers):
if isinstance(observer, Environment):
app.simulator._observers[index].on_fire(client, userdata, message) | 5,358,709 |
def package_upgrade(distupgrade=False):
"""Updates every package present on the system.""" | 5,358,710 |
def _potrf_mhlo(platform, gpu_solver, dtype, a, lower):
"""Cholesky decomposition."""
a_type = ir.RankedTensorType(a.type)
dims = a_type.shape
m, n = dims[-2:]
assert m == n
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
lwork, opaque = gpu_solver.build_potrf_descriptor(
np.dtype(dtype), lower, batch, n)
layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
info_layout = tuple(range(num_bd - 1, -1, -1))
i32_type = ir.IntegerType.get_signless(32)
info_type = ir.RankedTensorType.get(batch_dims, i32_type)
work_layout = [0]
out = custom_call(
f"{platform}solver_potrf",
[
a.type,
info_type,
ir.RankedTensorType.get([lwork], ir.IntegerType.get_signless(8)),
],
[a],
backend_config=opaque,
operand_layouts=[layout],
result_layouts=[layout, info_layout, work_layout])
return out[:2] | 5,358,711 |
def deduplicate_fasta(args):
"""Deduplicata a fasta file."""
# get the number of genes per cluster
with MaybeCompressed(args.fasta, "rt") as stream:
fbuffer = []
ftuple = namedtuple("FTUPLE", ["name", "sequence"])
for name, sequence in fasta(stream, toupper=False, fullnames=True):
fbuffer.append(ftuple(name, sequence))
    # deduplicate the FASTA records
dbuffer = [ft for ft in deduplicate(fbuffer)]
with MaybeCompressed(args.output, "wt") as stream:
for entry in dbuffer:
stream.write(">{0}\n{1}\n".format(entry.name, entry.sequence)) | 5,358,712 |
def _bin2bcd(value):
"""Convert a binary value to binary coded decimal.
:param value: the binary value to convert to BCD. (required, no default)
"""
return value + 6 * (value // 10) | 5,358,713 |
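
Why `value + 6 * (value // 10)` works for two-digit values: BCD keeps each decimal digit in its own nibble, so decimal digits d1,d0 must be encoded as 16*d1 + d0, and adding 6*d1 to the binary value 10*d1 + d0 gives exactly that. A small sketch:

```python
assert _bin2bcd(42) == 0x42   # 42 + 6 * 4 = 66 = 0x42
assert _bin2bcd(59) == 0x59   # e.g. a seconds value on a BCD real-time clock
assert _bin2bcd(7) == 0x07    # single digits are unchanged
```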
def osm_get_info(idx):
"""Получаем информацию об административной территории
"""
link = 'https://www.openstreetmap.org/api/0.6/relation/' + str(idx)
response = requests.get(link)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'xml')
subarea_ids = [member.get('ref') for member in soup.find_all('member', {'role':'subarea'})]
name = soup.find('tag', {'k': 'name'})
name = name.get('v')
return name, subarea_ids
return False | 5,358,714 |
def get_test_cases_coverage(session_id):
"""
coverage by test case
"""
tc_stats={}
tc_stats_list=[]
total_executed=0
sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!="null"'
params={"sid":session_id}
conn=sqlite3.connect(CONNECTION_STRING)
c=conn.cursor()
c.execute(sql,params)
tests=c.fetchall()
conn.close()
if len(tests)>0:
for t in tests:
total_executed=0
sql="SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid"
params={"sid":session_id,"tid":t[0]}
conn=sqlite3.connect(CONNECTION_STRING)
c=conn.cursor()
c.execute(sql,params)
files=c.fetchall()
conn.close()
for f in files:
line_count=get_executable_lines_count_for_file(f[0])
# get executions
sql="SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid"
params={"sid":session_id,"tid":t[0],"fid":f[0]}
conn=sqlite3.connect(CONNECTION_STRING)
c=conn.cursor()
c.execute(sql,params)
executed=c.fetchone()
conn.close()
total_executed+=executed[0]
# save test case and it's executions
tc_stats={}
tc_stats["test_id"]=t[0]
tc_stats["total_executed"]=total_executed
tc_stats["total_executed"]
tc_stats_list.append(tc_stats)
return tc_stats_list | 5,358,715 |
def can_minimize_file(file_path):
"""Check to see if we support minimization for this file."""
# If this is not a binary file, we should be able to minimize it in some way.
if not utils.is_binary_file(file_path):
return True
# Attempt to minimize IPC dumps.
if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
return supports_ipc_minimization(file_path)
# Other binary file formats are not supported.
return False | 5,358,716 |
def generate_new_xen_xml(VIRSH_TEMPLATE, vm_name,
disk_img,
mac_addr,
memory_size=1048576, # 1GB of memory
cpu_count=1):
"""
Given a name, disk, and mac, this will output the appropriate xml
config
"""
tmp = VIRSH_TEMPLATE
tmp = tmp.replace(REPLACE_STRINGS.vm_name, vm_name)
tmp = tmp.replace(REPLACE_STRINGS.disk_img, disk_img)
tmp = tmp.replace(REPLACE_STRINGS.mac_addr, mac_addr)
tmp = tmp.replace(REPLACE_STRINGS.memory_size, str(memory_size))
tmp = tmp.replace(REPLACE_STRINGS.cpu_count, str(cpu_count))
return tmp | 5,358,717 |
def literal_query(query):
"""Don't interprete any special query syntax
SQLite's FTS extensions support special query syntax for AND, OR and
prefix searches, as well as grouping and negation. There are not of much
use in the dictionary case, but they break some legitimate queries. So
let's treat all queries literally by enlosing them in quotes.
"""
return '"' + query.replace('"', '') + '"' | 5,358,718 |
def gene2symbol(key: str, value: str) -> Dict[str, str]:
"""Map between S. pombe gene IDs, symbols, synonyms and names.
# Arguments
key: str, one of {"ID", "Symbol", "Synonym", "Name"}
value: str, one of {"ID", "Symbol", "Synonym", "Name"}
# Returns
dict: key value mapping
"""
df = pd.read_csv(os.path.join(data, "sysID2product.tsv"),
skiprows=1,
header=None,
sep="\t")
df.columns = ["ID", "Symbol", "Synonymns", "Name"]
return dictify(df, key, value) | 5,358,719 |
def initialize_application():
"""Create a config file and database as necessary"""
config_dir = util.get_config_dir()
config_path = util.get_config_path()
if os.path.exists(config_path):
if not click.confirm(('Churn has already been initialized.\n'
'Delete your existing data, and '
'reinitialize?')):
exit(0)
try:
os.makedirs(config_dir)
except FileExistsError:
pass
click.echo('Default directory: ' + config_dir)
use_default = click.confirm('Store your data in the default directory?')
db_name = 'churn.db'
db_path = os.path.join(config_dir, db_name)
if not use_default:
given_dir = click.prompt('Absolute directory')
while not os.path.isdir(given_dir):
click.secho('Invalid directory.', fg='red')
given_dir = click.prompt('Absolute directory')
db_path = os.path.join(given_dir, db_name)
config = configparser.ConfigParser()
config['database'] = {
'path': db_path,
'version': 1
}
with open(config_path, 'w') as config_file:
config.write(config_file)
try:
os.remove(db_path)
except FileNotFoundError:
pass
_initialize_tables(db_path) | 5,358,720 |
def run(num_trials=NUM_TRIALS_DEFAULT, dataset=get_dataset_names(),
algorithm=get_algorithm_names()):
"""
- Step 1: load preprocessed data and split it into train and test by 2/3 and 1/3
- Step 2: train and evaluate algorithm by calling `run_eval_alg`
- Step 3: write results (metrics eval and predicted probs) to file
:param num_trials:
:param dataset:
:param algorithm:
:return:
"""
algorithms_to_run = algorithm
print(algorithms_to_run)
print("Datasets: %s" % dataset)
for dataset_obj in DATASETS:
if not dataset_obj.get_dataset_name() in dataset:
continue
print("Evaluating dataset:" + dataset_obj.get_dataset_name())
processed_dataset = ProcessedData(dataset_obj)
# train_test_splits: a dict maps key to a list of (train, test) tuple of length num_trials
train_test_splits = processed_dataset.create_train_test_splits(num_trials)
all_sensitive_attributes = dataset_obj.get_sensitive_attributes() # dataset_obj.get_sensitive_attributes_with_joint()
print("All sensitive attributes:" + ",".join(all_sensitive_attributes))
for sensitive in all_sensitive_attributes:
print("Sensitive attribute:" + sensitive)
detailed_files = dict((k, create_detailed_file(
dataset_obj.get_results_filename(sensitive, k),
dataset_obj,
processed_dataset.get_sensitive_values(k), k))
for k in train_test_splits.keys())
for i in range(0, num_trials):
predicted_probs_dict = {}
for algorithm in ALGORITHMS:
print("\n\n", algorithm.name)
if not algorithm.get_name() in algorithms_to_run:
print("!!!!!!!!!!")
continue
for supported_tag in algorithm.get_supported_data_types():
train, test = train_test_splits[supported_tag][i] # train and test are pandas.DataFrame
try:
eval_output = run_eval_alg(algorithm, train, test, dataset_obj, processed_dataset,
all_sensitive_attributes, sensitive, supported_tag)
except Exception as e:
print("Failed: %s" % e)
else:
write_alg_results(detailed_files[supported_tag],
algorithm.get_name(), eval_output['params'], i,
eval_output['metrics_eval'])
predicted_probs_dict[algorithm.name] = eval_output['test_prediction_probs']
if i == 0:
# write the predicted probs on testset to file
filename = dataset_obj.get_predicted_probs_filename(sensitive)
write_predicted_probs(filename, test, dataset_obj, predicted_probs_dict)
print("Results written to:")
for supported_tag in algorithm.get_supported_data_types():
print(" %s" % dataset_obj.get_results_filename(sensitive, supported_tag))
print(" %s" % dataset_obj.get_predicted_probs_filename(sensitive))
for detailed_file in detailed_files.values():
detailed_file.close() | 5,358,721 |
def graph_from_string(s):
"""
Turn a string like "1 2; 1->2" into a graph.
"""
vertex_string, edge_string = s.split(';')
vertices = vertex_string.split()
edge_pairs = []
for edge_sequence in edge_string.split():
sequence_nodes = edge_sequence.split('->')
for tail, head in zip(sequence_nodes[:-1], sequence_nodes[1:]):
edge_pairs.append((tail, head))
return DirectedGraph.from_edge_pairs(vertices, edge_pairs) | 5,358,722 |
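
The mini-language accepted above is: vertices before the semicolon, then whitespace-separated `->` chains. A hypothetical usage sketch (assumes the `DirectedGraph` class from the surrounding module):

```python
g = graph_from_string("1 2 3; 1->2->3 1->3")
# Equivalent to:
# DirectedGraph.from_edge_pairs(['1', '2', '3'],
#                               [('1', '2'), ('2', '3'), ('1', '3')])
```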
def WideResnetBlocknt(channels, strides=(1,1), channel_mismatch=False, batchnorm='std', parameterization='ntk'):
"""A WideResnet block, with or without BatchNorm."""
    Main = stax_nt.serial(
        _batch_norm_internal(batchnorm), stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), strides, padding='SAME', parameterization=parameterization),
        _batch_norm_internal(batchnorm), stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), padding='SAME', parameterization=parameterization))
    Shortcut = stax_nt.Identity() if not channel_mismatch else stax_nt.Conv(
        channels, (3, 3), strides, padding='SAME', parameterization=parameterization)
return stax_nt.serial(stax_nt.FanOut(2), stax_nt.parallel(Main, Shortcut), stax_nt.FanInSum()) | 5,358,723 |
def saveOTF(font, destFile, truetype=False):
"""Save a RoboFab font as an OTF binary using ufo2fdk."""
if truetype:
compiler = compileTTF
else:
compiler = compileOTF
otf = compiler(font, featureCompilerClass=RobotoFeatureCompiler,
kernWriter=RobotoKernWriter)
otf.save(destFile) | 5,358,724 |
def inverse_pinhole_matrix(pinhole, eps=1e-6):
"""
Returns the inverted pinhole matrix from a pinhole model
"""
assert len(pinhole.shape) == 2 and pinhole.shape[1] == 12, pinhole.shape
# unpack pinhole values
fx, fy, cx, cy = torch.chunk(pinhole[..., :4], 4, dim=1) # Nx1
# create output container
k = torch.eye(4, device=pinhole.device, dtype=pinhole.dtype)
k = k.view(1, 4, 4).repeat(pinhole.shape[0], 1, 1) # Nx4x4
# fill output with inverse values
k[..., 0, 0:1] = 1. / (fx + eps)
k[..., 1, 1:2] = 1. / (fy + eps)
k[..., 0, 2:3] = -1. * cx / (fx + eps)
k[..., 1, 2:3] = -1. * cy / (fy + eps)
return k | 5,358,725 |
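
The returned tensor is the closed-form inverse of the 4x4 pinhole intrinsics matrix; with the `eps` terms omitted, the underlying algebra is (a sketch):

```latex
K=\begin{pmatrix} f_x & 0 & c_x & 0\\ 0 & f_y & c_y & 0\\ 0 & 0 & 1 & 0\\ 0 & 0 & 0 & 1 \end{pmatrix},
\qquad
K^{-1}=\begin{pmatrix} 1/f_x & 0 & -c_x/f_x & 0\\ 0 & 1/f_y & -c_y/f_y & 0\\ 0 & 0 & 1 & 0\\ 0 & 0 & 0 & 1 \end{pmatrix}
```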
async def init_integration_empty_response(hass) -> MockConfigEntry:
"""Set up the Nightscout integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_URL: "https://some.url:1234"},
)
with patch(
"homeassistant.components.nightscout.NightscoutAPI.get_sgvs", return_value=[]
), patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
return_value=SERVER_STATUS,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry | 5,358,726 |
def create_DCT_NETWORK_INFO(networkid: str) -> dict:
"""Computes dictionary DCT_NETWORK_INFO for XML file
:param networkid: network identifier
:type networkid: str
    :return: the updated DCT_NETWORK_INFO dictionary
    :rtype: dict
"""
DCT_NETWORK_INFO.update({"id": networkid})
return DCT_NETWORK_INFO | 5,358,727 |
def retrieve_question(request, uuid):
"""
"""
try:
question = Question.objects.get(pk=uuid)
except (Question.DoesNotExist, ValueError):
response_data = {
"error": {
"state": "not found",
"details": "Question object with ID {} could not be found.".format(uuid)
}
}
return Response(response_data, status=status.HTTP_404_NOT_FOUND)
if question.survey.is_private:
if request.user.is_authenticated:
if request.user == question.survey.admin or request.user in question.survey.users.all():
serializer = QuestionSerializer(question, context={'request': request})
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
else:
return Response({"error": "This question is part of a private survey."}, status=status.HTTP_403_FORBIDDEN)
else:
return Response({"error": "Please login."}, status=status.HTTP_401_UNAUTHORIZED)
else:
serializer = QuestionSerializer(question, context={'request': request})
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK) | 5,358,728 |
def gamescriptToJson(title: str, version: str = None) -> dict:
"""
    Get game script hierarchy as a dictionary (for saving as json, etc)
"""
scripts = GameScript.objects.all().filter(title=title)
if version:
scripts = scripts.filter(version=version)
if len(scripts) == 0:
print("No title with that name and version")
return
if len(scripts) > 1:
print("The following titles with versions were found. Please choose one.")
print([script.title for script in scripts])
return
script = scripts[0]
return script.toJson() | 5,358,729 |
def is_ignored_faces(faces):
"""Check if the faces are ignored faces.
Args:
faces: Encoded face from face_recognition.
Returns:
bool: If a not ignored face appeared, return false, otherwise true.
"""
global ignored_faces
for face in faces:
matches = face_recognition.compare_faces(ignored_faces, face)
        if not any(matches):
return False
return True | 5,358,730 |
def get_prediction_model_status(hub_name: Optional[str] = None,
prediction_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPredictionModelStatusResult:
"""
The prediction model status.
:param str hub_name: The name of the hub.
:param str prediction_name: The name of the Prediction.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['hubName'] = hub_name
__args__['predictionName'] = prediction_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:customerinsights/v20170426:getPredictionModelStatus', __args__, opts=opts, typ=GetPredictionModelStatusResult).value
return AwaitableGetPredictionModelStatusResult(
message=__ret__.message,
model_version=__ret__.model_version,
prediction_guid_id=__ret__.prediction_guid_id,
prediction_name=__ret__.prediction_name,
signals_used=__ret__.signals_used,
status=__ret__.status,
tenant_id=__ret__.tenant_id,
test_set_count=__ret__.test_set_count,
training_accuracy=__ret__.training_accuracy,
training_set_count=__ret__.training_set_count,
validation_set_count=__ret__.validation_set_count) | 5,358,731 |
def set_description_bool(resources, resource_texts):
"""Add bool 'has_description' for every resource."""
for i in resources:
resources[i]["has_description"] = False
if resource_texts.get(i):
resources[i]["has_description"] = True | 5,358,732 |
def build_2d_grid(ir):
""" Build simple grid with a column for each gate."""
grid = []
for g in ir.gates:
step = [None] * ir.ngates
if g.is_single():
step[g.idx0] = g
if g.is_ctl():
step[g.ctl] = g.ctl
step[g.idx1] = g
grid.append(step)
return grid | 5,358,733 |
def run_thread(execute=True):
"""
Start pdconfd service as a thread.
This function schedules pdconfd to run as a thread and returns immediately.
"""
global configManager
configManager = ConfigManager(settings.PDCONFD_WRITE_DIR, execute)
reactor.callFromThread(listen, configManager) | 5,358,734 |
def parse_args():
"""
Wrapper function of argument parsing process.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--save_loc', type=str, default='.',
help='where to save results'
)
parser.add_argument(
'--log_dir', type=str, default=os.path.join('logs', f'{_current_file_name}.{_time}.log'),
help='the directory of the log file'
)
args = parser.parse_args()
return args | 5,358,735 |
def emmental_collate_fn(
batch: Union[List[Tuple[Dict[str, Any], Dict[str, Tensor]]], List[Dict[str, Any]]],
min_data_len: int = 0,
max_data_len: int = 0,
) -> Union[Tuple[Dict[str, Any], Dict[str, Tensor]], Dict[str, Any]]:
"""Collate function.
Args:
batch: The batch to collate.
min_data_len: The minimal data sequence length, defaults to 0.
max_data_len: The maximal data sequence length (0 means no limit), defaults to 0.
Returns:
The collated batch.
"""
X_batch: defaultdict = defaultdict(list)
Y_batch: defaultdict = defaultdict(list)
for item in batch:
# Check if batch is (x_dict, y_dict) pair
if isinstance(item, dict):
x_dict = item
y_dict: Dict[str, Any] = dict()
else:
x_dict, y_dict = item
for field_name, value in x_dict.items():
if isinstance(value, list):
X_batch[field_name] += value
else:
X_batch[field_name].append(value)
for label_name, value in y_dict.items():
if isinstance(value, list):
Y_batch[label_name] += value
else:
Y_batch[label_name].append(value)
field_names = copy.deepcopy(list(X_batch.keys()))
for field_name in field_names:
values = X_batch[field_name]
# Only merge list of tensors
if isinstance(values[0], Tensor):
item_tensor, item_mask_tensor = list_to_tensor(
values,
min_len=min_data_len,
max_len=max_data_len,
)
X_batch[field_name] = item_tensor
if item_mask_tensor is not None:
X_batch[f"{field_name}_mask"] = item_mask_tensor
for label_name, values in Y_batch.items():
Y_batch[label_name] = list_to_tensor(
values,
min_len=min_data_len,
max_len=max_data_len,
)[0]
if len(Y_batch) != 0:
return dict(X_batch), dict(Y_batch)
else:
return dict(X_batch) | 5,358,736 |
def trap_jac_factory(j, dt):
"""Factory function to return a function for evaluating the Jacobian
of the trapezoidal formula. This returns a function of x_n (x at
this time step).
:param j: Jacobian of the function of x.
:param dt: time step.
:returns: trap_jac, callable which takes x_n and evaluates the
Jacobian of the trapezoidal formula.
"""
def trap_jac(x_n):
"""Function to compute the Jacobian of the implicit trapezoidal
equation.
"""
return np.identity(x_n.shape[0]) - dt / 2 * j(x_n)
return trap_jac | 5,358,737 |
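
The returned expression follows from writing the implicit trapezoidal step as a root-finding problem in the unknown x_n (a sketch of the standard derivation, matching the code above):

```latex
F(x_n) = x_n - x_{n-1} - \frac{\Delta t}{2}\,\bigl[f(x_{n-1}) + f(x_n)\bigr] = 0
\quad\Longrightarrow\quad
\frac{\partial F}{\partial x_n} = I - \frac{\Delta t}{2}\,J(x_n)
```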
async def about(message: types.Message):
"""return the current price of $JOE and $AVAX, the market cap, the circulating supply and the TVL."""
if not timer.canMessageOnChatId(message.chat.id):
return
about = JoeSubGraph.getAbout()
await bot.send_message(message.chat.id, about) | 5,358,738 |
def get_position_object(file_path: FilePathType):
"""
Read position data from .bin or .pos file and convert to
pynwb.behavior.SpatialSeries objects. If possible it should always
be preferred to read position data from the `.bin` file to ensure
samples are locked to ecephys time courses.
Parameters:
----------
file_path (Path or Str):
Full file_path of Axona file with any extension.
Returns:
-------
position (pynwb.behavior.Position)
"""
position = Position()
position_channel_names = [
"time(ms)",
"X",
"Y",
"x",
"y",
"PX",
"px",
"px_total",
"unused",
]
if Path(file_path).suffix == ".bin":
position_data = read_bin_file_position_data(file_path)
else:
position_data = read_pos_file_position_data(file_path)
position_timestamps = position_data[:, 0]
for ichan in range(0, position_data.shape[1]):
spatial_series = SpatialSeries(
name=position_channel_names[ichan],
timestamps=position_timestamps,
data=position_data[:, ichan],
reference_frame="start of raw acquisition (.bin file)",
)
position.add_spatial_series(spatial_series)
return position | 5,358,739 |
def throw_dice(n):
"""Throw `n` dice, returns list of integers"""
results = []
while n > 0:
results += [random.randint(1,6)]
n = n-1
return results | 5,358,740 |
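
A quick usage sketch; seeding makes the otherwise random output repeatable (assumes `random` is already imported by the module, as the function requires):

```python
random.seed(0)
print(throw_dice(5))         # five values, each between 1 and 6
print(sum(throw_dice(2)))    # total of a two-dice roll, somewhere in 2..12
```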
def verify_token_signature(token):
"""Verify the signature of the token and return the claims
such as subject/username on valid signature"""
key = jwk.JWK.from_password(flask.current_app.config.get("SECRET_KEY"))
try:
jwttoken = jwt.JWT(key=key, jwt=token, algs=["HS256"])
return json.loads(jwttoken.claims)
except jwt.JWTExpired:
# jwt dependency uses a 60 seconds leeway to check exp
# it also prints out a stack trace for it, so we handle it here
raise AuthenticationError(message="Expired token") | 5,358,741 |
def logtime_r2(t, y, ppd):
"""
Convert y=f(t) data from linear in time to logarithmic in time.
Args:
t: is the input time vector, linearly spaced
y: is the input vector of y values
ppd: number of points per decade for the output
Returns:
A 3-tuple (tout, yout, wt) where tout and yout are logarithimically-spaced
versions of t and y and wt is a vector of weights giving the number of points
averaged for each yout value.
"""
zt = len(t)
zy = len(y)
assert zt == zy
# Find the index of t = 0 by taking the index where t^2 is minimum.
indzero = np.argmin(np.power(t,2))
if t[indzero] < 0:
indzero += 1
# tmin is minimum nonzero value of t after start.
tmin = t[indzero]
tmax = np.max(t)
if tmin == 0:
tmin = t[indzero+1]
ltmin = np.log10(tmin)
ltmax = np.log10(tmax)
tt = np.arange(ltmin, ltmax, 1/(2*ppd))
tt = np.power(10, tt)
ztt = tt.size
# perform resampling from indzero to end, forward in time
icount, jcount = indzero, 0
tout, yout, wt = np.zeros(ztt), np.zeros(ztt), np.zeros(ztt)
for i in np.arange(1, ztt, 2):
# accumulate points until we reach the end of the interval
while icount < zt and t[icount] < tt[i]:
tout[jcount] = tout[jcount] + t[icount]
yout[jcount] = yout[jcount] + y[icount]
wt[jcount] += 1
icount += 1
# If we accumulated data points, then average by the number of points.
if wt[jcount] > 0:
tout[jcount] = tout[jcount] / wt[jcount];
yout[jcount] = yout[jcount] / wt[jcount];
jcount += 1
# Purposely allocated too much space at the start. Trim zeroes from the end.
yout = np.trim_zeros(yout, 'b')
tout = tout[:yout.size]
wt = wt[:yout.size]
# If we started at the beginning, then we are done.
if indzero == 0:
return (tout, yout, wt)
# If not, perform resampling from indzero backwards in time.
tmp_t, tmp_y = -t[indzero-1::-1], y[indzero-1::-1]
tmp_zt = len(tmp_t)
icount, jcount = 0, 0
tmp_tout, tmp_yout, tmp_wt = np.zeros(ztt), np.zeros(ztt), np.zeros(ztt)
for i in np.arange(1, ztt, 2):
while icount < tmp_zt and tmp_t[icount] < tt[i]:
tmp_tout[jcount] = tmp_tout[jcount] + tmp_t[icount]
tmp_yout[jcount] = tmp_yout[jcount] + tmp_y[icount]
tmp_wt[jcount] += 1
icount += 1
if tmp_wt[jcount] > 0:
tmp_tout[jcount] = tmp_tout[jcount] / tmp_wt[jcount];
tmp_yout[jcount] = tmp_yout[jcount] / tmp_wt[jcount];
jcount += 1
# Purposely allocated too much space at the start. Trim zeroes from the end.
tmp_yout = np.trim_zeros(tmp_yout, 'b')
tmp_tout = tmp_tout[:tmp_yout.size]
tmp_wt = tmp_wt[:tmp_yout.size]
# Concat results and return
return (np.concatenate([-tmp_tout[::-1], tout]),
np.concatenate([tmp_yout[::-1], yout]),
np.concatenate([tmp_wt[::-1], wt])) | 5,358,742 |
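
A minimal usage sketch with made-up data (assumes NumPy as `np`, as the function itself does): a decaying signal sampled on a linear time base is resampled onto a log-spaced axis.

```python
import numpy as np

t = np.linspace(-1.0, 10.0, 20001)          # linear time base, includes t < 0
y = np.exp(-np.abs(t))                      # some signal measured on that grid
tout, yout, wt = logtime_r2(t, y, 10)       # roughly 10 output points per decade
print(len(t), '->', len(tout))              # far fewer, log-spaced samples
print(wt[:5])                               # raw points averaged into each bin
```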
def parcours_serpentin(n):
"""Retourne la liste des indices (colonne,ligne) (!!attention ici
ligne et colonne sont inversées!!) des cases correspondant à un
parcours de tableau de taille n x n en serpentin.
Ex: pour T = [ [1,2,3],
[4,5,6],
[7,8,9] ]
le parcours correspond aux cases 1,2,3,6,9,8,7,4,5 et la
fonction retournera la liste d'indices [(0,0),(1,0),(2,0),(2,1) ...]
"""
return [] | 5,358,743 |
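
The body above is left as a stub. One possible implementation of the traversal described in the docstring, written here as a separate hypothetical helper and checked against the documented 3 x 3 example:

```python
def parcours_serpentin_sketch(n):
    indices = []
    top, bottom, left, right = 0, n - 1, 0, n - 1
    while top <= bottom and left <= right:
        for col in range(left, right + 1):              # top row, left to right
            indices.append((col, top))
        for row in range(top + 1, bottom + 1):          # right column, downwards
            indices.append((right, row))
        if top < bottom:
            for col in range(right - 1, left - 1, -1):  # bottom row, right to left
                indices.append((col, bottom))
        if left < right:
            for row in range(bottom - 1, top, -1):      # left column, upwards
                indices.append((left, row))
        top, bottom, left, right = top + 1, bottom - 1, left + 1, right - 1
    return indices

assert parcours_serpentin_sketch(3) == [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2),
                                        (1, 2), (0, 2), (0, 1), (1, 1)]
```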
def server():
"""
This function uses the socket and the time module.
The function is used to run a server that can be connected to by client.py and interacts with SockItAll.py.
The function is being ran on a separate thread 'thread_two'.
All messages produced by the server will be distinctly marked by '!SERVER! date+time !SERVER!'.
At 'Configuration', all local variables used to initiate the server are defined.
At 'Startup', the server will notify that it is waiting for an incoming connection and notify when it receives one.
At 'Main Loop', the server will use the received data to interact with the StockTracker method add_item_server()
defined in StockItAll.py, until the exit condition is met.
At 'Shutdown', the server socket will close, so the thread can exit.
Returns:
None
"""
# Configuration
version = socket.AF_INET
protocol = socket.SOCK_STREAM
sock = socket.socket(version, protocol)
ip = "127.0.0.1"
port = 9182
sock.bind((ip, port))
sock.listen()
# Startup
print("\n!SERVER! {} !SERVER!\nWaiting for connection on {}:{}\n".format
(time.strftime("%d.%m.%y %H:%M", time.localtime()), ip, port))
con, address = sock.accept()
print("\n!SERVER! {} !SERVER!\nIncoming connection from: {}\n".format
(time.strftime("%d.%m.%y %H:%M", time.localtime()), address[0]))
data = ""
# Main Loop
while data != "exit":
data = con.recv(1024).decode()
if data != "exit":
try:
ST.add_item_server(data)
print("\n!SERVER! {} !SERVER!\n'{}' added to item_list\n".format
(time.strftime("%d.%m.%y %H:%M", time.localtime()), data))
except Exception as e:
print(e)
# Shutdown
print("\n!SERVER! {} !SERVER!\nClient disconnected! Shutting down server!\n".format
(time.strftime("%d.%m.%y %H:%M", time.localtime())))
sock.close() | 5,358,744 |
def bdev_rbd_unregister_cluster(client, name):
"""Remove Rados cluster object from the system.
Args:
name: name of Rados cluster object to unregister
"""
params = {'name': name}
return client.call('bdev_rbd_unregister_cluster', params) | 5,358,745 |
def get_transcript(ContactId=None, MaxResults=None, NextToken=None, ScanDirection=None, SortOrder=None, StartPosition=None, ConnectionToken=None):
"""
Retrieves a transcript of the session. Note that ConnectionToken is used for invoking this API instead of ParticipantToken.
See also: AWS API Documentation
Exceptions
:example: response = client.get_transcript(
ContactId='string',
MaxResults=123,
NextToken='string',
ScanDirection='FORWARD'|'BACKWARD',
SortOrder='DESCENDING'|'ASCENDING',
StartPosition={
'Id': 'string',
'AbsoluteTime': 'string',
'MostRecent': 123
},
ConnectionToken='string'
)
:type ContactId: string
:param ContactId: The contactId from the current contact chain for which transcript is needed.
:type MaxResults: integer
:param MaxResults: The maximum number of results to return in the page. Default: 10.
:type NextToken: string
:param NextToken: The pagination token. Use the value returned previously in the next subsequent request to retrieve the next set of results.
:type ScanDirection: string
:param ScanDirection: The direction from StartPosition from which to retrieve message. Default: BACKWARD when no StartPosition is provided, FORWARD with StartPosition.
:type SortOrder: string
:param SortOrder: The sort order for the records. Default: DESCENDING.
:type StartPosition: dict
:param StartPosition: A filtering option for where to start.\n\nId (string) --The ID of the message or event where to start.\n\nAbsoluteTime (string) --The time in ISO format where to start.\nIt\'s specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.\n\nMostRecent (integer) --The start position of the most recent message where you want to start.\n\n\n
:type ConnectionToken: string
:param ConnectionToken: [REQUIRED]\nThe authentication token associated with the participant\'s connection.\n
:rtype: dict
ReturnsResponse Syntax
{
'InitialContactId': 'string',
'Transcript': [
{
'AbsoluteTime': 'string',
'Content': 'string',
'ContentType': 'string',
'Id': 'string',
'Type': 'MESSAGE'|'EVENT'|'CONNECTION_ACK',
'ParticipantId': 'string',
'DisplayName': 'string',
'ParticipantRole': 'AGENT'|'CUSTOMER'|'SYSTEM'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
InitialContactId (string) --
The initial contact ID for the contact.
Transcript (list) --
The list of messages in the session.
(dict) --
An item - message or event - that has been sent.
AbsoluteTime (string) --
The time when the message or event was sent.
It\'s specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.
Content (string) --
The content of the message or event.
ContentType (string) --
The type of content of the item.
Id (string) --
The ID of the item.
Type (string) --
Type of the item: message or event.
ParticipantId (string) --
The ID of the sender in the session.
DisplayName (string) --
The chat display name of the sender.
ParticipantRole (string) --
The role of the sender. For example, is it a customer, agent, or system.
NextToken (string) --
The pagination token. Use the value returned previously in the next subsequent request to retrieve the next set of results.
Exceptions
ConnectParticipant.Client.exceptions.AccessDeniedException
ConnectParticipant.Client.exceptions.InternalServerException
ConnectParticipant.Client.exceptions.ThrottlingException
ConnectParticipant.Client.exceptions.ValidationException
:return: {
'InitialContactId': 'string',
'Transcript': [
{
'AbsoluteTime': 'string',
'Content': 'string',
'ContentType': 'string',
'Id': 'string',
'Type': 'MESSAGE'|'EVENT'|'CONNECTION_ACK',
'ParticipantId': 'string',
'DisplayName': 'string',
'ParticipantRole': 'AGENT'|'CUSTOMER'|'SYSTEM'
},
],
'NextToken': 'string'
}
:returns:
ConnectParticipant.Client.exceptions.AccessDeniedException
ConnectParticipant.Client.exceptions.InternalServerException
ConnectParticipant.Client.exceptions.ThrottlingException
ConnectParticipant.Client.exceptions.ValidationException
"""
pass | 5,358,746 |
def add_image_background(filepath):
""" adds image background to the scene
args:
string, file path to background image
returns:
None
"""
img = bpy.data.images.load(filepath)
for area in bpy.context.screen.areas:
if area.type == 'VIEW_3D':
space_data = area.spaces.active
bg = space_data.background_images.new()
bg.image = img
space_data.show_background_images = True
break
texture = bpy.data.textures.new("Texture.001", 'IMAGE')
texture.image = img
bpy.data.worlds['World'].active_texture = texture
bpy.context.scene.world.texture_slots[0].use_map_horizon = True | 5,358,747 |
def _write_service(service, filename):
"""Write the passed service to 'filename'"""
_write_json(service.to_data(), filename) | 5,358,748 |
def plot_text_len(file):
    """
    Visualize the distribution of text lengths.
    :param file: path to the tab-separated corpus file
    :return:
    """
    with open(file, "r", encoding='utf-8') as f:
        lines = f.readlines()
    # collect the token-level and char-level length of every text
    all_length = [(len(i.strip().split("\t")[1].split(" ")), len(i.strip().split("\t")[2].split(" "))) for i in lines]
    all_token_length = [i[0] for i in all_length]
    all_char_length = [i[1] for i in all_length]
    # print(all_token_length[:2])
    # visualize corpus sequence lengths; most texts are shorter than 1000 tokens
    token_prop = np.mean(np.array(all_token_length) < 1000)
    print("Proportion of texts with token length below 1000: {}".format(token_prop))
    plt.hist(all_token_length, bins=500)
    plt.show()
    char_prop = np.mean(np.array(all_char_length) < 2000)
    print("Proportion of texts with char length below 2000: {}".format(char_prop))
    plt.hist(all_char_length, bins=500)
    plt.show() | 5,358,749 |
def print_tc_footer(log, tc_name):
"""
print_tc_footer
@param log Logger object
@param tc_name Test Case name
"""
log.info("{}".format(STANZA))
log.info("{}{}{}".format(SML_STANZA, tc_name, " END")) | 5,358,750 |
def get_rndc_secret():
"""Use the singleton from the DesignateBindCharm to retrieve the RNDC
secret
:returns: str or None. Secret if available, None if not.
"""
return DesignateBindCharm.singleton.get_rndc_secret() | 5,358,751 |
def interpolate_pairs(data_key, key, experiment, n_pairs, n_interp, save_path):
"""
Interpolate images
"""
exp, sampler, encoder, decoder = experiment
# Load the data that we'll use for interpolation
x_for_interpolation = exp.data_loader((2*n_pairs,), key=data_key)
# Split the data into pairs
random_pairs = random.randint(key, (2*n_pairs,), minval=0, maxval=x_for_interpolation.shape[0])
pairs_iter = iter(random_pairs)
index_pairs = [(next(pairs_iter), next(pairs_iter)) for _ in range(n_pairs)]
n_cols = n_interp
n_rows = len(index_pairs)
fig, axes = plt.subplots(n_rows, n_cols)
if(axes.ndim == 1):
axes = axes[None]
fig.set_size_inches(2*n_cols, 2*n_rows)
for i, (idx1, idx2) in enumerate(index_pairs):
x = x_for_interpolation[[idx1, idx2]]
# Find the embeddings of the data
_, finvx = encoder(x, key, sigma=1.0)
# Interpolate
phi = jit(vmap(cartesian_to_spherical))(finvx)
phi1, phi2 = phi
interpolation_phi = jnp.linspace(phi1, phi2, n_interp)
interpolation_z = jit(vmap(spherical_to_cartesian))(interpolation_phi)
# Decode the interpolations
_, fz = decoder(finvx, key, sigma=0.0)
# Plot
for j in range(n_interp):
im = fz[j][:,:,0] if fz[j].shape[-1] == 1 else fz[j]
axes[i,j].imshow(im)
axes[i,j].set_axis_off()
plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
plt.savefig(save_path, bbox_inches='tight', format='pdf')
plt.close() | 5,358,752 |
def transform_tweet(source_tweet):
"""
Perform transformation on one tweet, producing a new, transformed tweet.
:param source_tweet: Tweet text to transform
:type source_tweet: str
:return: Transformed tweet text
:rtype: str
"""
no_emojis = replace_emojis(source_tweet)
as_tokens = tokenize_string(no_emojis)
result = ' '.join(as_tokens)
if not result:
return pd.NaT
else:
return result | 5,358,753 |
def pdFrame(file):
"""Creates a pandas data frame from a json log file
Args:
file: json log file to read
Returns:
pandas data frame
"""
logger.debug("creating pandas data frame from {}".format(file))
data = []
with open(file) as f:
for line in f:
tmp = []
log = json.loads(line)
try:
tmp.append(pd.Timestamp(log['timestamp']))
except KeyError:
tmp.append('no value')
try:
tmp.append(str(log['resource']['type']))
except KeyError:
tmp.append('no value')
try:
tmp.append(str(log['severity']))
except KeyError:
tmp.append('no value')
try:
tmp.append(str(log['protoPayload']['authenticationInfo']['principalEmail']))
except KeyError:
tmp.append('no value')
data.append(tmp)
fieldNames = ['timestamp', 'resourceType', 'severity', 'account']
logs = pd.DataFrame(data, columns=fieldNames)
return logs | 5,358,754 |
def generate_figure(nc_file: str,
field_names: list,
show: bool = True,
save_path: str = None,
max_y: Optional[int] = 12,
dpi: Optional[int] = 200,
image_name: Optional[str] = None,
sub_title: Optional[bool] = True,
title: Optional[bool] = True):
"""Generates a Cloudnet figure.
Args:
nc_file (str): Input file.
field_names (list): Variable names to be plotted.
show (bool, optional): If True, shows the figure. Default is True.
save_path (str, optional): Setting this path will save the figure (in the
given path). Default is None, when the figure is not saved.
max_y (int, optional): Upper limit in the plots (km). Default is 12.
dpi (int, optional): Figure quality (if saved). Higher value means
more pixels, i.e., better image quality. Default is 200.
image_name (str, optional): Name (and full path) of the output image.
Overrides the *save_path* option. Default is None.
sub_title (bool, optional): Add subtitle to image. Default is True.
title (bool, optional): Add title to image. Default is True.
Examples:
>>> from cloudnetpy.plotting import generate_figure
>>> generate_figure('categorize_file.nc', ['Z', 'v', 'width', 'ldr', 'beta', 'lwp'])
>>> generate_figure('iwc_file.nc', ['iwc', 'iwc_error', 'iwc_retrieval_status'])
>>> generate_figure('lwc_file.nc', ['lwc', 'lwc_error', 'lwc_retrieval_status'], max_y=4)
>>> generate_figure('classification_file.nc', ['target_classification', 'detection_status'])
>>> generate_figure('drizzle_file.nc', ['Do', 'mu', 'S'], max_y=3)
"""
valid_fields, valid_names = _find_valid_fields(nc_file, field_names)
is_height = _is_height_dimension(nc_file)
fig, axes = _initialize_figure(len(valid_fields))
for ax, field, name in zip(axes, valid_fields, valid_names):
plot_type = ATTRIBUTES[name].plot_type
if title:
_set_title(ax, name, '')
if not is_height:
unit = _get_variable_unit(nc_file, name)
source = ATTRIBUTES[name].source
time = _read_time_vector(nc_file)
_plot_instrument_data(ax, field, name, source, time, unit)
continue
ax_value = _read_ax_values(nc_file)
field, ax_value = _screen_high_altitudes(field, ax_value, max_y)
_set_ax(ax, max_y)
if plot_type == 'bar':
_plot_bar_data(ax, field, ax_value[0])
_set_ax(ax, 2, ATTRIBUTES[name].ylabel)
elif plot_type == 'segment':
_plot_segment_data(ax, field, name, ax_value)
else:
_plot_colormesh_data(ax, field, name, ax_value)
case_date = _set_labels(fig, axes[-1], nc_file, sub_title)
_handle_saving(image_name, save_path, show, dpi, case_date, valid_names) | 5,358,755 |
def fitDataBFGSM2(M, val, c_w_l, init=None, nozero=True, k=3e34, lam=1., name='W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave'): #init is the three initial values of the gaussian needed to fit the data
""" function for determining the optimal fit given the desired parabolic regularization"""
    # initialize start position
temp = scipy.io.readsav(name)
init = temp['abundance'][:,36]
reg = gen2Diff(init)
bndarray = scipy.ones((len(init),2))
bndarray[:,0] = 1e-10
bndarray[:,1] = 1e10
Te = temp['en']
y = time.time()
output = scipy.optimize.minimize(fullObjectiveLog,
init,
args=(val, c_w_l, M/k),
jac=objectiveLogJac2,
bounds=bndarray)
print(time.time()-y)
return output | 5,358,756 |
def shutdown_application(app, env, docname):
"""
Shutdown qt application
"""
if herculeum.sphinx.helpers.qt_app is not None:
herculeum.sphinx.helpers.qt_app = None | 5,358,757 |
def set_complete_cfg_spacy(false_or_true: str):
"""Set all SpaCy configuration parameters to the same logical value."""
return pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_SPACY,
[
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_CLUSTER, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_DEP_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_DOC, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_IOB_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_KB_ID_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ENT_TYPE_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_HEAD, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_I, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IDX, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_ALPHA, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_ASCII, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_BRACKET, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_CURRENCY, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_DIGIT, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_LEFT_PUNCT, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_LOWER, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_OOV, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_PUNCT, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_QUOTE, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_RIGHT_PUNCT, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SENT_END, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SENT_START, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_SPACE, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_STOP, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_TITLE, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_IS_UPPER, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LANG_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEFT_EDGE, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEMMA_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEX, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LEX_ID, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_EMAIL, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_NUM, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LIKE_URL, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_LOWER_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_MORPH, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_NORM_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_ORTH_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_POS_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_PREFIX_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_PROB, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_RANK, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_RIGHT_EDGE, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SENT, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SENTIMENT, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SHAPE_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_SUFFIX_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TAG_, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TENSOR, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TEXT, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_TEXT_WITH_WS, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_VOCAB, false_or_true),
(cfg.cls_setup.Setup._DCR_CFG_SPACY_TKN_ATTR_WHITESPACE_, false_or_true),
],
) | 5,358,758 |
def guard_unexpected_errors(func):
"""Decorator to be used in PyObjC callbacks where an error bubbling up
would cause a crash. Instead of crashing, print the error to stderr and
prevent passing to PyObjC layer.
For Python 3, print the exception using chaining. Accomplished by setting
the cause of :exc:`rumps.exceptions.InternalRumpsError` to the exception.
For Python 2, emulate exception chaining by printing the original exception
followed by :exc:`rumps.exceptions.InternalRumpsError`.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
internal_error = exceptions.InternalRumpsError(
'an unexpected error occurred within an internal callback'
)
if compat.PY2:
import sys
traceback.print_exc()
print('\nThe above exception was the direct cause of the following exception:\n', file=sys.stderr)
traceback.print_exception(exceptions.InternalRumpsError, internal_error, None)
else:
internal_error.__cause__ = e
traceback.print_exception(exceptions.InternalRumpsError, internal_error, None)
return wrapper | 5,358,759 |
def get_lun_ids(service_instance=None):
"""
Return a list of LUN (Logical Unit Number) NAA (Network Addressing Authority) IDs.
"""
if service_instance is None:
service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
hosts = utils_esxi.get_hosts(service_instance=service_instance, get_all_hosts=True)
ids = []
for host in hosts:
for datastore in host.datastore:
for extent in datastore.info.vmfs.extent:
ids.append(extent.diskName)
return ids | 5,358,760 |
def parse_midi_file(midi_file,
max_notes=float('Inf'),
max_time_signatures=1,
max_tempos=1,
ignore_polyphonic_notes=True,
convert_to_drums=False,
steps_per_quarter=16):
    """Parse a MIDI file into a single-melody NoteSequence and its tempo.
    Parameters
    ----------
    midi_file : str
        Path to the MIDI file to parse.
    max_notes : float, optional
        Maximum number of notes to keep; any extra notes are dropped.
    max_time_signatures : int, optional
        Maximum number of time-signature changes to keep.
    max_tempos : int, optional
        Maximum number of tempo changes to keep.
    ignore_polyphonic_notes : bool, optional
        If True, convert the sequence to a monophonic melody first.
    convert_to_drums : bool, optional
        If True, set every note's program to 10 (drums).
    steps_per_quarter : int, optional
        Quantization resolution, in steps per quarter note.
    Returns
    -------
    (seq, qpm)
        The extracted melody as a NoteSequence, and its tempo in quarter
        notes per minute.
    """
seq = midi_io.midi_file_to_sequence_proto(midi_file)
while len(seq.notes) > max_notes:
seq.notes.pop()
while len(seq.time_signatures) > max_time_signatures:
seq.time_signatures.pop()
while len(seq.tempos) > max_tempos:
seq.tempos.pop()
if convert_to_drums:
for note_i in range(len(seq.notes)):
seq.notes[note_i].program = 10
if ignore_polyphonic_notes:
convert_to_monophonic(seq)
seq = sequences_lib.quantize_note_sequence(
seq, steps_per_quarter=steps_per_quarter)
if seq.tempos:
qpm = seq.tempos[0].qpm
else:
qpm = 120
melody = Melody()
melody.from_quantized_sequence(
seq, ignore_polyphonic_notes=ignore_polyphonic_notes)
seq = melody.to_sequence(qpm=qpm)
return seq, qpm | 5,358,761 |
def filter_prediction(disable_valid_filter, disable_extra_one_word_filter, pred_token_2dlist_stemmed):
"""
    Remove duplicate predictions; optionally also remove invalid predictions and extra one-word predictions.
    :param disable_valid_filter: if True, skip the keyphrase-validity filter
    :param disable_extra_one_word_filter: if True, skip the extra-one-word filter
    :param pred_token_2dlist_stemmed: list of stemmed, tokenized predicted keyphrases
    :return: (filtered predictions, number of duplicates removed, uniqueness mask)
"""
num_predictions = len(pred_token_2dlist_stemmed)
is_unique_mask = check_duplicate_keyphrases(pred_token_2dlist_stemmed) # boolean array, 1=unqiue, 0=duplicate
pred_filter = is_unique_mask
if not disable_valid_filter:
is_valid_mask = check_valid_keyphrases(pred_token_2dlist_stemmed)
pred_filter = pred_filter * is_valid_mask
if not disable_extra_one_word_filter:
extra_one_word_seqs_mask, num_one_word_seqs = compute_extra_one_word_seqs_mask(pred_token_2dlist_stemmed)
pred_filter = pred_filter * extra_one_word_seqs_mask
filtered_stemmed_pred_str_list = [word_list for word_list, is_keep in
zip(pred_token_2dlist_stemmed, pred_filter) if
is_keep]
num_duplicated_predictions = num_predictions - np.sum(is_unique_mask)
return filtered_stemmed_pred_str_list, num_duplicated_predictions, is_unique_mask | 5,358,762 |
def mlmc_test(integrand_qmcpy, n, l, n0, eps, l_min, l_max):
"""
Multilevel Monte Carlo test routine
Args:
integrand_qmcpy (function):
low-level routine for l level estimation such that
Args:
x (ndarray): nx(integrand._dim_at_level(l)) array of samples from discrete distribution
l (int): level
Return:
sums(1) = sum(Pf-Pc)
sums(2) = sum((Pf-Pc).^2)
sums(3) = sum((Pf-Pc).^3)
sums(4) = sum((Pf-Pc).^4)
sums(5) = sum(Pf)
sums(6) = sum(Pf.^2)
cost = user-defined computational cost
n (int): number of samples for convergence tests
l (int): number of levels for convergence tests
n0 (int): initial number of samples for MLMC calcs
eps (float): desired accuracy array for MLMC calcs
l_min (int): minimum number of levels for MLMC calcs
l_max (int): maximum number of levels for MLMC calcs
"""
# first, convergence tests
n = 100*ceil(n/100) # make N a multiple of 100
print('\nConvergence tests, kurtosis, telescoping sum check using N =%7d samples'%n)
print('\t%-15s%-15s%-15s%-15s%-15s%-15s%-15s%s'\
%('l','ave(Pf-Pc)','ave(Pf)','var(Pf-Pc)','var(Pf)','kurtosis','check','cost'))
del1 = array([])
del2 = array([])
var1 = array([])
var2 = array([])
kur1 = array([])
chk1 = array([])
cost = array([])
for ll in range(l+1):
sums = 0
cst = 0
for j in range(1,101):
# reset dimension
new_dim = integrand_qmcpy._dim_at_level(ll)
integrand_qmcpy.measure.set_dimension(new_dim)
            # evaluate the integrand at the sample points
samples = integrand_qmcpy.measure.distribution.gen_samples(n=n/100)
sums_j,cst_j = integrand_qmcpy.f(samples,l=ll)
sums = sums + sums_j/n
cst = cst + cst_j/n
if ll == 0:
kurt = 0.
else:
kurt = ( sums[3] - 4*sums[2]*sums[0] + 6*sums[1]*sums[0]**2 -
3*sums[0]*sums[0]**3 ) / (sums[1]-sums[0]**2)**2.
cost = hstack((cost, cst))
del1 = hstack((del1, sums[0]))
del2 = hstack((del2, sums[4]))
var1 = hstack((var1, sums[1]-sums[0]**2))
var2 = hstack((var2, sums[5]-sums[4]**2))
var2 = maximum(var2, 1e-10) # fix for cases with var=0
kur1 = hstack((kur1, kurt))
if ll == 0:
check = 0
else:
check = abs( del1[ll] + del2[ll-1] - del2[ll]) / \
( 3.*( sqrt(var1[ll]) + sqrt(var2[ll-1]) + sqrt(var2[ll]) ) / sqrt(n))
chk1 = hstack((chk1, check))
print('\t%-15d%-15.4e%-15.4e%-15.3e%-15.3e%-15.2e%-15.2e%.2e'\
%(ll,del1[ll],del2[ll],var1[ll],var2[ll],kur1[ll],chk1[ll],cst))
# print out a warning if kurtosis or consistency check looks bad
if kur1[-1] > 100.:
print('WARNING: kurtosis on finest level = %f'%kur1[-1])
print(' indicates MLMC correction dominated by a few rare paths;')
print(' for information on the connection to variance of sample variances,')
print(' see http://mathworld.wolfram.com/SampleVarianceDistribution.html\n')
if max(chk1) > 1.:
print('WARNING: maximum consistency error = %f'%max(chk1))
print(' indicates identity E[Pf-Pc] = E[Pf] - E[Pc] not satisfied;')
print(' to be more certain, re-run mlmc_test with larger N\n')
# use linear regression to estimate alpha, beta and gamma
l1 = 2
l2 = l+1
x = ones((l2+1-l1,2))
x[:,1] = arange(l1,l2+1)
pa = lstsq(x,log2(absolute(del1[(l1-1):l2])),rcond=None)[0]
alpha = -pa[1]
pb = lstsq(x,log2(absolute(var1[(l1-1):l2])),rcond=None)[0]
beta = -pb[1]
pg = lstsq(x,log2(absolute(cost[(l1-1):l2])),rcond=None)[0]
gamma = pg[1]
print('\nLinear regression estimates of MLMC parameters')
print('\talpha = %f (exponent for MLMC weak convergence)'%alpha)
print('\tbeta = %f (exponent for MLMC variance)'%beta)
print('\tgamma = %f (exponent for MLMC cost)'%gamma)
#second, mlmc complexity tests
print('\nMLMC complexity tests')
print('\t%-15s%-15s%-15s%-15s%-15s%s'\
%('eps','value','mlmc_cost','std_cost','savings','N_l'))
alpha = max(alpha,0.5)
beta = max(beta,0.5)
theta = 0.25
for i in range(len(eps)):
mlmc_qmcpy = CubMCML(integrand_qmcpy,
rmse_tol = eps[i],
n_init = n0,
levels_min = l_min,
levels_max = l_max,
alpha0 = alpha,
beta0 = beta,
gamma0 = gamma)
mlmc_qmcpy.integrate()
p = mlmc_qmcpy.data.solution
nl = mlmc_qmcpy.data.n_level
cl = mlmc_qmcpy.data.cost_per_sample
mlmc_cost = sum(nl*cl)
idx = min(len(var2),len(nl))-1
std_cost = var2[idx]*cl[-1] / ((1.-theta)*array(eps[i])**2)
print('\t%-15.3e%-15.3e%-15.3e%-15.3e%-15.2f%s'\
%(eps[i], p, mlmc_cost, std_cost, std_cost/mlmc_cost,'\t'.join(str(int(nli)) for nli in nl))) | 5,358,763 |
def simple_dict_event_extractor(row, condition_for_creating_event, id_field, timestamp_field, name_of_event):
"""
Takes a row of the data df and returns an event record {id, event, timestamp}
if the row satisfies the condition (i.e. condition_for_creating_event(row) returns True)
"""
if condition_for_creating_event(row):
return {'id': row[id_field], 'event': name_of_event, 'timestamp': row[timestamp_field]} | 5,358,764 |
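A quick usage sketch of the extractor above; the field names and the failure condition are invented for illustration.

# Illustration only: 'user_id', 'status' and 'ts' are hypothetical field names.
row = {'user_id': 42, 'status': 'failed', 'ts': '2021-01-01T00:00:00Z'}
event = simple_dict_event_extractor(
    row,
    condition_for_creating_event=lambda r: r['status'] == 'failed',
    id_field='user_id',
    timestamp_field='ts',
    name_of_event='job_failed')
# event == {'id': 42, 'event': 'job_failed', 'timestamp': '2021-01-01T00:00:00Z'}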
def split_fused_prelu(input_graph_def: util.GraphDef) -> util.GraphDef:
"""
This function looks for fused operations that include a 'Prelu'-activation.
Matching nodes will be split into individual operations.
TFJS uses fused operations for performance.
Some fused activations aren't supported by TF (e.g. 'Prelu'), so we need
to split the fused ops back into individual ops and replace unsupported
functions by equivalent supported constructs later.
Args:
input_graph_def: TF graph definition to examine
Returns:
Updated copy of the input graph with matching nodes replaced by
individual operations
"""
def _predicate(node):
return (util.is_fused_conv2d(node, b'Prelu')
or util.is_fused_matmul(node, b'Prelu'))
return util.replace_matching_nodes(input_graph_def, _predicate,
_split_fused_op) | 5,358,765 |
def test_harvest_lost_resources(pool):
"""Test unreferenced resources are returned to the pool."""
def get_resource_id():
"""
Ensures ``Resource`` falls out of scope before calling
``_harvest_lost_resources()``.
"""
return id(pool.get_resource()._resource)
r_id = get_resource_id()
# Run garbage collection to ensure ``Resource`` created in
# ``get_resource_id()`` is destroyed.
gc.collect()
pool._harvest_lost_resources()
assert r_id == id(pool.get_resource()._resource) | 5,358,766 |
def __call__for_keras_init_v1(self, shape, dtype=None, partition_info=None):
""" Making keras VarianceScaling initializers v1 support dynamic shape.
"""
if dtype is None:
dtype = self.dtype
scale = self.scale
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
fan_in, fan_out = _compute_fans_for_keras_init_v1_v2(scale_shape)
fan_in = math_ops.cast(fan_in, dtype=dtype)
fan_out = math_ops.cast(fan_out, dtype=dtype)
if self.mode == "fan_in":
scale /= math_ops.maximum(1., fan_in)
elif self.mode == "fan_out":
scale /= math_ops.maximum(1., fan_out)
else:
scale /= math_ops.maximum(1., (fan_in + fan_out) / 2.)
if self.distribution == "normal" or self.distribution == "truncated_normal":
# constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = math_ops.sqrt(scale) / .87962566103423978
return random_ops.truncated_normal(shape,
0.0,
stddev,
dtype,
seed=self.seed)
elif self.distribution == "untruncated_normal":
stddev = math_ops.sqrt(scale)
return random_ops.random_normal(shape, 0.0, stddev, dtype, seed=self.seed)
else:
limit = math_ops.sqrt(3.0 * scale)
return random_ops.random_uniform(shape,
-limit,
limit,
dtype,
seed=self.seed) | 5,358,767 |
def create_line(net, from_bus, to_bus, length_km, std_type, name=None, index=None, geodata=None,
df=1., parallel=1, in_service=True, max_loading_percent=nan):
""" create_line(net, from_bus, to_bus, length_km, std_type, name=None, index=None, \
geodata=None, df=1., parallel=1, in_service=True, max_loading_percent=nan)
Creates a line element in net["line"]
The line parameters are defined through the standard type library.
INPUT:
**net** - The net within this line should be created
**from_bus** (int) - ID of the bus on one side which the line will be connected with
**to_bus** (int) - ID of the bus on the other side which the line will be connected with
**length_km** (float) - The line length in km
**std_type** (string) - The linetype of a standard line pre-defined in standard_linetypes.
OPTIONAL:
**name** (string) - A custom name for this line
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
**geodata**
        (array, default None, shape=(n_points, 2)) -
The linegeodata of the line. The first row should be the coordinates
of bus a and the last should be the coordinates of bus b. The points
in the middle represent the bending points of the line
**in_service** (boolean) - True for in_service or False for out of service
**df** (float) - derating factor: maximal current of line in relation to nominal current \
of line (from 0 to 1)
**parallel** (integer) - number of parallel line systems
**max_loading_percent (float)** - maximum current loading (only needed for OPF)
OUTPUT:
**index** (int) - The unique ID of the created line
EXAMPLE:
create_line(net, "line1", from_bus = 0, to_bus = 1, length_km=0.1, std_type="NAYY 4x50 SE")
"""
# check if bus exist to attach the line to
for b in [from_bus, to_bus]:
if b not in net["bus"].index.values:
raise UserWarning("Line %s tries to attach to non-existing bus %s" % (name, b))
if index is None:
index = get_free_id(net["line"])
if index in net["line"].index:
raise UserWarning("A line with index %s already exists" % index)
v = {
"name": name, "length_km": length_km, "from_bus": from_bus,
"to_bus": to_bus, "in_service": bool(in_service), "std_type": std_type,
"df": df, "parallel": parallel
}
lineparam = load_std_type(net, std_type, "line")
v.update({
"r_ohm_per_km": lineparam["r_ohm_per_km"],
"x_ohm_per_km": lineparam["x_ohm_per_km"],
"c_nf_per_km": lineparam["c_nf_per_km"],
"max_i_ka": lineparam["max_i_ka"]
})
if "type" in lineparam:
v.update({"type": lineparam["type"]})
# store dtypes
dtypes = net.line.dtypes
net.line.loc[index, list(v.keys())] = list(v.values())
# and preserve dtypes
_preserve_dtypes(net.line, dtypes)
if geodata is not None:
net["line_geodata"].loc[index, "coords"] = geodata
if not isnan(max_loading_percent):
if "max_loading_percent" not in net.line.columns:
net.line.loc[:, "max_loading_percent"] = pd.Series()
net.line.loc[index, "max_loading_percent"] = float(max_loading_percent)
return index | 5,358,768 |
def get_initmap(X, A=None, standardize=False, cov_func=None):
""" Give back parameters such that we have the L U decomposition of the
product with A (if given, or the PCA scores if not).
That is we will get back:
X[:, perm]*L*U + b = ((X-meanvec)/stdvec)*A
where A are PCA directions if not given, L, U are LU decomposition,
and meanvec, stdvec are zeros, ones vectors if not standardizing.
Args:
X: N x d array of training data
A: d x d linear map to decompose, XA+b, (uses Identity if None given
with no cov_func).
standardize: boolean that indicates to standardize the dimensions
of X after applying linear map.
        cov_func: function that yields a linear map given the covariance matrix of
            X.
    Returns:
        init_mat: d x d matrix whose strictly lower triangle corresponds to L
            and whose upper triangle corresponds to U.
        b: d length vector of offset
        perm: permutation of dimensions of X
"""
# import pdb; pdb.set_trace() # XXX BREAKPOINT
N, d = X.shape
if A is None:
if cov_func is None:
A = np.eye(d)
b = np.zeros((1, d))
else:
b = -np.mean(X, 0, keepdims=True)
M = (X+b) # Has mean zero.
cov = np.matmul(M.T, M)/N
A = cov_func(cov)
b = np.matmul(b, A)
if standardize:
z = np.matmul(X, A)+b
mean_vec = np.mean(z, 0, keepdims=True)
# std_vec = np.std(z, 0, keepdims=True)
# Standardizing may lead to outliers, better to get things in [-1, 1].
# std_vec = np.max(np.abs(z-mean_vec), 0, keepdims=True)
std_vec = np.maximum(np.max(np.abs(z-mean_vec), 0, keepdims=True),
np.ones((1, d)), dtype=np.float32)
# import pdb; pdb.set_trace() # XXX BREAKPOINT
else:
mean_vec = np.zeros((1, d))
std_vec = np.ones((1, d))
AS = np.divide(A, std_vec)
P, L, U = linalg.lu(AS)
perm = np.concatenate([np.flatnonzero(P[:, i]) for i in range(P.shape[1])])
init_mat = np.tril(L, -1) + U
init_b = np.squeeze((b-mean_vec)/std_vec)
return np.float32(init_mat), np.float32(init_b), perm | 5,358,769 |
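A minimal self-check of the identity documented above, using the default arguments (A is the identity, no standardization): L and U are rebuilt from init_mat and X[:, perm] @ L @ U + b should recover X. It assumes numpy is imported as np, as in the function body.

# Sketch: verify X[:, perm] @ L @ U + b == ((X - meanvec) / stdvec) @ A for the default A = I.
rng = np.random.RandomState(0)
X = rng.randn(100, 4).astype(np.float32)
init_mat, init_b, perm = get_initmap(X)                    # defaults: A = I, standardize = False
L = np.tril(init_mat, -1) + np.eye(4, dtype=np.float32)    # unit lower-triangular factor
U = np.triu(init_mat)                                      # upper-triangular factor
recon = np.matmul(X[:, perm], np.matmul(L, U)) + init_b
assert np.allclose(recon, X, atol=1e-5)                    # the identity map is recovered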
def publish_screenshot_sets(
access_token: AccessToken,
localization_dir: str,
localization_id: str,
):
"""Publish the screenshot sets from assets on disk."""
screenshots_dir = os.path.join(localization_dir, "screenshots")
if not os.path.isdir(screenshots_dir):
print_clr(
f" No screenshots: directory {colorama.Fore.CYAN}{screenshots_dir}{colorama.Fore.RESET} not found.",
)
return
screenshot_sets = appstore.get_screenshot_sets(
localization_id=localization_id, access_token=access_token
)
asset_display_types = [
x
for x in os.listdir(screenshots_dir)
if os.path.isdir(os.path.join(screenshots_dir, x))
]
# Create new display types
loc_display_types = [
x["attributes"]["screenshotDisplayType"] for x in screenshot_sets
]
new_display_types = [x for x in asset_display_types if x not in loc_display_types]
for display_type in new_display_types:
print_media_set_status(
display_type, colorama.Fore.YELLOW, "creating display type"
)
screenshot_set = appstore.create_screenshot_set(
localization_id=localization_id,
display_type=display_type,
access_token=access_token,
)
screenshot_sets.append(screenshot_set)
for screenshot_set in screenshot_sets:
screenshot_set_id = screenshot_set["id"]
display_type = screenshot_set["attributes"]["screenshotDisplayType"]
screenshot_set_dir = os.path.join(screenshots_dir, display_type)
# Delete removed display types
if not os.path.isdir(screenshot_set_dir):
print_media_set_status(
display_type, colorama.Fore.RED, "deleting display type"
)
appstore.delete_screenshot_set(
screenshot_set_id=screenshot_set_id, access_token=access_token
)
continue
# Publish
publish_screenshots(
access_token=access_token,
screenshot_set_dir=screenshot_set_dir,
screenshot_set_id=screenshot_set_id,
display_type=display_type,
) | 5,358,770 |
def read_FQ_matlab(file_open):
""" Opens FISH-quant result files generated with Matlab (tab-delimited text file).
Args:
file_open (string): string containing the full file name.
Returns:
dictionary containing outlines of cells, and if present the detected spots.
"""
# Open file
with open(file_open, "r") as fh:
data = fh.readlines()
# Strip white space characters
data = [x.strip() for x in data]
# Loop over read-in data
fq_dict = {'cells':{},'file_names':{},'settings':{}}
iLine = 0
while iLine < len(data):
line = data[iLine]
# READ FILE NAMES
if 'IMG_Raw' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'smFISH':img_name[1]})
if 'IMG_Filtered' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'smFISH_filt':img_name[1]})
if 'IMG_DAPI' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'DAPI':img_name[1]})
if 'FILE_settings' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'settings':img_name[1]})
# READ IMAGE PARAMETERS
if 'PARAMETERS' in line:
iLine += 2
par_microscope = data[iLine].split('\t')
fq_dict['settings'].update({'microscope':{'pix_xy':float(par_microscope[0]),
'pix_z':float(par_microscope[1]),
'RI':float(par_microscope[2]),
'EX':float(par_microscope[3]),
'EM':float(par_microscope[4]),
'NA':float(par_microscope[5]),
'type':par_microscope[6]}})
# New cell
if 'CELL_START' in line:
# Get name of cell
cell_id = line.split('\t')[1]
### POSITION OF CELL
# Read X-POS
iLine += 1
pos_list = (data[iLine].replace('X_POS\t','')).split('\t')
x_pos = [int(s) for s in pos_list]
# Read Y-POS
iLine += 1
pos_list = (data[iLine].replace('Y_POS\t','')).split('\t')
y_pos = [int(s) for s in pos_list]
# Read Z-POS
iLine += 1
pos_list = (data[iLine].replace('Z_POS\t','')).split('\t')
if len(pos_list) > 1:
z_pos = [int(s) for s in pos_list]
else:
z_pos = ['']
fq_dict['cells'].update({cell_id:{'cell_pos':{'x': x_pos,'y': y_pos,'z': z_pos}}})
# New nucleus
if 'Nucleus_START' in line:
# Get name of cell
nuc_id = line.split('\t')[1]
### POSITION OF CELL
# Read X-POS
iLine += 1
pos_list = (data[iLine].replace('X_POS\t','')).split('\t')
x_pos = [int(s) for s in pos_list]
# Read Y-POS
iLine += 1
pos_list = (data[iLine].replace('Y_POS\t','')).split('\t')
y_pos = [int(s) for s in pos_list]
# Read Z-POS
iLine += 1
pos_list = (data[iLine].replace('Z_POS\t','')).split('\t')
if len(pos_list) > 1:
z_pos = [int(s) for s in pos_list]
else:
z_pos = ['']
fq_dict['cells'][cell_id].update({nuc_id:{'nuc_pos':{'x': x_pos,'y': y_pos,'z': z_pos}}})
# Position of detected RNAS
if 'SPOTS_START' in line:
iLine += 2 # Move over header
RNA_prop = []
while not('SPOTS_END' in data[iLine]):
RNA_prop.append([float(s) for s in data[iLine].split('\t')])
iLine += 1
# Assign to dictionary
fq_dict['cells'][cell_id].update({'spots': np.array(RNA_prop)})
        # Update line counter
iLine += 1
return fq_dict | 5,358,771 |
def create_dfn(settings, seed, fname='csp'):
"""
Settings:
HL1 is half-length of outer box.
HL2 is half-length of fracture center box.
HL3 is half-length of inner box.
"""
document()
guids, midpt = srfc_guids(), (0,0,0)
random.seed(seed)
bsrf_ids = cube(settings['HL1']*2.)
guids.boxes = bsrf_ids
layer('INTS_BOX')
corner_points(settings['HL1']*2.)
if settings['HL3 cube']:
bsrf_ids = cube(settings['HL3']*2., '_INT')
guids.boxes_int = bsrf_ids
layer('INTS_BOX_INT')
corner_points(settings['HL3']*2.)
if not settings['uniform size rmax']:
radii = power_law_variates(settings['N'], settings['rmin'], settings['rmax'], settings['exponent'])
else:
radii = [settings['rmax'] for i in range(settings['N'])]
if not settings['perimeter distance min']:
centers = uniform_centers(settings['N'], settings['HL2']*2., midpt, settings['center intervals'])
unorms = uniform_normals(settings['N'], settings['pole intervals'])
else:
centers, unorms = uniform_centers_normals(radii, settings['HL2']*2., midpt, settings['perimeter distance min'])
fnames, fsrf_ids = populate(radii, centers, unorms, settings['perimeter points'], settings['polygon'])
guids.fractures = fsrf_ids
intersect_surfaces(guids)
color_surfaces(fnames)
freport(fnames, radii, centers, settings['HL3']*2., unorms)
save(fname)
#final_view() | 5,358,772 |
def create_default_reporting_options(embedded=True, config={}):
"""
config must follow this scheme:
{
`table_name`: {
`option1`: `value1`
}
}
The different options will depend on the table role.
- for ALL tables:
        {
            'data' : {
                'remove_columns': ['column_name1'],
                'subsampling_factor': 1.0,
                'keep_last_n_rows': 1000
            }
        }
- For role `data_samples`:
{
'default':
'Scatter X Axis': value,
'Scatter Y Axis': value,
'Color by': value,
'Color scheme': value,
'Binning X Axis': value,
'Binning Y Axis': value,
'Label with': value,
'Display with': value,
}
"""
o = Object()
o.image_size = 80
o.font_size = 19
o.frame_size_x = None
o.frame_size_y = 768
o.data_samples = Object()
o.data_samples.display_tabular = True
o.data_samples.display_scatter = True
o.data_samples.max_numpy_display = 10 # if array below this size, the content will be displayed
o.data_samples.resize_heterogeneous_numpy = True # if True, numpy arrays of different shape will be resized to common average size
o.db_root = None
o.embedded = embedded
o.style = Object()
o.style.color_by_line_width = 1
o.style.scatter_aspect_ratio = 1.5
o.style.tool_window_size_x = 200
o.style.tool_window_size_y = 500
o.style.sorted_legend = True
o.style.category_margin = 0.2
o.style.scatter_continuous_factor = 10
o.data = Object()
o.data.refresh_time = 5.0
o.data.unpack_numpy_arrays_with_less_than_x_columns = 15
o.data.types_to_discard = [
np.dtype('|S1'), # binary string
]
o.config = config
return o | 5,358,773 |
def get_patient_note(state, patient_id, note_id, *args, **kwargs):
"""
Return a note for a patient.
---
tags: ["FHIR"]
parameters:
- name: patient_id
in: path
description: ID of the patient of interest
required: true
schema:
type: string
- name: note_id
in: path
description: ID of the note of interest
required: true
schema:
type: string
responses:
200:
description: "Note returned"
content:
application/json:
schema:
type: array
items:
type: object
404:
description: "No patient or note exists with identifier"
content:
text/plain:
schema:
type: string
428:
description: "No FHIR data currently in application state"
content:
text/plain:
schema:
type: string
"""
p = state.patients.get(patient_id)
if p is None:
return (
f'No patient exists with identifier "{patient_id}".',
404,
{'Content-Type': 'text/plain'}
)
n = p.notes.get(note_id)
if n is None:
return (
f'No note exists with identifier "{note_id}".',
404,
{'Content-Type': 'text/plain'}
)
return jsonify(n.to_dict()) | 5,358,774 |
def _html_build_item(tag: str, text: str, attributes: map = None, include_tags=True) -> str:
"""Builds an HTML inline element and returns the HTML output.
:param str tag: the HTML tag
:param str text: the text between the HTML tags
:param map attributes: map of attributes
:param bool include_tags: True if the tags should be part of the output
"""
attributes = attributes if attributes is not None else {}
opening_tag = "<" + tag + _html_build_attributes(attributes) + ">"
closing_tag = "</" + tag + ">"
if include_tags:
return opening_tag + text + closing_tag
else:
return text | 5,358,775 |
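A usage sketch; the exact attribute rendering depends on the _html_build_attributes helper, which is not shown here, so the expected output is an assumption.

# Illustration only: output format assumes _html_build_attributes renders key="value" pairs.
html = _html_build_item('a', 'Home', {'href': '/index.html', 'class': 'nav'})
# presumably something like: <a href="/index.html" class="nav">Home</a>
text_only = _html_build_item('span', 'plain text', include_tags=False)
# 'plain text'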
def sim_sample(
out_prefix: str,
sample_id: int,
chrom_start: int = 0,
chrom_end: int = 10000,
start_rate: float = 0.001,
end_rate: float = 0.01,
mut_rate: float = 0.01,
) -> Dict[str, File]:
"""
Simulate sequencing data for one sample (assume one chromosome).
    Regions are sequenced intervals of a chromosome.
    Muts are SNP locations, assumed to be heterozygous.
"""
regions = []
muts = []
region_start: Optional[int]
# Sample initial state.
non_seq_len = 1 / start_rate
seq_len = 1 / end_rate
if random.random() < seq_len / (seq_len + non_seq_len):
region_start = chrom_start
else:
region_start = None
# Use poisson process to sample regions and mutation sites.
pos = chrom_start
while pos < chrom_end:
pos += 1
if region_start is None:
pos += int(sample_exponential(start_rate))
if pos >= chrom_end:
break
region_start = pos
else:
region_end = min(pos + int(sample_exponential(end_rate)), chrom_end - 1)
mut_pos = pos + int(sample_exponential(mut_rate))
if region_end <= mut_pos:
regions.append((region_start, region_end, 2))
region_start = None
pos = region_end
else:
pos = mut_pos
muts.append((mut_pos, 1))
return {
"regions": write_regions(f"{out_prefix}/regions/{sample_id}.regions", regions),
"mutations": write_mutations(f"{out_prefix}/muts/{sample_id}.muts", muts),
} | 5,358,776 |
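sim_sample relies on sample_exponential, write_regions and write_mutations helpers that are not shown in this snippet. A minimal sketch of what sample_exponential presumably does (drawing an exponential inter-arrival time by inverse-CDF sampling) is given below; treat it as an assumption rather than the project's actual helper.

import math
import random

def sample_exponential(rate: float) -> float:
    # Assumed helper: draw an Exponential(rate) variate via inverse-CDF sampling.
    return -math.log(1.0 - random.random()) / rate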
def sync_with_douban():
"""三件事:一从豆瓣上同步评论到本地;二将本地作品的评论同步到豆瓣;三:将本地推荐的作品同步到豆瓣
一不需要access_token,二三需要
策略:豆瓣本次登陆将access_token存到user的access_token字段中
access_token有效期3920s > 1h,定时任务1h1次,在豆瓣用户登陆账户一小时之类利用有效期内的access_token抓取其评论数据
分析:豆瓣用户每次登陆之后在本站的评论或者推荐信息 与 在豆瓣上的评论信息,在一小时之类必定会与豆瓣进行有效同步
"""
flask_app = create_app()
with flask_app.app_context():
one_hour_ago = datetime.utcnow() + timedelta(hours=-1)
# print one_hour_ago
for user in User.query.filter_by(is_activated=1):
# print user.douban_abbr
# xiaoaifansion
            # Douban Books permissions are not required
collections_info = requests.get(
collections_url % user.douban_abbr).json()
if collections_info:
recommendations = Recommendation.query.filter_by(status_id=3).filter_by(
user_id=user.id).filter(Recommendation.finished > one_hour_ago).all()
work_dict = {}
for recommendation in recommendations:
work_dict[recommendation.work.url.strip(
'/').split('/')[-1]] = recommendation.work
# print work_dict
# {u'1052241': Work 设计模式}
collection_ids = []
collections = collections_info["collections"]
if collections:
comments = Comment.query.filter_by(user_id=user.id).filter(
Comment.created > one_hour_ago).all()
# crawl comments
                    # Among works marked "read" and commented on Douban, pick those already listed in the system and pull the Douban comments to the local site
for collection in collections:
collection_ids.append(collection['book']['id'])
                        # work already listed in the system
work = Work.query.filter_by(
url=collection['book']['alt']).first()
if work:
                            # marked as read and commented on Douban
if collection['status'] == 'read' and collection['comment']:
# comment = Comment.query.filter_by(
# content=collection['comment']).first()
comment = Comment.query.filter_by(
user_id=user.id).filter_by(work_id=work.id).first()
                                # if this comment has not been crawled yet, add it to the system as a new comment
if not comment:
                                    # Douban comment timestamps are UTC+8; convert to UTC before storing on the server
comment = Comment(
content=collection['comment'], user_id=user.id, work_id=work.id, created=datetime.strptime(collection['updated'], "%Y-%m-%d %H:%M:%S") + timedelta(hours=-8))
                                else:  # if the comment already exists in the system, just update its content
comment.content = collection['comment']
# print comment.content
                                # for testing
db.session.add(comment)
db.session.commit()
access_token = user.access_token
headers = {
"Authorization": "Bearer %s" % access_token}
# print comments
# [Comment 20, Comment 21]
# print config.DOUBAN_CLIENT_ID
# 088cbee5d793b72a19a63cb186bb257e
# print access_token
# 4b02cc1fdfae6fa9e108645f9f0b4efb
# print headers
# {'Authorization': u'Bearer 4b02cc1fdfae6fa9e108645f9f0b4efb'}
if access_token:
for collection in collections:
# push comments
                            # sync comments on listed works from the system to Douban
                            # requires permission; currently fails without it
                            # push succeeded 2015-01-26
for comment in comments:
if comment.user_id == user.id and collection['book']['alt'] == comment.work.url:
data = {
'status': collection['status'],
'comment': comment.content,
'scope': 'douban_basic_common'
}
res = requests.put(
collection_url % collection['book']['id'], data, headers=headers)
# print res.status_code
# 202
# print res.content
# print comment
break
# push recommendations
                        # for works recommended in the system, sync them to the Douban collection
                        # requires permission; currently fails without it
                        # push succeeded 2015-01-26
# print collection_ids
# [u'1052241', u'1858513', u'6709783', u'2567698', u'1230413', u'1788421', u'24738302', u'6021440', u'1084336', u'3117898', u'3688489', u'3323633', u'1894695', u'1786387', u'2209702', u'6709809', u'11229716', u'25814739', u'25863621', u'25900403']
for work_id, work in work_dict.iteritems():
if not work_id in collection_ids:
data = {
'status': 'read',
'comment': work.recommendation.recomm_reason,
'scope': 'douban_basic_common'
}
res = requests.post(
collection_url % work_id, data, headers=headers)
# print res.status_code
# print res.content | 5,358,777 |
def vif_col(X, y, col_name):
"""计算vif
计算具体一个column的vif,
一般阈值在5或者10,超过这个数字则表明有
共线性。
Attributes:
X (pd.DataFrame): 自变量
y (pd.Series): 因变量
col_name (str): 需要判断的列
References:
James, Gareth, Daniela Witten, Trevor Hastie, and Robert Tibshirani.
An Introduction to Statistical Learning. pp. 112, Vol. 112: Springer, 2013.
"""
r_square_minus = model(X.loc[:, X.columns != col_name].values, y).rsquared
return 1 / (1 - r_square_minus) | 5,358,778 |
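For reference, the textbook VIF from the cited ISLR passage regresses the column of interest on the remaining predictors and returns 1 / (1 - R^2). A standalone numpy sketch of that computation, independent of the model helper used above, could look like this.

import numpy as np
import pandas as pd

def vif_textbook(X: pd.DataFrame, col_name: str) -> float:
    """Regress X[col_name] on the other columns (with intercept) and return 1 / (1 - R^2)."""
    y = X[col_name].to_numpy(dtype=float)
    Z = X.drop(columns=[col_name]).to_numpy(dtype=float)
    Z = np.column_stack([np.ones(len(Z)), Z])          # add intercept column
    beta, *_ = np.linalg.lstsq(Z, y, rcond=None)
    resid = y - Z @ beta
    r_squared = 1.0 - resid.var() / y.var()
    return 1.0 / (1.0 - r_squared)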
def test_get_group_not_found(client):
"""Test that a group that does not exist returns a HTTP Not Found."""
response = client.get('/group/does-not-exist')
assert response.status == '404 Not Found'
assert response.data == b'' | 5,358,779 |
def get_mesh_stat(stat_id_start_str, attr_value, xmin, ymin, xmax, ymax):
"""
    Fetch statistics for regional grid-square meshes.
    @param stat_id_start_str leading characters of the statistics ID; every ID starting with this prefix is fetched.
    @param attr_value value used to filter on cat01
    @param xmin bounding box of the area to fetch
    @param ymin bounding box of the area to fetch
    @param xmax bounding box of the area to fetch
    @param ymax bounding box of the area to fetch
"""
rows = database_proxy.get_conn().execute("""
SELECT
statValue.value,
AsGeoJson(MapArea.Geometry)
FROM
MapArea
inner join idx_MapArea_Geometry ON pkid = MapArea.id AND xmin > ? AND ymin > ? AND xmax < ? AND ymax < ?
inner join statValueAttr ON MapArea.stat_val_attr_id = statValueAttr.id
inner join statValueAttr AS b ON b.stat_value_id = statValueAttr.stat_value_id AND b.attr_value = ?
inner join statValue ON statValue.id = b.stat_value_id
WHERE
MapArea.stat_id like ?;
""", (xmin, ymin, xmax, ymax, attr_value, stat_id_start_str + '%'))
ret = []
for r in rows:
ret.append({
'value': r[0],
'geometory': r[1]
})
return ret | 5,358,780 |
def free(ptr):
"""Free the given pointer, as returned by C malloc. If it is NULL, nothing happens."""
libc.free(ptr) | 5,358,781 |
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
try to find the best preceeding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in \
('1', 'on', 'yes', 'true')
silent = getbool(options, 'silent', True)
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'trimmed'):
environment.policies['ext.i18n.trimmed'] = True
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError as e:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno) | 5,358,782 |
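A minimal direct invocation sketch; in practice Babel drives this function through its extraction-plugin machinery, and it assumes the surrounding module's imports (Environment, extract_from_ast, _CommentFinder and the default delimiter constants) are available. The template string and expected output are illustrative.

from io import BytesIO

template = BytesIO(b"<p>{{ _('Hello, %(name)s!', name=user) }}</p>")
for lineno, funcname, message, comments in babel_extract(
        template, keywords=('_', 'gettext', 'ngettext'), comment_tags=[], options={}):
    print(lineno, funcname, message, comments)
# expected to yield something like: 1 _ Hello, %(name)s! []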
def main(directory='.', verbose=True):
"""Lists "data" files recursively in a given directory, tar files
are extracted.
The "data" files have :file:`info` and :file:`pickle` extensions.
TODO: not only recognize .tar and .tar.gz and .tgz but .zip...
"""
filelist = list()
directory = get_directory(directory, True)
# Search through the directory directory and all its subfolders.
for root, _dirs, files in os.walk(directory):
if verbose:
print 'Searching in %s ...' % root
for elem in files:
if elem.endswith('.info') or elem.endswith('.pickle') or elem.endswith('.pickle.gz'):
filelist.append(os.path.join(root, elem))
if verbose:
print 'Found %d file(s).' % (len(filelist))
if not filelist:
warnings.warn('Could not find any file of interest in %s!' % root)
return filelist | 5,358,783 |
def onedthreegaussian(x, H, A1, dx1, w1, A2, dx2, w2, A3, dx3, w3):
"""
    Returns the sum of a constant baseline and three 1-dimensional Gaussians:
    H + sum of Ai*numpy.exp(-(x-dxi)**2/(2*wi**2)) for i = 1, 2, 3
"""
g1 = A1 * numpy.exp(-(x-dx1)**2 / (2*w1**2))
g2 = A2 * numpy.exp(-(x-dx2)**2 / (2*w2**2))
g3 = A3 * numpy.exp(-(x-dx3)**2 / (2*w3**2))
return H + g1 + g2 + g3 | 5,358,784 |
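A small usage example evaluating the three-Gaussian model on a grid (numpy is assumed to be imported, as in the function body); the parameter values are arbitrary.

x = numpy.linspace(-5, 5, 201)
y = onedthreegaussian(x, H=1.0,
                      A1=2.0, dx1=-2.0, w1=0.5,
                      A2=1.0, dx2=0.0, w2=1.0,
                      A3=0.5, dx3=2.0, w3=0.8)
# y peaks near x = -2, 0 and 2 on top of a constant baseline of 1.0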
def match_any_if_key_matches(audit_id, result_to_compare, args):
"""
    Compare attributes only when the key of interest is found.
    Even if the list does not contain the name of interest, the check passes.
    Match dictionary elements dynamically, against a list of candidate dictionaries.
    The match_key argument controls this: matching is attempted only when this key is found in result_to_compare.
    Returns True if match_key is found and the listed attributes match;
    True if match_key is NOT found (no further matching is even attempted);
    False if match_key is found but the attributes do not match.
comparator:
type: dict
match_any_if_key_matches:
match_key: name
args:
- name: abc
running: false
- name: xyz
running: false
Input: {name: hjk, running: false}
    Output: True, since name: hjk was not found
Input: {name: abc, running: false}
Output: True, as found name: abc and matched running: false
:param result_to_compare:
Dictionary values to compare
:param args:
Comparator dictionary as mentioned in the check.
"""
log.debug('Running dict::match_any_if_key_matches for audit_id: {0}'.format(audit_id))
key_name = args['match_any_if_key_matches']['match_key']
if key_name not in result_to_compare:
log.debug("Required key '%s' is not found in '%s' for audit_id '%s'", key_name, result_to_compare, audit_id)
return True, "pass_as_key_not_found"
key_found_once = False
for to_match_dict in args['match_any_if_key_matches']['args']:
errors = []
if result_to_compare[key_name] == to_match_dict[key_name]:
key_found_once = True
_compare_dictionary(audit_id, result_to_compare, to_match_dict, errors)
if not errors:
# found a match
log.debug("dictionary comparison successful."
" '%s' matches '%s'", to_match_dict, result_to_compare)
return True, "Dictionary comparison passed"
else:
log.debug("dictionary comparison is not successful."
" '%s' does not match '%s'", to_match_dict, result_to_compare)
return False, "Dictionary comparison failed in dict::match_any_if_key_matches, " \
"errors={0}".format(str(errors))
if not key_found_once:
error_message = "key '{0}' exists in dict '{1}', " \
"but does not match intended values".format(key_name, result_to_compare)
log.debug(error_message)
return False, error_message | 5,358,785 |
def confusion_matrix_by_prob(true: np.ndarray,
predicted_prob: np.ndarray,
thresholds: Optional[Union[list, tuple, np.ndarray]] = None,
pos_label: Union[bool, str, int] = _DEFAULT_POS_LABEL,
output_metrics: Optional[list] = None,
table: bool = True,
**kwargs):
"""
confusion matrix for binary classification according to a given set of thresholds;
:param true: numpy.ndarray(shape=(m), ), an array of true classes;
:param predicted_prob: numpy.ndarray(shape=(m), ),
an array of predicted probabilities of being the positive class;
:param thresholds: [list, tuple, np.array, None] the thresholds set on predicted probabilities
such that any predicted probability greater or equal to the threshold will be classified as the positive class;
:param pos_label: [str, bool, int], positive class label, label that is considered as the positive class;
:param output_metrics: [list, None], metrics to be outputted if selected;
    :param table: bool, whether to export the result as a pandas DataFrame table;
:param kwargs:
:param metric_order: [list, None], if table is selected to be the output, metric order specifies the order of
metrics presented in the table;
:return: dict, a set of confusion matrices, {threshold: {metric_name: metric_value, ...}, ...};
"""
# convert true series to positive series
true = true == pos_label
# select output:
if isinstance(output_metrics, list):
for selected_metric in output_metrics:
if selected_metric not in _FULL_METRICS:
raise KeyError(f"metric {selected_metric} is not recognized.")
elif output_metrics == 'confusion':
output_metrics = ['TP', 'FN', 'FP', 'TN',
'Recall', 'FNR', 'FPR', 'TNR', 'Precision', 'FOR', 'FDR', 'NPV',
'Prevalence', 'Accuracy']
else:
output_metrics = _FULL_METRICS
metrics_by_thresholds = dict()
for threshold in thresholds:
metrics_by_threshold = dict()
predicted = predicted_prob >= threshold
confusion_matrix_dict = confusion_matrix(true=true, predicted=predicted, normalize=False)
confusion_matrix_nor_true = normalize_confusion_matrix(confusion_matrix_dict=confusion_matrix_dict,
normalize_index=0)
confusion_matrix_nor_predicted = normalize_confusion_matrix(confusion_matrix_dict=confusion_matrix_dict,
normalize_index=1)
if 'TP' in output_metrics:
metrics_by_threshold['TP'] = confusion_matrix_dict[(True, True)]
if 'FN' in output_metrics:
metrics_by_threshold['FN'] = confusion_matrix_dict[(True, False)]
if 'FP' in output_metrics:
metrics_by_threshold['FP'] = confusion_matrix_dict[(False, True)]
if 'TN' in output_metrics:
metrics_by_threshold['TN'] = confusion_matrix_dict[(False, False)]
if 'Recall' in output_metrics:
metrics_by_threshold['Recall'] = confusion_matrix_nor_true[(True, True)]
if 'FNR' in output_metrics:
metrics_by_threshold['FNR'] = confusion_matrix_nor_true[(True, False)]
if 'FPR' in output_metrics:
metrics_by_threshold['FPR'] = confusion_matrix_nor_true[(False, True)]
if 'TNR' in output_metrics:
metrics_by_threshold['TNR'] = confusion_matrix_nor_true[(False, False)]
if 'Precision' in output_metrics:
metrics_by_threshold['Precision'] = confusion_matrix_nor_predicted[(True, True)]
if 'FOR' in output_metrics:
metrics_by_threshold['FOR'] = confusion_matrix_nor_predicted[(True, False)]
if 'FDR' in output_metrics:
metrics_by_threshold['FDR'] = confusion_matrix_nor_predicted[(False, True)]
if 'NPV' in output_metrics:
metrics_by_threshold['NPV'] = confusion_matrix_nor_predicted[(False, False)]
if 'Prevalence' in output_metrics:
metrics_by_threshold['Prevalence'] = \
(confusion_matrix_dict[(True, True)] + confusion_matrix_dict[(True, False)]) / sum(confusion_matrix_dict.values())
if 'Accuracy' in output_metrics:
metrics_by_threshold['Accuracy'] = \
(confusion_matrix_dict[(True, True)] + confusion_matrix_dict[(False, False)]) / sum(confusion_matrix_dict.values())
if 'LR+' in output_metrics:
# positive likelihood ratio:
try:
metrics_by_threshold['LR+'] = confusion_matrix_nor_true[(True, True)] / confusion_matrix_nor_true[(False, True)]
except ZeroDivisionError:
metrics_by_threshold['LR+'] = '-'
if 'LR-' in output_metrics:
# negative likelihood ratio:
try:
metrics_by_threshold['LR-'] = confusion_matrix_nor_true[(True, False)] / confusion_matrix_nor_true[(False, False)]
except ZeroDivisionError:
metrics_by_threshold['LR-'] = '-'
if 'DOR' in output_metrics:
# diagnostic odds ratio:
try:
metrics_by_threshold['DOR'] = (confusion_matrix_nor_true[(True, True)] / confusion_matrix_nor_true[(False, True)]) / \
(confusion_matrix_nor_true[(True, False)] / confusion_matrix_nor_true[(False, False)])
except ZeroDivisionError:
metrics_by_threshold['DOR'] = '-'
if 'F1' in output_metrics:
# F1 score:
try:
metrics_by_threshold['F1'] = 2 * (confusion_matrix_nor_true[(True, True)] * confusion_matrix_nor_predicted[(True, True)]) / \
(confusion_matrix_nor_true[(True, True)] + confusion_matrix_nor_predicted[(True, True)])
except ZeroDivisionError:
metrics_by_threshold['F1'] = '-'
metrics_by_thresholds[threshold] = metrics_by_threshold
if table:
if 'metric_order' in kwargs:
metric_order = kwargs['metric_order']
else:
metric_order = None
metrics_by_thresholds = \
convert_confusion_matrix_by_prob_to_table_with_reformat_precision(metrics_by_thresholds=metrics_by_thresholds,
metric_order=metric_order)
return metrics_by_thresholds | 5,358,786 |
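A hypothetical usage sketch, assuming the confusion_matrix and normalization helpers referenced above are importable from the same module; labels and probabilities are made up.

import numpy as np

y_true = np.array([1, 1, 0, 0, 1, 0])
y_prob = np.array([0.9, 0.4, 0.2, 0.6, 0.8, 0.1])
metrics = confusion_matrix_by_prob(
    true=y_true,
    predicted_prob=y_prob,
    thresholds=[0.3, 0.5, 0.7],
    pos_label=1,
    output_metrics=['TP', 'FP', 'Recall', 'Precision'],
    table=False)
# metrics[0.5] would hold TP/FP/Recall/Precision for the 0.5 cutoff.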
def group_slaves_by_key_func(
key_func: _GenericNodeGroupingFunctionT,
slaves: Sequence[_GenericNodeT],
sort_func: _GenericNodeSortFunctionT = None,
) -> Mapping[_KeyFuncRetT, Sequence[_GenericNodeT]]:
""" Given a function for grouping slaves, return a
dict where keys are the unique values returned by
the key_func and the values are all those slaves which
have that specific value.
:param key_func: a function which consumes a slave and returns a value
:param slaves: a list of slaves
:returns: a dict of key: [slaves]
"""
sorted_slaves: Sequence[_GenericNodeT]
if sort_func is None:
sorted_slaves = sorted(slaves, key=key_func)
else:
sorted_slaves = sort_func(slaves)
return {k: list(v) for k, v in itertools.groupby(sorted_slaves, key=key_func)} | 5,358,787 |
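A small usage example with plain dicts standing in for slave objects.

slaves = [
    {'pool': 'default', 'hostname': 'host1'},
    {'pool': 'gpu', 'hostname': 'host2'},
    {'pool': 'default', 'hostname': 'host3'},
]
by_pool = group_slaves_by_key_func(lambda slave: slave['pool'], slaves)
# {'default': [{'pool': 'default', 'hostname': 'host1'}, {'pool': 'default', 'hostname': 'host3'}],
#  'gpu': [{'pool': 'gpu', 'hostname': 'host2'}]}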
def init_clfs():
""" init classifiers to train
Returns:
dict, clfs
"""
clfs = dict()
# clfs['xgb'] = XGBClassifier(n_jobs=-1)
clfs['lsvc'] = LinearSVC()
return clfs | 5,358,788 |
def get_field_types():
"""Get a dict with all registration field types."""
return get_field_definitions(RegistrationFormFieldBase) | 5,358,789 |
def get_md_links(filepath):
"""Get markdown links from a md file.
The links' order of appearance in the file IS preserved in the output.
This is to check for syntax of the format [...](...).
The returned 'links' inside the () are not checked for validity or
subtle differences (e.g. '/' vs no '/' at the end of a URL).
Args:
filepath (pathlib Path): Path object representing the file from
which info will be extracted.
Returns:
list of strings
"""
text_str = _get_ascii_plaintext_from_md_file(filepath)
links = _get_all_md_link_info_from_ascii_plaintext(text_str)
if links: # links only, not their text
return [t[-1] for t in links]
else:
return links | 5,358,790 |
def write_number_string(fp, data_element, padding=' '):
"""Handle IS or DS VR - write a number stored as a string of digits."""
# If the DS or IS has an original_string attribute, use that, so that
# unchanged data elements are written with exact string as when read from file
val = data_element.value
if isinstance(val, (list, tuple)):
val = "\\".join((x.original_string if hasattr(x, 'original_string')
else str(x) for x in val))
else:
val = val.original_string if hasattr(val, 'original_string') else str(val)
if len(val) % 2 != 0:
val = val + padding # pad to even length
if not in_py2:
val = bytes(val, default_encoding)
fp.write(val) | 5,358,791 |
def dask_to_zarr(df, z, loc, chunk_size, nthreads: int, msg: str = None):
# TODO: perhaps change name of Dask array so it does not get confused with a dataframe
"""
Creates a Zarr hierarchy from a Dask array.
Args:
df (): Dask array.
z (): Zarr hierarchy.
loc (): Location to write data/Zarr hierarchy to.
chunk_size (): Size of chunks to load into memory and process.
nthreads (int): Number of threads to use.
msg (str): Message to use with progress bar (Default: f"Writing data to {loc}").
"""
if msg is None:
msg = f"Writing data to {loc}"
og = create_zarr_dataset(z, loc, chunk_size, 'float64', df.shape)
pos_start, pos_end = 0, 0
for i in tqdm(df.blocks, total=df.numblocks[0], desc=msg):
pos_end += i.shape[0]
og[pos_start:pos_end, :] = controlled_compute(i, nthreads)
pos_start = pos_end
return None | 5,358,792 |
def get_reactor_logs(project_id, application_id, api_key=None, **request_kwargs):
"""
Get the logs of a Reactor script.
:param project_id: The Project of the Application.
:type project_id: str
:param application_id: The Application to get the script logs for.
:type application_id: str
:param api_key: The API key to authorize request against.
:type api_key: str
:return:
"""
url = '/projects/{}/applications/{}/reactorLogs'.format(
project_id, application_id)
return utils.request('GET', url, api_key=api_key, accept=True, **request_kwargs) | 5,358,793 |
def bids_init(bids_src_dir, overwrite=False):
"""
Initialize BIDS source directory
:param bids_src_dir: string
BIDS source directory
    :param overwrite: bool
Overwrite flag
:return True
"""
# Create template JSON dataset description
datadesc_json = os.path.join(bids_src_dir, 'dataset_description.json')
meta_dict = dict({'BIDSVersion': "1.0.0",
'License': "This data is made available under the Creative Commons BY-SA 4.0 International License.",
'Name': "The dataset name goes here",
'ReferencesAndLinks': "References and links for this dataset go here"})
# Write JSON file
bids_write_json(datadesc_json, meta_dict, overwrite)
return True | 5,358,794 |
def summation_i_squared(n):
"""Summation without for loop"""
if not isinstance(n, int) or n < 1:
return None
return int(((n*(n+1)*(2*n+1))/6)) | 5,358,795 |
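A quick worked check of the closed-form formula n*(n+1)*(2*n+1)/6 against the explicit sum.

print(summation_i_squared(5))                                      # 55 == 1 + 4 + 9 + 16 + 25
print(summation_i_squared(5) == sum(i * i for i in range(1, 6)))   # True
print(summation_i_squared(0))                                      # None (n must be a positive integer)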
def str_to_number(this):
"""
Convert string to a Number
"""
try:
return mknumber(int(this.value))
except ValueError:
return mknumber(float(this.value)) | 5,358,796 |
def _LinterRunCommand(cmd, debug, **kwargs):
"""Run the linter with common RunCommand args set as higher levels expect."""
return cros_build_lib.RunCommand(cmd, error_code_ok=True, print_cmd=debug,
debug_level=logging.NOTICE, **kwargs) | 5,358,797 |
def plotTSNE(Xdata, target = None, useMulti=True, num=2500, savename=None, njobs=4, size=4, cmap=None, dim=(12,8)):
"""
Plot TSNE for training data
Inputs:
> Xdata: The training feature data (DataFrame)
> target: The training target data (Series)
> num (2500 by default): The number of rows to use
Output: None
"""
sns.set(style="ticks")
if Xdata is None:
print("Xdata is NONE in plotTSNE!")
return None
if not isDataFrame(Xdata):
print("Xdata is not a Pandas DataFrame!")
return None
if target is not None:
if not isSeries(target):
print("target is not a Pandas Series!")
return None
print("Computing TSNE for {0} events with {1} features".format(num, Xdata.shape[1]))
projection, tsneFeatures, tsneTarget = computeTSNE(Xdata=Xdata, target=target, useMulti=useMulti, num=num, njobs=njobs)
print("Plotting TSNE for {0} events".format(num))
showTSNE(projection=projection, target=target, savename=savename, title="TSNE", size=size, cmap=cmap, dim=dim)
return projection, tsneFeatures, tsneTarget | 5,358,798 |
async def test_arm_home_with_pending(hass):
"""Test arm home method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual",
"name": "test",
"code": CODE,
"arming_time": 1,
"disarm_after_trigger": False,
}
},
)
entity_id = "alarm_control_panel.test"
assert STATE_ALARM_DISARMED == hass.states.get(entity_id).state
await common.async_alarm_arm_home(hass, CODE, entity_id)
assert STATE_ALARM_ARMING == hass.states.get(entity_id).state
state = hass.states.get(entity_id)
assert state.attributes["next_state"] == STATE_ALARM_ARMED_HOME
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_ARMED_HOME | 5,358,799 |