content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def serve_application(
kb_model_dir: Text = KB_DEFAULT_MODEL_DIR,
kb_data_dir: Text = KB_DEFAULT_DATA_DIR,
qa_model_dir: Text = QA_MODEL_DIR,
es_url: Text = ELASTICSEARCH_URL,
index: Text = QA_INDEX,
interface: Optional[Text] = DEFAULT_SERVER_INTERFACE,
port: int = DEFAULT_SERVER_PORT,
cors: Optional[Union[Text, List[Text]]] = None,
auth_token: Optional[Text] = None,
response_timeout: int = DEFAULT_RESPONSE_TIMEOUT,
jwt_secret: Optional[Text] = None,
jwt_method: Optional[Text] = None,
log_file: Optional[Text] = None,
ssl_certificate: Optional[Text] = None,
ssl_keyfile: Optional[Text] = None,
ssl_ca_file: Optional[Text] = None,
ssl_password: Optional[Text] = None,
use_syslog: Optional[bool] = False,
syslog_address: Optional[Text] = None,
syslog_port: Optional[int] = None,
syslog_protocol: Optional[Text] = None,
) -> None:
"""Run the API entrypoint."""
app = configure_app(
cors,
auth_token,
response_timeout,
jwt_secret,
jwt_method,
)
ssl_context = server.create_ssl_context(
ssl_certificate, ssl_keyfile, ssl_ca_file, ssl_password
)
protocol = "https" if ssl_context else "http"
logger.info(f"Starting Knowlife server on {protocol}://{interface}:{port}")
app.register_listener(
partial(load_agent_on_start,
kb_model_dir,
kb_data_dir,
qa_model_dir,
es_url,
index),
"before_server_start",
)
# app.register_listener(close_resources, "after_server_stop")
number_of_workers = sanic_utils.number_of_sanic_workers()
sanic_utils.update_sanic_log_level(
log_file, use_syslog, syslog_address, syslog_port, syslog_protocol,
)
app.run(
host=interface,
port=port,
ssl=ssl_context,
backlog=int(os.environ.get(ENV_SANIC_BACKLOG, "100")),
workers=number_of_workers,
) | 500 |
def decimal_to_boolean_list(num: int, padding: int = 0) -> Tuple[bool, ...]:
"""
Convert a decimal number into a tuple of booleans, representing its binary value.
"""
# Convert the decimal into binary
binary = bin(num).replace('0b', '').zfill(padding)
# Return a tuple of booleans, one for each element of the binary number (it's either '0' or '1' so we can convert
# directly to boolean)
return tuple(char == '1' for char in binary) | 501 |
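A quick usage sketch of the converter above, assuming the function is in scope:
# 5 is 0b101; zero-padded to four bits this becomes 0101
assert decimal_to_boolean_list(5, padding=4) == (False, True, False, True)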
def test_initialize_database(pristine_db_engine: Engine):
"""Test that the database can be correctly initialized and that the
expected tables and views exist.
"""
config = PacsaniniConfig(
storage=StorageConfig(resources=str(pristine_db_engine.url), directory="./")
)
utils.initialize_database(config, echo=False)
inspector = inspect(pristine_db_engine)
expected_table_names = [
"alembic_version",
models.Image.__tablename__,
models.Series.__tablename__,
models.Study.__tablename__,
models.StudyFind.__tablename__,
models.Patient.__tablename__,
]
assert set(inspector.get_table_names()) == set(expected_table_names)
expected_view_names = [
views.StudyMetaView.__tablename__,
views.ManufacturerView.__tablename__,
]
assert set(inspector.get_view_names()) == set(expected_view_names) | 502 |
def get_credentials(fn, url, username, allowed):
"""Call fn and return the credentials object"""
url_str = maybe_string(url)
username_str = maybe_string(username)
creds = fn(url_str, username_str, allowed)
credential_type = getattr(creds, 'credential_type', None)
credential_tuple = getattr(creds, 'credential_tuple', None)
if not credential_type or not credential_tuple:
raise TypeError("credential does not implement interface")
cred_type = credential_type
if not (allowed & cred_type):
raise TypeError("invalid credential type")
ccred = ffi.new('git_cred **')
if cred_type == C.GIT_CREDTYPE_USERPASS_PLAINTEXT:
name, passwd = credential_tuple
err = C.git_cred_userpass_plaintext_new(ccred, to_bytes(name),
to_bytes(passwd))
elif cred_type == C.GIT_CREDTYPE_SSH_KEY:
name, pubkey, privkey, passphrase = credential_tuple
if pubkey is None and privkey is None:
err = C.git_cred_ssh_key_from_agent(ccred, to_bytes(name))
else:
err = C.git_cred_ssh_key_new(ccred, to_bytes(name),
to_bytes(pubkey), to_bytes(privkey),
to_bytes(passphrase))
else:
raise TypeError("unsupported credential type")
check_error(err)
return ccred | 503 |
def tpack(text, width=100):
"""Pack a list of words into lines, so long as each line (including
intervening spaces) is no longer than _width_"""
lines = [text[0]]
for word in text[1:]:
if len(lines[-1]) + 1 + len(word) <= width:
lines[-1] += (' ' + word)
else:
lines += [word]
return lines | 504 |
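A minimal usage sketch of the greedy packer above, assuming the function is in scope and given a non-empty list of words:
words = ["All", "work", "and", "no", "play"]
# with width=12, words are packed greedily into lines of at most 12 characters
assert tpack(words, width=12) == ["All work and", "no play"]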
async def on_ready():
"""
Prints Logged in when the Bot is Ready
"""
print("Logged in as: " + bot.user.name + "\n") | 505 |
def _log_evidence_func(arr):
"""Returns an estimate of the log evidence from a set of log importance wegiths
in arr. arr has shape TxN where T is the number of trials and N is the number
of samples for estimation.
Args:
arr (torch.FloatTensor of shape TxN): log importance weights
Returns:
A tensor of shape (T,) representing the estimates for each set of samples.
"""
T, N = arr.shape
log_evidence = torch.logsumexp(arr, dim=1) - np.log(N)
return log_evidence | 506 |
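A small worked check of the estimator above (a sketch assuming torch and numpy are available): when all log-weights are zero, logsumexp over N samples equals log(N), so the estimate is numerically zero.
import numpy as np
import torch
arr = torch.zeros(3, 8)  # T=3 trials, N=8 samples, all log importance weights equal to 0
print(_log_evidence_func(arr))  # tensor of three values numerically equal to 0.0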
def _accumulated_moments_for_inference(mean, variance, is_training):
"""Use accumulated statistics for moments during inference.
After training the user is responsible for filling the accumulators with the
actual values. See _UpdateBnAccumulators() in eval_gan_lib.py for an example.
Args:
mean: Tensor of shape [num_channels] with the mean of the current batch.
variance: Tensor of shape [num_channels] with the variance of the current
batch.
is_training: Boolean, whether to construct ops for training or inference
graph.
Returns:
Tuple of (mean, variance) to use. This can be the same as the inputs.
"""
variable_collections = [
tf.GraphKeys.MODEL_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES,
]
with tf.variable_scope("accu", values=[mean, variance]):
# Create variables for accumulating batch statistic and use them during
# inference. The ops for filling the accumulators must be created and run
# before eval. See docstring above.
accu_mean = tf.get_variable(
"accu_mean",
shape=mean.shape,
initializer=tf.zeros_initializer(),
trainable=False,
collections=variable_collections)
accu_variance = tf.get_variable(
"accu_variance",
shape=variance.shape,
initializer=tf.zeros_initializer(),
trainable=False,
collections=variable_collections)
accu_counter = tf.get_variable(
"accu_counter",
shape=[],
initializer=tf.initializers.constant(1e-12),
trainable=False,
collections=variable_collections)
update_accus = tf.get_variable(
"update_accus",
shape=[],
dtype=tf.int32,
initializer=tf.zeros_initializer(),
trainable=False,
collections=variable_collections)
mean = tf.identity(mean, "mean")
variance = tf.identity(variance, "variance")
if is_training:
return mean, variance
logging.debug("Using accumulated moments.")
# Return the accumulated batch statistics and add current batch statistics
# to accumulators if update_accus variables equals 1.
def update_accus_fn():
return tf.group([
tf.assign_add(accu_mean, mean),
tf.assign_add(accu_variance, variance),
tf.assign_add(accu_counter, 1),
])
dep = tf.cond(
tf.equal(update_accus, 1),
update_accus_fn,
tf.no_op)
with tf.control_dependencies([dep]):
return accu_mean / accu_counter, accu_variance / accu_counter | 507 |
def parse_bgp_attr(atype, aval_buf):
"""Given a type and value buffer, parses a BGP attribute and returns the value
parsed"""
if atype == BGP_ATYPE_ORIGIN:
attr = 'ORIGIN'
if len(aval_buf) != 1:
return None, None, -1
aval = struct.unpack('B', aval_buf)[0]
aval = BGP_ORIGIN_TYPES[aval]
return attr, aval, 1
elif atype == BGP_ATYPE_ASPATH:
attr = 'ASPATH'
segtype, seglen = struct.unpack('BB', aval_buf[:2])
ases = []
segproc = 2
for i in range(seglen):
as_, = struct.unpack('>I', aval_buf[segproc:segproc+4])
segproc += 4
ases.append(as_)
return attr, ases, len(aval_buf)
elif atype == BGP_ATYPE_NEXTHOP:
attr = 'NEXTHOP'
aval = inet_ntoa(aval_buf)
return attr, aval, 4
else:
return None, None, len(aval_buf) | 508 |
def _get_embedding_filename(base_dir, split_name, step):
"""Create the filename for embeddings."""
return os.path.join(base_dir, str(step), f'{split_name}-embeddings.tfrecord') | 509 |
def getUsage():
""" Get usage information about running APBS via Python
Returns (usage)
usage: Text about running APBS via Python
"""
usage = "\n\n\
----------------------------------------------------------------------\n\
This driver program calculates electrostatic potentials, energies,\n\
and forces using multigrid methods.\n\
It is invoked as:\n\n\
python main.py apbs.in\n\
----------------------------------------------------------------------\n\n"
return usage | 510 |
def run_length_div_decode(x, n, divisor):
"""Decodes a run length encoded array and scales/converts integer values to float
Parameters
----------
x : encoded array of integers (value, repeat pairs)
n : number of elements in the decoded array
divisor : value used to scale the decoded integer values to float
"""
y = np.empty(n, dtype=np.float32)
start = 0
for i in range(0, x.shape[0] - 1, 2):
end = x[i + 1] + start
y[start:end] = x[i] / divisor
start = end
return y | 511 |
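A short usage sketch of the decoder above, assuming numpy is imported as np: the (value, repeat) pairs (10, 3) and (20, 2) expand to three and two elements, each divided by the divisor.
import numpy as np
x = np.array([10, 3, 20, 2])
print(run_length_div_decode(x, n=5, divisor=10.0))  # [1. 1. 1. 2. 2.]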
def parse_args():
"""Build file label list"""
parser = argparse.ArgumentParser(description='Build file label list')
parser.add_argument('data_path', type=str,
help='root directory for the dataset')
parser.add_argument('dataset', type=str, choices=[
'ucf101', 'hmdb51',
'kinetics400', 'kinetics600', 'kinetics700',
'sthv1', 'sthv2'],
help='name of the dataset')
parser.add_argument('--ann_root', type=str, default='annotation')
parser.add_argument('--out_root', type=str, default='../datalist')
parser.add_argument('--phase', type=str, default='train',
choices=['train', 'val'])
parser.add_argument('--level', type=int, default=2, choices=[1, 2])
parser.add_argument('--source', type=str, default='rgb',
choices=['rgb', 'flow', 'video'])
parser.add_argument('--split', type=int, default=1, choices=[1, 2, 3])
args = parser.parse_args()
return args | 512 |
def modelFnBuilder(config):
"""Returns 'model_fn' closure for Estimator."""
def model_fn(features, labels, mode, params):
print('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = {}, shape = {}'.format(name, features[name].shape))
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
# get the data
input_texts = features['input_texts']
input_texts_length = features['input_texts_length']
input_chars = features['input_chars']
input_chars_length = features['input_chars_length']
output_tags = labels['output_tags'] if is_training else None
# build the model
model = MultiTaskIntentModel(config,
cg.BATCH_SIZE,
is_training,
input_texts=input_texts,
input_texts_length=input_texts_length,
input_chars=input_chars,
input_chars_length=input_chars_length,
output_tags=output_tags)
# predict
if mode == tf.estimator.ModeKeys.PREDICT:
intent_logits = model.getResults('intent_logits')
intent_probs = tf.nn.softmax(intent_logits, axis=-1)
intent_labels = tf.math.argmax(intent_probs, axis=-1)
tag_logits = model.getResults('tag_logits')
viterbi_sequence, viterbi_score = model.decode(logit=tag_logits, sequence_lengths=input_texts_length)
predictions = {'intent_labels': intent_labels,
'viterbi_sequence': viterbi_sequence,
'viterbi_score': viterbi_score}
output_spec = tf.estimator.EstimatorSpec(mode, predictions)
elif mode == tf.estimator.ModeKeys.TRAIN:
gold_intent_labels = labels['output_indents']
intent_logits = model.getResults('intent_logits')
# max_time = tf.shape(gold_intent_labels)[1]
# target_weights = tf.sequence_mask(input_texts_length, max_time, dtype=intent_logits.dtype)
batch_size = tf.cast(cg.BATCH_SIZE, dtype=tf.float32)
intent_loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gold_intent_labels, logits=intent_logits)) / batch_size
tag_log_likelihood = model.getResults('log_likelihood')
tag_loss = tf.reduce_mean(-tag_log_likelihood)
loss = intent_loss + tag_loss
tvars = tf.trainable_variables()
l2_loss = 1e-2 * (tf.reduce_mean([tf.nn.l2_loss(v) for v in tvars]))
loss += l2_loss
lr = tf.train.polynomial_decay(
cg.LEARNING_RATE,
tf.train.get_or_create_global_step(),
cg.TRAIN_STEPS)
lr = tf.maximum(tf.constant(cg.LEARNING_RATE_LIMIT), lr)
# create optimizer and update
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())
logging_hook = tf.train.LoggingTensorHook({'step': tf.train.get_global_step(),
'loss': loss,
'l2_loss': l2_loss,
'lr': lr,
'intent_loss': intent_loss,
'tag_loss': tag_loss}, every_n_iter=1)
output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[logging_hook])
else:
raise NotImplementedError
return output_spec
return model_fn | 513 |
def jack_is_dull():
"""Take the sentence: All work and no play makes Jack a dull boy.
Store each word in a separate variable, then print out the sentence on one
line using print."""
for i in ["All", "work", "and", "no", "play", "makes", "Jack", "a", "dull", "boy."]:
print(i, end=" ")
print()
def accreds_validate(request, pk):
"""Validate an accred"""
accreds = [get_object_or_404(Accreditation, pk=pk_, end_date=None) for pk_ in filter(lambda x: x, pk.split(','))]
multi_obj = len(accreds) > 1
for accred in accreds:
if not accred.rights_can('VALIDATE', request.user):
raise Http404
if request.method == 'POST':
for accred in accreds:
accred.need_validation = False
accred.save()
accred.user.clear_rights_cache()
AccreditationLog(accreditation=accred, who=request.user, type='validated').save()
from notifications.utils import notify_people, unotify_people
dest_users = accred.unit.users_with_access('ACCREDITATION', no_parent=True)
notify_people(request, 'Accreds.Validated', 'accreds_validated', accred, dest_users)
unotify_people('Accreds.ToValidate', accred)
if multi_obj:
messages.success(request, _(u'Accréditations validées !'))
else:
messages.success(request, _(u'Accréditation validée !'))
return redirect('units-views-accreds_list')
return render(request, 'units/accreds/validate.html', {'accreds': accreds, 'multi_obj': multi_obj}) | 515 |
def need_verified_email(request, *args, **kwargs): # pylint: disable=unused-argument
"""
Returns error page for unverified email on edX
"""
return standard_error_page(request, 401, "verify_email.html") | 516 |
def random_masking(token_ids_all):
"""对输入进行随机mask,增加泛化能力
"""
result = []
for token_ids in token_ids_all:
rands = np.random.random(len(token_ids))
result.append([
t if r > 0.15 else np.random.choice(token_ids)
for r, t in zip(rands, token_ids)
])
return result | 517 |
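A usage sketch for the masking helper above, assuming numpy is imported as np; the token ids below are illustrative only. Each sequence keeps its length, with roughly 15% of tokens replaced by random tokens drawn from the same sequence.
token_ids_all = [[101, 7592, 2088, 102], [101, 2023, 102]]
masked = random_masking(token_ids_all)
print([len(seq) for seq in masked])  # [4, 3]; lengths are preserved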
def is_directory(dir_path):
"""Validates that the argument passed into 'argparse' is a directory."""
if not os.path.isdir(dir_path):
raise ValueError('Path is not a directory: %s' % dir_path)
return dir_path | 518 |
def test042():
"""
check that a modified loaded version of p isn't equal
"""
assert isinstance(p, Pod)
code = get_python_source(p, style="black")
x = eval(code, globals(), locals())
assert isinstance(x, Pod)
x.spec.containers[1].lifecycle.postStart.httpGet.port = 4
assert x != p | 519 |
def used_caches_and_sources(layers, caches, sources):
"""
Find used cache and source names in layers and caches configuration.
"""
used_layer_sources = find_layer_sources(layers)
used_cache_sources = find_cache_sources(caches)
all_used_sources = used_layer_sources.union(used_cache_sources)
avail_caches = set(caches.keys())
avail_sources = set(sources.keys())
used_caches = avail_caches.intersection(all_used_sources)
used_sources = avail_sources.intersection(all_used_sources).difference(used_caches)
return used_caches, used_sources | 520 |
def encode_labels(
labels: Union[list, np.ndarray, pd.Series],
multi_label: bool = False,
sep: str = '|'
):
"""Encode labels
Return coded labels, encoder, and decoder.
Examples:
>>> # multi-class problem
>>> labels = ['OK', 'OK', 'NG1', 'NG2', 'OK']
>>> encode_labels(labels)
(
[0, 0, 1, 2, 0],
{'OK': 0, 'NG1': 1, 'NG2': 2},
{0: 'OK', 1: 'NG1', 2: 'NG2'}
)
>>> # multi-label problem, a.k.a. one hot encoding
>>> labels = ['dog', 'cat', 'dog|cat']
>>> encode_labels(labels, multi_label=True)
(
[[0, 1], [1, 0], [1, 1]],
{'dog': 0, 'cat': 1},
{0: 'dog', 1: 'cat'}
)
Args:
labels (list, np.ndarray): List of labels with string elements.
multi_label (bool, optional): Is multi label classification.
sep (str, optional): For multi-label only. Default is '|'.
Returns:
list or np.array: Coded labels. List in list out, array in array out.
dict: encoder
dict: decoder
"""
# get classes
if not multi_label:
classes = mlsorted(filter(None, set(labels)))
else:
classes = mlsorted(
{labs for item in filter(None, labels) for labs in item.split(sep)}
)
classes = [_ for _ in classes if _ not in ['']]
n_classes = len(classes)
# generate encoder and decoder
encoder = {_class: code for code, _class in enumerate(classes)}
decoder = {v: k for k, v in encoder.items()}
# create coded labels
if not multi_label:
coded_labels = [encoder[x] if x is not None else x for x in labels]
else:
coded_labels = list()
for x in labels:
labs = [0] * n_classes
if x is not None:
for lab in x.split(sep):
labs[encoder[lab]] = 1
coded_labels.append(labs)
# to numpy or to dataframe
if isinstance(labels, (pd.Series, pd.DataFrame)):
if multi_label:
coded_labels = pd.DataFrame(
coded_labels, columns=encoder.keys()
)
else:
coded_labels = pd.DataFrame(
{'y': coded_labels}, dtype=np.int32
)
elif isinstance(labels, (np.ndarray, Categorical)):
coded_labels = np.array(coded_labels, dtype=np.int32)
return coded_labels, encoder, decoder | 521 |
def test_perspective_transform():
"""
Tests with no parameters
"""
simp_manager = SimplificationManager()
with pytest.raises(TypeError):
simp_manager.perspectiveTransformation() | 522 |
def sigmoid_xent(*, logits, labels, reduction=True):
"""Computes a sigmoid cross-entropy (Bernoulli NLL) loss over examples."""
log_p = jax.nn.log_sigmoid(logits)
log_not_p = jax.nn.log_sigmoid(-logits)
nll = -jnp.sum(labels * log_p + (1. - labels) * log_not_p, axis=-1)
return jnp.mean(nll) if reduction else nll | 523 |
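A minimal invocation sketch, assuming jax is installed and the function is in scope; the logits and labels below are illustrative only.
import jax.numpy as jnp
logits = jnp.array([[2.0, -1.0], [0.5, 0.3]])
labels = jnp.array([[1.0, 0.0], [0.0, 1.0]])
print(sigmoid_xent(logits=logits, labels=labels))  # mean Bernoulli NLL over the two examples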
def is_palindrome(s: str) -> bool:
"""Return whether a string is a palindrome
This is as efficient as you can get when computing whether a string is a
palindrome. It runs in O(n) time and O(1) space.
"""
if len(s) <= 1:
return True
i = 0
j = len(s) - 1
while i < j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True | 524 |
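A few quick checks of the two-pointer palindrome test above:
assert is_palindrome("racecar")
assert is_palindrome("")  # strings of length <= 1 are palindromes by definition
assert not is_palindrome("palindrome")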
def accuracy(pred_cls, true_cls, nclass=3):
"""
compute per-node classification accuracy
"""
accu = []
for i in range(nclass):
intersect = ((pred_cls == i) + (true_cls == i)).eq(2).sum().item()
thiscls = (true_cls == i).sum().item()
accu.append(intersect / thiscls)
return np.array(accu) | 525 |
def my_hostogram(gray, bins):
""" pixel values has to be within bins range, otherwise index out of range, for example
if pixel 400th has value 70, but bins are -> [0...40], then histogram[70] yields IOR
"""
histogram = [0 for i in bins]
for i in range(gray.shape[0]):
for j in range(gray.shape[1]):
histogram[gray[i][j]] = histogram[gray[i][j]] + 1
return histogram | 526 |
def reverse(password, position_x, position_y):
"""Reverse from position_x to position_y in password."""
password_slice = password[position_x:position_y + 1]
password[position_x:position_y + 1] = password_slice[::-1]
return password | 527 |
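A usage sketch of the in-place reversal above; the password is assumed to be a list of characters, and the same (mutated) list is returned.
password = list("abcdef")
print(reverse(password, 1, 3))  # ['a', 'd', 'c', 'b', 'e', 'f']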
def test_encode_mixed_items():
"""
>>> encode(['a', ('b', 'c')])
'a; b=c;'
>>> encode([('', 'a'), ('b', 'c')])
'a; b=c;'
>>> encode([('b', 'c'), 'a'])
'b=c; a;'
>>> encode([('b', 'c'), ('', 'a')])
'b=c; a;'
""" | 528 |
def gen_sentence(
start_seq: str = None,
N: int = 4,
prob: float = 0.001,
output_str: bool = True
) -> Union[List[str], str]:
"""
Text generator using Thai2fit
:param str start_seq: word to begin the sentence with.
:param int N: number of words.
:param float prob: minimum probability threshold used for word prediction.
:param bool output_str: if True, return the generated words joined as a str.
:return: list of words or str of words
:rtype: List[str], str
:Example:
::
from pythainlp.generate.thai2fit import gen_sentence
gen_sentence()
# output: 'แคทรียา อิงลิช (นักแสดง'
gen_sentence("แมว")
# output: 'แมว คุณหลวง '
"""
if start_seq is None:
start_seq = random.choice(list(thwiki_itos))
list_word = learn.predict(
start_seq,
N,
temperature=0.8,
min_p=prob,
sep='-*-'
).split('-*-')
if output_str:
return ''.join(list_word)
return list_word | 529 |
def GetBoolValueFromString(s):
"""Returns True for true/1 strings, and False for false/0, None otherwise."""
if s and s.lower() == 'true' or s == '1':
return True
elif s and s.lower() == 'false' or s == '0':
return False
else:
return None | 530 |
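A few quick checks of the string-to-bool helper above:
assert GetBoolValueFromString("True") is True
assert GetBoolValueFromString("0") is False
assert GetBoolValueFromString("maybe") is None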
def genomic_del6_abs_37(genomic_del6_37_loc):
"""Create test fixture absolute copy number variation"""
return {
"type": "AbsoluteCopyNumber",
"_id": "ga4gh:VAC.60XjT6dzYKX8rn6ocG4AVAxCoUFfdjI6",
"subject": genomic_del6_37_loc,
"copies": {"type": "Number", "value": 1}
} | 531 |
def bar_chart_classification_report(classification_report, title, folder):
"""
Plot a bar graph which sums up the classification report of the scikit
learn tool.
:param classification_report: Sliced classification report : classes,
toPlot, support. toPlot must be a tuple (precision, recall, f1-score)
"""
classes, toPlot, support = slice_classification_report(
classification_report)
N = 3
bar_width = 0.05
ind = np.arange(N)
fig, ax = plt.subplots()
# Enumerate over each class except the last one which represent the average
# and total
bars = []
for i in range(len(classes)):
bar_i = ax.bar(ind + i * bar_width, toPlot[i], bar_width)
bars.append(bar_i)
# Add some text for labels, title and axes ticks
ax.set_ylabel("Percent")
ax.set_title(title)
ax.set_xticks(ind + bar_width / len(classes))
ax.set_xticklabels(("Precision", "Recall", "F1-score"))
ax.legend(bars, classes, loc="best")
plt.savefig(folder+"/"+title.replace(" ", "_")+".png", format="png",
dpi=1000) | 532 |
def get_typical_qualifications(cfg):
"""
create qualification list to filter just workers with:
- + 98% approval rate
- + 500 or more accepted HIT
- Location USA
:param cfg:
:return:
"""
if not cfg['hit_type'].getboolean('apply_qualification'):
return []
qualification_requirements=[
{
# Worker_NumberHITsApproved
'QualificationTypeId': '00000000000000000040',
'Comparator': 'GreaterThanOrEqualTo',
'IntegerValues': [
500,
],
'RequiredToPreview': False,
'ActionsGuarded': 'Accept'
}, {
# Worker_PercentAssignmentsApproved
'QualificationTypeId': '000000000000000000L0',
'Comparator': 'GreaterThanOrEqualTo',
'IntegerValues': [
98,
],
'RequiredToPreview': False,
'ActionsGuarded': 'Accept'
}, {
# Worker_Locale
'QualificationTypeId': '00000000000000000071',
'Comparator': 'EqualTo',
'LocaleValues': [
{
'Country':"US"
}
],
'RequiredToPreview': False,
'ActionsGuarded': 'Accept'
},
]
return qualification_requirements | 533 |
def remove_files(pattern=None, paths=None, ensure_success=True):
"""
Removes a file from disk.
Parameters
----------
pattern : str, optional
paths : iterable of str, optional
ensure_success : bool, optional
"""
assert [pattern, paths].count(None) == 1
if (paths is None):
paths = glob.glob(pattern)
for path in paths:
if (ensure_success):
if (op.exists(path)):
os.remove(path)
if (op.exists(path)):
raise RuntimeError("Cannot remove file: %s" % show_string(path))
else:
if (op.isfile(path)):
os.remove(path) | 534 |
def _gen_codegen_dev_to_reg(
nybble: int,
) -> Callable[[Context, Op], Tuple[Context, Op]]:
"""'Code generator generator' for device-to-register instructions."""
def codegen_dev_to_reg(context: Context, op: Op) -> Tuple[Context, Op]:
op = op._replace(args=parse_args_if_able(
_PARSE_OPTIONS, context, op, Type.REGISTER, Type.ADDRESS))
if all_args_parsed(op.args):
_regcheck(op.args[0])
_devcheck(op.args[1])
digits = (nybble, op.args[0].integer, op.args[1].integer)
op = op._replace(todo=None, hex='{:X}{:X}{:X}F'.format(*digits))
# We can still update pos whether we've parsed all args or not.
return context.advance_by_bytes(2), op
return codegen_dev_to_reg | 535 |
def _shell_wrap_inner(command, shell=True, sudo_prefix=None):
"""
Conditionally wrap given command in env.shell (while honoring sudo.)
(Modified from fabric.operations._shell_wrap to avoid double escaping,
as the wrapping host command would also get shell escaped.)
"""
# Honor env.shell, while allowing the 'shell' kwarg to override it (at
# least in terms of turning it off.)
if shell and not env.use_shell:
shell = False
# Sudo plus space, or empty string
if sudo_prefix is None:
sudo_prefix = ""
else:
sudo_prefix += " "
# If we're shell wrapping, prefix shell and space, escape the command and
# then quote it. Otherwise, empty string.
if shell:
shell = env.shell + " "
command = '"%s"' % command # !! removed _shell_escape() here
else:
shell = ""
# Resulting string should now have correct formatting
return sudo_prefix + shell + command | 536 |
def events_until(events: List[ScheduleEvent],
until: time, *, after: time = None) \
-> List[ScheduleEvent]:
"""
Return events up to and including the given time.
Keyword arguments:
after -- if specified, only events after this time will be included.
"""
if after is not None:
events = events_after(events, after)
return [event for event in events if event[0] <= until] | 537 |
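A usage sketch for the filter above, assuming each ScheduleEvent is a tuple whose first element is a datetime.time (the event names below are illustrative):
from datetime import time
events = [(time(9, 0), "standup"), (time(12, 30), "lunch"), (time(17, 0), "retro")]
print(events_until(events, time(13, 0)))  # keeps the 09:00 and 12:30 events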
def get_ip():
"""
Get local ip from socket connection
:return: IP Addr string
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('bing.com', 80))
return s.getsockname()[0] | 538 |
def test_compute_projection(shape):
"""Test PCA projection of X vs X.T @ X"""
X = jax.random.uniform(random.generate_key(), shape=shape)
XTX = X.T @ X
k = 1 if X.ndim == 1 else min(X.shape)
p1 = compute_projection(X, k)
p2 = compute_projection(XTX, k)
np.testing.assert_array_almost_equal(abs(p1), abs(p2), decimal=3) | 539 |
def check_trash_path(filename):
"""Check trash directory exist for given filename, create it otherwise"""
trash_path = get_trash_path(filename)
if not os.path.exists(trash_path):
os.makedirs(trash_path) | 540 |
def shave_marks(txt):
"""去掉全部变音符号"""
norm_txt = unicodedata.normalize('NFD', txt) # 把所有的字符分解为基字符和组合记号
shaved = ''.join(c for c in norm_txt
if not unicodedata.combining(c)) # 过滤掉所有的组合记号
return unicodedata.normalize('NFC', shaved) | 541 |
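A quick check of the diacritic stripper above, assuming unicodedata is imported as in the snippet:
print(shave_marks("café naïve"))  # 'cafe naive'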
def test_demag_2d_pbc():
"""
Attempt to check that demag with 2d_pbc option does
not give a nonsensical answer.
"""
A=1.3e-11
Ms=8.6e5
n = 40
d = 2.5
mesh = fidimag.common.CuboidMesh(nx=n, ny=n, nz=1, dx=d, dy=d, dz=d, unit_length=1e-9, periodicity=(True, True, False))
sim = fidimag.micro.Sim(mesh, name="pbc_2d_bug")
sim.set_Ms(Ms)
sim.set_m((0, 0, 1.0), normalise=True)
demag = fidimag.micro.Demag(pbc_2d=True)
sim.add(demag)
sim.compute_effective_field(0)
assert not np.isnan(demag.demag.tensor_xx).any()
assert not np.isnan(demag.demag.tensor_xy).any()
assert not np.isnan(demag.demag.tensor_xz).any()
assert not np.isnan(demag.demag.tensor_yy).any()
assert not np.isnan(demag.demag.tensor_yz).any()
assert not np.isnan(demag.demag.tensor_zz).any()
assert not np.isnan(sim.field).any(), "NaN in demag array" | 542 |
def _kld_gamma(p_data, q_data):
"""
Computes the Kullback-Leibler divergence between two gamma PDFs
Parameters
----------
p_data: np.array
Data of the first process
q_data: np.array
Data of the second process
Returns
-------
r_kld_gamma: numeric
Kullback-Leibler Divergence Quantity
References
----------
[1] Bauckhage, Christian. (2014). Computing the Kullback-Leibler Divergence between two Generalized Gamma Distributions. arXiv. 1401.6853.
"""
# -------------------------------------------------------------------------- Distribution Parameters -- #
def _gamma_params(data, method='MoM'):
"""
Computes the parameters of a gamma probability density function (pdf), according to the selected
method.
Parameters
----------
data: np.array
The data with which will be adjusted the pdf
method: str
Method to calculate the value of the parameters for the pdf
'MoM': Method of Moments (Default)
Returns
-------
r_params: dict
{'alpha': gamma distribution parameter, 'beta': gamma distribution parameter}
"""
# -- Methods of Moments -- #
if method == 'MoM':
# first two moments
mean = np.mean(data)
variance = np.var(data)
# sometimes referred to in the literature as k
alpha = mean**2/variance
# sometimes referred to in the literature as 1/theta
beta = mean/variance
# return the gamma distribution empirically adjusted parameters
return alpha, beta
# -- For errors or other unsupported methods
else:
raise ValueError("Currently, the supported methods are: 'MoM'")
# alpha_1: Distribution 1: shape parameter, alpha_1 > 0
# beta_1: Distribution 1: rate or inverse scale distribution parameter, beta_1 > 0
alpha_1, beta_1 = _gamma_params(data=p_data)
# alpha_2: Distribution 2: shape parameter, alpha_2 > 0
# beta_2: Distribution 2: rate or inverse scale parameter, beta_2 > 0
alpha_2, beta_2 = _gamma_params(data=q_data)
# Expression with beta instead of theta
theta_1 = 1/beta_1
theta_2 = 1/beta_2
p1, p2 = 1, 1 # Generalized Gamma Distribution with p=1 is a gamma distribution [1]
# Calculations, see [1] for mathematical details.
a = p1*(theta_2**alpha_2)*sps.gamma(alpha_2/p2)
b = p2*(theta_1**alpha_1)*sps.gamma(alpha_1/p1)
c = (((sps.digamma(alpha_1/p1))/p1) + np.log(theta_1))*(alpha_1 - alpha_2)
# Bi-gamma functions
d = sps.gamma((alpha_1+p2)/p1)
e = sps.gamma((alpha_1/p1))
# Calculations
f = (theta_1/theta_2)**(p2)
g = alpha_1/p1
# General calculation and output
r_kld = np.log(a/b) + c + (d/e)*f - g
# Final Kullback-Leibler Divergence for Empirically Adjusted Gamma PDFs
return r_kld | 543 |
def pickAColor() -> colors.Color: # pylint: disable=invalid-name
""" Not Implemented
:rtype colors.Color:
"""
not_implemented = " is not implemented in the MediaComp.jes module"
raise NotImplementedError("pickAColor()" + not_implemented) | 544 |
def interpolate_rat(nodes, values, use_mp=False):
"""Compute a rational function which interpolates the given nodes/values.
Args:
nodes (array): the interpolation nodes; must have odd length and
be passed in strictly increasing or decreasing order
values (array): the values at the interpolation nodes
use_mp (bool): whether to use ``mpmath`` for extended precision. Is
automatically enabled if `nodes` or `values` use ``mpmath``.
Returns:
BarycentricRational: the rational interpolant. If there are `2n + 1` nodes,
both the numerator and denominator have degree at most `n`.
References:
https://doi.org/10.1109/LSP.2007.913583
"""
# ref: (Knockaert 2008), doi:10.1109/LSP.2007.913583
# see also: (Ionita 2013), PhD thesis, Rice U
values = np.asanyarray(values)
nodes = np.asanyarray(nodes)
n = len(values) // 2 + 1
m = n - 1
if not len(values) == n + m or not len(nodes) == n + m:
raise ValueError('number of nodes should be odd')
xa, xb = nodes[0::2], nodes[1::2]
va, vb = values[0::2], values[1::2]
# compute the Loewner matrix
B = (vb[:, None] - va[None, :]) / (xb[:, None] - xa[None, :])
# choose a weight vector in the nullspace of B
weights = _nullspace_vector(B, use_mp=use_mp)
return BarycentricRational(xa, va, weights) | 545 |
def binder_update_page_range(payload):
"""Parser for `binder_update_page_range`"""
try:
match = re.match(binder_update_page_range_pattern, payload)
if match:
match_group_dict = match.groupdict()
return BinderUpdatePageRange(int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4)))
except Exception as e:
raise ParserError(str(e))
def generate_breadcrumb(url: str, separator: str) -> str:
"""
Function that takes a url string and a separator as input
and returns a string containing breadcrumb navigation.
Wikipedia page about breadcrumb navigation:
https://en.wikipedia.org/wiki/Breadcrumb_navigation
Examples:
>>> generate_breadcrumb("youtube.com", " > ")
'<span class="active">HOME</span>'
>>> generate_breadcrumb("https://github.com/harmonify/index.html", " > ")
'<a href="/">HOME</a> > <span class="active">HARMONIFY</span>'
>>> generate_breadcrumb("facebook.com/sebuah-slug-yang-panjang-sekali", " / ")
'<a href="/">HOME</a> / <span class="active">SSYPS</span>'
"""
# initialize a variable to hold the result
result = []
# take the path from the url
path = parse_path(url)
# strip a trailing index.* from the path
path = re.sub(r"index\.?.*$", "", path)
# split the path into a list
pathList = path.split("/")
if pathList[-1] == "":
pathList.pop()
# generate anchor tags from the first element up to
# the second-to-last element of pathList
for i in range(len(pathList[:-1])):
url = "/".join(pathList[: i + 1])
desc = generate_description(pathList[i])
anchor = generate_anchor_tag(url, desc)
result.append(anchor)
# generate a span tag with the last element of pathList
span = generate_span_tag(generate_description(pathList[-1]))
result.append(span)
# return the anchor tags joined with the separator
return separator.join(result) | 547 |
def cleanup_files(session):
"""Deletes all entries in table File in session without any entry
in table Match.
Parameters:
session: An SQLAlchemy database session.
Side-effects:
All entries in table File whose id do not exist in Match.file_id
deleted.
"""
s = session.query(File.id).filter(~File.contains.any()).all()
if s:
session.execute(File.__table__.delete(),
[{"id": t[0]} for t in s])
session.commit() | 548 |
def publish_events():
"""
Publish a Py_ps_event_msg
"""
event_msg = Py_ps_event_msg()
event_msg.dest_guid = 1234
event_msg.id = 9876
event_msg.data.kind = event_msg.data.kinds['PARAMETER_VALUE_STRING']
event_msg.data.value = b"Hello, world!"
events.publish(event_msg) | 549 |
def get_work_path():
"""
Get the working directory, creating it if it does not exist.
:return: work_path str
"""
work_dir = config.WORK_DIR
work_path = f'{work_dir}/{time.strftime("%Y%m%d%H%M%S")}'
print(f'work path: {work_path}')
if not os.path.exists(work_path):
os.makedirs(work_path)
return work_path | 550 |
def extract_all_sentences(dataset_path, features_outfile=None):
""" Extract features from sentences using pretrained universal sentence embeddings and save them in a pickle file
:param dataset_path: the path of the dataset to use
:param features_outfile: file used to store the extracted features
:return: extracted embeddings
"""
model_path = Path(__file__).parent.parent.parent / "data" / "models" / "use"
use = hub.load(str(model_path.absolute()))
feature_extractor = TextFeatureExtractor(use)
return feature_extractor.extract_all_features(dataset_path, features_outfile) | 551 |
def _load_yaml(blueoil_config_filename):
"""load blueoil config yaml
Args:
blueoil_config_filename(str): File path of blueoil config yaml file.
Returns:
blueoil_config(dict): dict of blueoil config.
"""
if not os.path.exists(blueoil_config_filename):
raise FileNotFoundError("File not found: {}".format(blueoil_config_filename))
with open(blueoil_config_filename, "r") as f:
blueoil_config = yaml.load(f)
model_name, _ = os.path.splitext(os.path.basename(blueoil_config_filename))
blueoil_config["model_name"] = model_name
return blueoil_config | 552 |
def bat_activity(request):
""" """
# TODO:
wave_file_name = 'WURB-2_20160908T220024+0200_N57.6627E12.6393_TE-384.wav'
# Pandas data frame
peak_df = None
try:
# Prod:
peak_df = pd.read_csv('/srv/django/cloudedbats/src/test_data/peak_file.txt',
sep="\t")
except:
# Dev:
# peak_df = pd.read_csv('cloudedbats_django/cloudedbats_django/test_data/peak_file.txt',
# sep="\t")
peak_df = pd.read_csv('test_data/peak_file.txt',
sep="\t")
peak_df['time_s'] = peak_df.time/1000
peak_df['amplitude_log'] = np.log(peak_df.amplitude + 2) * 3 #* 10
# Bokeh data source.
ds = ColumnDataSource(peak_df)
#
### TOOLS="pan, box_zoom, wheel_zoom, undo, redo, reset, hover, resize, save"
TOOLS="pan, box_zoom, wheel_zoom, undo, redo, reset, hover, save"
# MORE_TOOLS="crosshair, tap,box_select, poly_select, lasso_select, tap"
p = figure(tools=TOOLS, toolbar_location="above")
# p = figure(tools=TOOLS, toolbar_location="above", active_drag="box_zoom")
# p.title.text="WURB-2_20160908T220024+0200_N57.6627E12.6393_TE-384"
p.plot_width = 700 # 1800
p.plot_height = 300
#
s = p.scatter(source = ds, x='time_s', y='frequency',
marker='circle',
size='amplitude_log',
line_color="navy", fill_color="red", alpha=0.5,
)
p.xaxis.axis_label="Time (sec)"
p.yaxis.axis_label="Peak frequency (kHz)"
p.x_range = Range1d(0, 300, bounds=(0, 300))
p.y_range = Range1d(0, 100, bounds=(0, 150))
#
hover = p.select_one(HoverTool)
hover.point_policy = "follow_mouse"
hover.tooltips = [
("Frequency (kHz)", "@frequency"),
("Amplitude", "@amplitude"),
("Time (sec.)", "@time_s")]
#
script, div = components(p)
#
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
#
return render(request, "cloudedbats_bat_activity.html",
{
'wave_file_name': wave_file_name,
'js_resources': js_resources,
'css_resources': css_resources,
'plot_script': script,
'plot_div': div}
) | 553 |
def search_sorted(array, value):
"""
Searches the given sorted array for the given value using a
BinarySearch which should execute in O(log N).
array a 1D sorted numerical array
value the numerical value to search for
returns index of array closest to value
returns None if value is outside variable bounds
"""
def index_to_check(rmin, rmax):
return (rmin + rmax) // 2
range_min = 0
range_max_0 = len(array)
range_max = range_max_0
numloops = 0
while numloops < 100:
numloops += 1
if (range_max - range_min) == 1:
if (range_max == range_max_0) or (range_min == 0):
raise LookupError("For some reason, range_max-" +\
"range_min reached 1 before " +\
"the element was found. The " +\
"element being searched for " +\
("was %s. (min,max)" % (value,) +\
("=%s" % ((range_min, range_max),))))
else:
high_index = range_max
else:
high_index = index_to_check(range_min, range_max)
high_val = array[high_index]
low_val = array[high_index - 1]
if value < low_val:
range_max = high_index
elif value > high_val:
range_min = high_index
else: # low_val <= value <= high_val
if (2 * (high_val - value)) < (high_val - low_val):
return high_index
else:
return high_index - 1
raise NotImplementedError("Something went wrong! I " +\
"caught a pseudo-infinite loop!") | 554 |
def to_doc(d: DatasetDoc) -> Dict:
"""
Serialise a DatasetDoc to a dict
If you plan to write this out as a yaml file on disk, you're
better off with `to_formatted_doc()`.
"""
doc = attr.asdict(
d,
recurse=True,
dict_factory=dict,
# Exclude fields that are the default.
filter=lambda attr, value: "doc_exclude" not in attr.metadata
and value != attr.default
# Exclude any fields set to None. The distinction should never matter in our docs.
and value is not None,
retain_collection_types=False,
)
doc["$schema"] = ODC_DATASET_SCHEMA_URL
if d.geometry is not None:
doc["geometry"] = shapely.geometry.mapping(d.geometry)
doc["id"] = str(d.id)
doc["properties"] = dict(d.properties)
return doc | 555 |
def test_search_iter():
"""Test iteration of search results."""
for result in cs.search('glucose'):
assert isinstance(result.csid, int) | 556 |
def annealing_exp(start, end, pct):
"""Exponentially anneal from start to end as pct goes from 0.0 to 1.0."""
return start * (end / start) ** pct | 557 |
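A worked example of the exponential schedule above: annealing from 1.0 down to 0.01, the halfway point (pct=0.5) is the geometric mean of the endpoints.
print(annealing_exp(1.0, 0.01, 0.5))  # ~0.1
print(annealing_exp(1.0, 0.01, 1.0))  # 0.01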
def _compile_theano_function(param, vars, givens=None):
"""Compile theano function for a given parameter and input variables.
This function is memoized to avoid repeating costly theano compilations
when repeatedly drawing values, which is done when generating posterior
predictive samples.
Parameters
----------
param : Model variable from which to draw value
vars : Children variables of `param`
givens : Variables to be replaced in the Theano graph
Returns
-------
A compiled theano function that takes the values of `vars` as input
positional args
"""
return function(vars, param, givens=givens,
rebuild_strict=True,
on_unused_input='ignore',
allow_input_downcast=True) | 558 |
def do_fertilization(cookies):
"""
Apply fertilizer.
"""
global totalLevelApplyFertilizerAmount
global totalLevelNeedFertilizerAmount
global totalRewardAmount
global can_steal_list
headers = {
'Host': 'farm.dmall.com',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
'User-Agent': UserAgent,
'Accept': '*/*',
'Referer': 'https://act.dmall.com/dac/mygarden/index.html?dmfrom=wx&dmTransStatusBar=true&dmShowTitleBar=false&bounces=false&dmNeedLogin=true',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,en-US;q=0.8',
'X-Requested-With': 'com.wm.dmall'
}
params = {
'landId': '1',
'cropId': '151',
'token': cookies['token'],
'ticketName': cookies['ticketName'],
'vendorId': '1',
'storeId': cookies['storeId'],
}
try:
response = requests.get('https://farm.dmall.com/farm/fertilization', headers=headers, params=params, cookies=cookies, verify=False)
except Exception as e:
print(e)
return
data = json.loads(response.text)
print(data)
if data.get('data') != None:
totalLevelApplyFertilizerAmount = data.get('data').get('userCrop').get('totalLevelApplyFertilizerAmount')
totalLevelNeedFertilizerAmount = data.get('data').get('userCrop').get('totalLevelNeedFertilizerAmount')
time.sleep(5)
do_fertilization(cookies)
elif data.get('code') == 'FARM2014':
# plant bananas
do_plant(cookies) | 559 |
def load_data():
""" Helper function to load and initialize data
"""
global input_shape, X_train, y_train_labels, y_train, X_test, y_test_labels, y_test
(X_train, y_train_labels), (X_test, y_test_labels) = mnist.load_data()
X_train, X_test, input_shape = preprocess_image_data(X_train, X_test, img_rows, img_cols, K)
# convert class vectors to binary class matrices
y_train = to_categorical(y_train_labels, num_classes)
y_test = to_categorical(y_test_labels, num_classes)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples') | 560 |
def redirect(url):
"""Create a response object representing redirection.
:param url: a URL
:return: a Response
"""
headers = {
"Location": url,
}
return Response(headers=headers, code=HTTPStatus.FOUND) | 561 |
def test_is_valid_password_v2_false1():
"""
Test of is_valid_password_v2() with a false example, take 1
"""
result = is_valid_password_v2(
{"low": 1, "high": 2, "letter": "w", "password": "ww"}
)
assert not result | 562 |
def remove_ordereddict(data, dangerous=True):
"""turns a nested OrderedDict dict into a regular dictionary.
dangerous=True will replace unserializable values with the string '[unserializable]' """
# so nasty.
return json.loads(json_dumps(data, dangerous)) | 563 |
def test_list_base64_binary_max_length_2_nistxml_sv_iv_list_base64_binary_max_length_3_5(mode, save_output, output_format):
"""
Type list/base64Binary is restricted by facet maxLength with value 7.
"""
assert_bindings(
schema="nistData/list/base64Binary/Schema+Instance/NISTSchema-SV-IV-list-base64Binary-maxLength-3.xsd",
instance="nistData/list/base64Binary/Schema+Instance/NISTXML-SV-IV-list-base64Binary-maxLength-3-5.xml",
class_name="NistschemaSvIvListBase64BinaryMaxLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 564 |
def evaluate_v1(tokens: List[str]) -> Number:
"""Evaluates a tokenized expression and returns the result"""
stack: List = []
for token in tokens:
stack = consume_token(token, stack)
return get_result_from_stack(stack) | 565 |
def load_gromacs_reaction_coord_files(us_path, n_wins, step=10, verbose=False):
"""
Parameters
----------
us_path: string
Path to the xvg files with sampled reaction coordinate values
n_wins: integer
Number of umbrella runs
step: integer
Time interval for analysis
verbose: Boolean
Verbosity
Outputs
-------
us_pull_l: list
list of reaction coordinates values sampled in the umbrella runs
"""
us_pull_l = []
bar = pyprind.ProgBar(n_wins, update_interval=15)
for win_i in (range(1, n_wins+1)):
if verbose:
print(win_i)
us_pull_l.append(
np.loadtxt(us_path.format(win_i), skiprows=17)[::step])
bar.update(force_flush=False)
return us_pull_l | 566 |
def test_get_mount_target_in_az_no_az_id_match_to_az_name(mocker):
"""
When the az_name provided does not have a valid az_id
"""
get_mount_targets_info_response = [
MOUNT_TARGET_INFO,
{
"MountTargetId": "fsmt-ijklmnop",
"AvailabilityZoneId": "use2-az3",
"AvailabilityZoneName": "us-east-2c",
"FileSystemId": FS_ID,
"LifeCycleState": "available",
"IpAddress": "192.1.2.3",
},
{
"MountTargetId": "fsmt-qrstuvwx",
"AvailabilityZoneId": "use2-az1",
"AvailabilityZoneName": "us-east-2a",
"FileSystemId": FS_ID,
"LifeCycleState": "available",
"IpAddress": "192.4.5.6",
},
]
_test_get_mount_target_in_az(
mocker,
get_mount_targets_info_response,
desired_describe_time=1,
desired_get_az_id_time=1,
az_id=None,
desired_exception=mount_efs.FallbackException,
desired_message="No matching az id",
) | 567 |
def h_customer_role_playing(
process_configuration: Dict[str, str], h_customer: Hub, staging_table: StagingTable
) -> RolePlayingHub:
"""Define h_customer_role_playing test hub.
Args:
process_configuration: Process configuration fixture value.
h_customer: Hub customer fixture value.
staging_table: Staging table fixture value.
Returns:
Deserialized role playing hub h_customer_role_playing.
"""
h_customer_role_playing_fields = [
Field(
parent_table_name="h_customer_role_playing",
name="h_customer_role_playing_hashkey",
data_type=FieldDataType.TEXT,
position=1,
is_mandatory=True,
length=32,
),
Field(
parent_table_name="h_customer_role_playing",
name="r_timestamp",
data_type=FieldDataType.TIMESTAMP_NTZ,
position=2,
is_mandatory=True,
),
Field(
parent_table_name="h_customer_role_playing",
name="r_source",
data_type=FieldDataType.TEXT,
position=3,
is_mandatory=True,
),
Field(
parent_table_name="h_customer_role_playing",
name="customer_role_playing_id",
data_type=FieldDataType.TEXT,
position=4,
is_mandatory=True,
),
]
h_customer_role_playing = RolePlayingHub(
schema=process_configuration["target_schema"],
name="h_customer_role_playing",
fields=h_customer_role_playing_fields,
)
h_customer_role_playing.parent_table = h_customer
h_customer_role_playing.staging_table = staging_table
return h_customer_role_playing | 568 |
def call_port(command, arguments):
"""
This function calls the port executable with the specified parameters,
printing the output to stdout.
"""
command = ["port", command] + arguments
if (os.getuid() != 0):
print("Using sudo to execute port.")
return subprocess.call(["sudo"] + command)
else:
return subprocess.call(command) | 569 |
def _classify(text:str, name:str=None, service:str=None, language:Language=None):
"""Takes the input text (and optional filename) and makes a best effort to extract/label the code content needed for classification.
E.g. a markdown file has codeblocks extracted and labeled with language, and a code file is extracted in its entirety and labeled accordingly."""
targeted_content = []
# First let's extract what metadata we can, as well as target our classification to important bits (code)
if is_markdown(text, name):
# Try to extract code blocks.
targeted_content += _extract_and_label_codefences(text)
# TODO: May want to refine this (e.g. don't run code-specific models on non-code)
# If none, or if code blocks don't do anything, fall back to treating whole thing as text.
# if not targeted_content:
# targeted_content.append((text, language, service))
# Treat as code as long as it's one of the languages we expect to deal with
elif is_code(text, name):
targeted_content.append((text, language or is_code(text, name), service))
# We also want to handle yaml, but we don't do anything special with that.
elif is_yaml(text, name):
targeted_content.append((text, language, service)) #TODO: Might want to do something custom for yaml in the future.
# otherwise short circuit out. ( e.g. json, etc)
else:
# Maybe should treat it as raw text, parse whole thing?
# TODO: figure this out.
targeted_content.append((text, language, service))
# TODO: If sdk/language aren't specified, try to determine them.
# If we know what they are with high confidence, use the targeted model, otherwise use a generic model. (Maybe run both anyhow and make sure they agree or mosaic)
return targeted_content | 570 |
def parse_command_line_arguments():
"""
Parse the command-line arguments being passed to RMG Py. This uses the
:mod:`argparse` module, which ensures that the command-line arguments are
sensible, parses them, and returns them.
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', metavar='FILE',
help='a predictor training input file')
parser.add_argument('-w', '--weights', metavar='H5',
help='Saved model weights to continue training on (typically for transfer learning)')
parser.add_argument('-d', '--data', metavar='FILE',
help='A file specifying which datasets to train on. Alternatively, a space-separated .csv file'
' with SMILES/InChI and output(s) in the first and subsequent columns, respectively.')
parser.add_argument('-o', '--out_dir', metavar='DIR', default=os.getcwd(),
help='Output directory')
parser.add_argument('-n', '--normalize', action='store_true',
help='Normalize output based on training set mean and standard deviation')
parser.add_argument('--save_tensors_dir', metavar='DIR',
help='Location to save tensors on disk (frees up memory)')
parser.add_argument('--keep_tensors', action='store_true',
help='Do not delete directory containing tensors at end of job')
parser.add_argument('-f', '--folds', type=int, default=5,
help='number of folds for training')
parser.add_argument('-tr', '--train_ratio', type=float, default=0.9,
help='Fraction of training data to use for actual training, rest is early-stopping validation')
parser.add_argument('-te', '--test_ratio', type=float, default=0.0,
help='Fraction of data to use for testing. If loading data from database,'
' test ratios are specified in datasets file')
parser.add_argument('-t', '--train_mode', default='full_train',
help='train mode: currently support in_house and keras for k-fold cross-validation,'
' and full_train for full training')
parser.add_argument('-bs', '--batch_size', type=int, default=1,
help='batch training size')
parser.add_argument('-lr', '--learning_rate', default='0.0007_30.0',
help='two parameters for learning rate')
parser.add_argument('-ep', '--nb_epoch', type=int, default=150,
help='number of epochs for training')
parser.add_argument('-pc', '--patience', type=int, default=10,
help='Number of consecutive epochs allowed for loss increase before stopping early.'
' Note: A value of -1 indicates that the best model will NOT be saved!')
parser.add_argument('-s', '--seed', type=int, default=0,
help='Numpy random seed')
return parser.parse_args() | 571 |
def _js_requires(offline: bool = False) -> str:
"""Format JS requires for Plotly dependency.
Args:
offline: if True, inject entire Plotly library for offline use.
Returns:
str: <script> block with Plotly dependency.
"""
helper_fxns = _load_js_resource(_AxPlotJSResources.HELPER_FXNS)
if offline:
script = Template(_load_js_resource(_AxPlotJSResources.PLOTLY_OFFLINE)).render(
library=plotly_offline.offline.get_plotlyjs()
)
else:
script = _load_js_resource(_AxPlotJSResources.PLOTLY_ONLINE)
return script + helper_fxns | 572 |
def resnet_retinanet(num_classes, backbone='resnet50', inputs=None, modifier=None, **kwargs):
""" Constructs a retinanet model using a resnet backbone.
Args
num_classes: Number of classes to predict.
backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to
freeze backbone layers for example).
Returns
RetinaNet model with a ResNet backbone.
"""
# choose default input
if inputs is None:
inputs = keras.layers.Input(shape=(None, None, 3))
# create the resnet backbone
if backbone == 'resnet50':
resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
elif backbone == 'resnet101':
resnet = keras_resnet.models.ResNet101(inputs, include_top=False, freeze_bn=True)
elif backbone == 'resnet152':
resnet = keras_resnet.models.ResNet152(inputs, include_top=False, freeze_bn=True)
else:
raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))
# invoke modifier if given
if modifier:
resnet = modifier(resnet)
# create the full model
return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=resnet.outputs[1:], **kwargs) | 573 |
def trans_full_matrix_projection(input, size=0, param_attr=None):
"""
Different from full_matrix_projection, this projection performs matrix
multiplication, using transpose of weight.
.. math::
out.row[i] += in.row[i] * w^\mathrm{T}
:math:`w^\mathrm{T}` means transpose of weight.
The simply usage is:
.. code-block:: python
proj = trans_full_matrix_projection(input=layer,
size=100,
param_attr=ParamAttr(
name='_proj',
initial_mean=0.0,
initial_std=0.01))
:param input: input layer
:type input: LayerOutput
:param size: The parameter size. Means the width of parameter.
:type size: int
:param param_attr: Parameter config, None if use default.
:type param_attr: ParameterAttribute
:return: A TransposedFullMatrixProjection Object.
:rtype: TransposedFullMatrixProjection
"""
proj = TransposedFullMatrixProjection(
input_layer_name=input.name, size=size, **param_attr.attr)
proj.origin = input
return proj | 574 |
def encrypt_module(module_path, output_path, key, removal_expression=None):
"""Encrypts python file with into output path"""
with open(module_path, "r") as module_file:
module_content = module_file.read()
if removal_expression is not None:
module_content = _remove_tagged_source(module_content, removal_expression)
module_content = zlib.compress(module_content)
# The hash is calculated on the compressed module to detect wrong key
# before trying to decompress the code
code_hash = hashlib.sha256(module_content).hexdigest()
encryptor = pyaes.AESModeOfOperationCTR(key)
encrypted_module = base64.b64encode(encryptor.encrypt(module_content))
# format the encrypted code into separate lines
encrypted_module = os.linesep + os.linesep.join(_chunks(encrypted_module, MAX_LINE_LENGTH))
global template
if template is None:
with open(MODULE_TEMPLATE, "r") as template_file:
template = template_file.read()
with open(output_path, "w") as output_file:
output_file.write(
template.format(
module_name=__name__,
encryped_code=encrypted_module,
code_hash=code_hash
)
)
logger.debug("Encrypted file {} into {}".format(module_path, output_path)) | 575 |
def create_permissions_and_grant_privileges(*args, **kwargs):
"""
Creates database permissions to assign to a user.
Creates django permissions that reflect what a corresponding database user is
allowed to do when directly logged into the database. These permissions are
translated into database privileges and granted to a user when a user is saved.
Args:
args: Postional arguments for compatibility. Not used.
kwargs: Keyworded arguments for compatibility. Not used.
"""
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from serverside.models import User
# Workaround for a decade-old bug in django:
# See here: https://code.djangoproject.com/ticket/10827#no1 and
# here: https://github.com/pytest-dev/pytest-django/issues/18
ContentType.objects.clear_cache()
models = utils.get_all_models(True, False)
for m in models:
codename = utils.get_permission_codename("select", m)
name = f"Can SELECT from {m._meta.db_table} table" # nosec
content_type = ContentType.objects.get_for_model(m)
Permission.objects.update_or_create(
codename=codename, defaults={"name": name, "content_type": content_type}
)
# Grant privileges that existing users already have.
users = User.objects.all()
for user in users:
user.update_db_permissions() | 576 |
def http_head_deck_etag(gist_url):
"""Perform a HEAD against gist_url and return the etag."""
class HeadRequest(Request):
def get_method(self):
return 'HEAD'
head_request = HeadRequest(gist_url + '/raw')
response = urlopen(head_request)
headers = response.headers
etag = headers['etag']
return etag | 577 |
def updateContourProbabilities(contoursGroupedByImage, probabilityFunction):
"""Set probability that contour is salient based on a probability function."""
contourList = nonnullObjects(contoursGroupedByImage)
print "updateContourProbabilities"
for contour in contourList:
p = probabilityFunction(contour.features)
contour.setProbability(p)
print(p)
if p < 0:
limitedProbability = 0
elif p > 1:
limitedProbability = 1
else:
limitedProbability = p
color = 255.0 * array(((1.0 - limitedProbability) * 10.0,
(limitedProbability * 10.0),
0))
if 1:
contour.setColor(color) | 578 |
def _get_fluxes(sol, reactions):
"""Get the primal values for a set of variables."""
fluxes = {
r.id: sol.fluxes.loc[r.community_id, r.global_id] for r in reactions
}
return pd.Series(fluxes) | 579 |
def compatible_elfs(elf1, elf2):
"""See if two ELFs are compatible
This compares the aspects of the ELF to see if they're compatible:
bit size, endianness, machine type, and operating system.
Parameters
----------
elf1 : ELFFile
elf2 : ELFFile
Returns
-------
True if compatible, False otherwise
"""
osabis = frozenset([e.header['e_ident']['EI_OSABI'] for e in (elf1, elf2)])
compat_sets = (frozenset('ELFOSABI_%s' % x
for x in ('NONE',
'SYSV',
'GNU',
'LINUX', )), )
return ((len(osabis) == 1 or
any(osabis.issubset(x)
for x in compat_sets)) and elf1.elfclass == elf2.elfclass and
elf1.little_endian == elf2.little_endian and
elf1.header['e_machine'] == elf2.header['e_machine']) | 580 |
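A short usage sketch with pyelftools; the file paths are placeholders, and the files must stay open while the ELFFile objects are in use.

from elftools.elf.elffile import ELFFile

with open("/usr/bin/ls", "rb") as f1, open("/usr/bin/cat", "rb") as f2:
    if compatible_elfs(ELFFile(f1), ELFFile(f2)):
        print("ELFs share bitness, endianness, machine type and OS ABI")
    else:
        print("ELFs are not compatible")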
def initialize():
"""
Initializes the figure with reasonably sane settings--the most
    important of which is telling matplotlib to embed TrueType (Type 42) fonts
    (the ACM paper format checker will complain endlessly if Type3 fonts are used).
"""
matplotlib.rcParams.update({
        'pdf.fonttype' : 42 # embed TrueType (Type 42) fonts instead of Type3
,'ps.fonttype' : 42
,'figure.figsize' : [10, 10]
,'axes.linewidth' : 0.75
,'font.size' : 13
,'axes.titlesize' : 13
,'ytick.labelsize' : 13
,'xtick.labelsize' : 13
,'font.sans-serif' : ['Helvetica']
,'font.family' : 'sans-serif'
,'font.style' : 'normal'
,'font.weight' : 'normal'
,'mathtext.fontset' : 'cm'
,'text.usetex' : False
,'legend.frameon' : False
,'xtick.direction' : 'out'
,'xtick.major.pad' : 2
,'xtick.major.size' : 4
,'xtick.major.width' : 0.75
,'xtick.minor.pad' : 2
,'xtick.minor.size' : 2
,'ytick.direction' : 'out'
,'ytick.major.pad' : 2
,'ytick.major.size' : 4
,'ytick.major.width' : 0.75
,'ytick.minor.pad' : 2
,'ytick.minor.size' : 2
,'savefig.dpi' : 600
}) | 581 |
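Typical use is to call initialize() once before any figure is created and then save to PDF, where the Type 42 embedding matters.

import matplotlib.pyplot as plt

initialize()
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], label="example")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.legend()
fig.savefig("figure.pdf")  # fonts embedded as TrueType rather than Type3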
def load_source(source, delete_on_exit):
"""Loads the given source code as a Python module."""
with tempfile.NamedTemporaryFile(
mode='w',
suffix='.py',
prefix='__autograph_generated_file',
delete=False,
encoding='utf-8') as f:
module_name = os.path.basename(f.name[:-3])
file_name = f.name
f.write(source)
if delete_on_exit:
atexit.register(lambda: _remove_file(file_name))
spec = importlib.util.spec_from_file_location(module_name, file_name)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# TODO(mdan): Use our own garbage-collected cache instead of sys.modules.
sys.modules[module_name] = module
return module, file_name | 582 |
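For instance, a small generated module can be loaded and called directly; with delete_on_exit=True the temporary file is removed when the interpreter exits.

source = "def answer():\n    return 42\n"
module, path = load_source(source, delete_on_exit=True)
print(module.answer())  # 42, served from the generated temporary module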
def _tear_down_response(data):
"""Helper function to extract header, payload and end from received response
data."""
response_header = data[2:17]
# Below is actually not used
response_payload_size = data[18]
response_payload = data[19:-2]
response_end = data[-2:]
return response_header, response_payload, response_end | 583 |
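The slicing can be exercised with a synthetic frame; the offsets come straight from the function, while the byte values themselves are made up.

frame = bytes(range(40))  # fake 40-byte response
header, payload, end = _tear_down_response(frame)
assert header == frame[2:17]
assert payload == frame[19:-2]
assert end == frame[-2:]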
def read_data_from_bd(query,
host,
user,
port,
database,
password):
""" get data from abc database
arg:
query: sql
username: database username
password: database password
return:
df: dataframe
"""
import pymysql
connection = pymysql.connect(host=host,
user=user,
port=port,
db=database,
password=password)
    try:
        df = pd.read_sql(query, connection)
    finally:
        connection.close()
    return df
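The credentials and query below are placeholders; the call simply forwards them to pymysql and returns the result as a DataFrame.

df = read_data_from_bd(
    query="SELECT id, created_at FROM orders LIMIT 10",  # placeholder query
    host="db.example.com",
    user="readonly",
    port=3306,
    database="analytics",
    password="********",
)
print(df.head())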
def stack_atomic_call_middleware(q_dict, q_queryset, logger, middleware):
""" Calls the middleware function atomically.
* Returns cached queue on error or None """
cached_q_dict = q_dict[:]
cached_q_query = q_queryset.all()
try:
middleware(q_dict, q_queryset, logger)
    except Exception:
        logger.error('MM_STACK: Middleware exception occurred in %s' % middleware.__name__)
return [cached_q_dict, cached_q_query]
return None | 585 |
def collate_with_neg_fn(generator):
"""Collate a list of datapoints into a batch, with negative samples in last half of batch."""
users, items, item_attr, num_attr = collate_fn(generator)
users[len(users) // 2:] = users[:len(users) // 2]
return users, items, item_attr, num_attr | 586 |
def kudzify_logger(logger=None, format=BASIC_FORMAT):
"""Extends format string of a logger by request context placeholders.
It calls `kudzify_handler` on each handler registered to the given
logger. So this function must be called after handlers are configured.
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
for handler in logger.handlers:
kudzify_handler(handler, format=format) | 587 |
def build_scenario_3(FW, verbosity=None):
"""
Tests if override is cleared when all switch behaviours go out of scope.
And tests switch command with opaque value.
Returns a list of 2-lists: [time, 0ary function] that describes exactly
what needs to be executed when. The 0ary functions return a falsey value
when it succeeded, and a string describing what went wrong else.
"""
def setup_scenario_3():
sendBehaviour(0, buildTwilight(9, 14, 80))
sendBehaviour(1, buildSwitchBehaviour(9, 12, 70))
scenario = TestScenario(FW, "scenario 3")
add_common_setup(scenario)
scenario.addEvent(setup_scenario_3)
if verbosity is not None:
scenario.setVerbosity(verbosity)
# behaviours both become active
scenario.setTime(9, 0)
scenario.addExpect("SwitchAggregator", "overrideState", "-1", "overridestate should've been set to translucent")
scenario.addExpect("SwitchAggregator", "aggregatedState", "70", "aggregatedState should be equal to minimum of active behaviour and twilight")
# switch command occurs
scenario.setTime(10, 0)
scenario.addEvent(bind(sendSwitchCommand, 50))
scenario.addExpect("SwitchAggregator", "overrideState", "50", "overridestate should've been set to translucent")
scenario.setTime(10, 0)
scenario.addExpect("SwitchAggregator", "aggregatedState", "50", "aggregatedState should be equal to override state when it is opaque")
# all behaviours become inactive
scenario.setTime(12, 0)
scenario.addExpect("SwitchAggregator", "overrideState", "-1", "overridestate should've been cleared when it is non-zero and all switch behaviours become inactive")
scenario.addExpect("SwitchAggregator", "aggregatedState", "0", "aggregatedState should be equal to 0 when no override state or switch behaviours are active")
return scenario | 588 |
def main(environment_name, agent_cfg_file):
"""
    Load, train and evaluate a Reinforcement Learning agent.
Parameters
----------
environment_name : str
agent_cfg_file : str
"""
cfg = load_cfg(agent_cfg_file)
# Set up environment and agent
env = gym.make(environment_name)
cfg['env'] = env
cfg['serialize_path'] = ('artifacts/{}-{}.pickle'
.format(cfg['model_name'], environment_name))
agent = load_agent(cfg, env)
agent = train_agent(cfg, env, agent)
rewards = test_agent(cfg, env, agent)
print("Average reward: {:5.3f}".format(rewards))
print("Trained episodes: {}".format(agent.episode)) | 589 |
def preorder(root):
""" preorder traversal (root, left, right) """
if root is not None:
print(root.data, end=" ")
preorder(root.left)
preorder(root.right) | 590 |
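A quick check with a throwaway node class (the traversal only assumes .data, .left and .right attributes).

class Node:
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

root = Node(1, Node(2, Node(4), Node(5)), Node(3))
preorder(root)  # prints: 1 2 4 5 3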
def test_faceinfo_v4(photosdb4, uuid_dict):
""" Test FaceInfo object """
for uuid in uuid_dict:
photo = photosdb4.get_photo(uuid)
faces = photo.face_info
assert len(faces) == len(uuid_dict[uuid])
for face in faces:
assert face.uuid in uuid_dict[uuid]
assert face.asdict() == uuid_dict[uuid][face.uuid] | 591 |
def read(input):
"""Read an entire zonefile, returning an AST for it which contains formatting information."""
return _parse(input, actions=Actions()) | 592 |
def generate_conditionally(text='welcome', random_seed=1, **kwargs):
"""
Input:
text - str
random_seed - integer
Output:
stroke - numpy 2D-array (T x 3)
"""
model = ConditionalStrokeModel.load(
str(MODEL_DIR / 'conditional-stroke-model'),
batch_size=1, rnn_steps=1, is_train=False, char_seq_len=len(text) + 1)
return conditional_decode(model, seed=random_seed, text=text, **kwargs) | 593 |
def parse_field(
parser: argparse.ArgumentParser,
field: pydantic.fields.ModelField,
) -> None:
"""Adds standard pydantic field to argument parser.
Args:
parser (argparse.ArgumentParser): Argument parser to add to.
field (pydantic.fields.ModelField): Field to be added to parser.
"""
# Get Default
default = field.get_default()
# All other types are treated in a standard way
if field.required:
# Add Required Standard Field
parser.add_argument(
utils.argument_name(field.alias),
action=argparse._StoreAction, # pylint: disable=protected-access
help=utils.argument_description(field.field_info.description),
dest=field.alias,
metavar=field.alias.upper(),
required=True,
)
else:
# Add Optional Standard Field
parser.add_argument(
utils.argument_name(field.alias),
action=argparse._StoreAction, # pylint: disable=protected-access
default=default,
help=utils.argument_description(field.field_info.description, default),
dest=field.alias,
metavar=field.alias.upper(),
required=False,
) | 594 |
def test_properties_dataframe():
"""test if properties can be provided as a DataFrame"""
np.random.seed(0)
shape = (10, 2, 2)
data = np.random.random(shape)
data[:, 0, :] = 20 * data[:, 0, :]
properties = {'vector_type': np.array(['A', 'B'] * int(shape[0] / 2))}
properties_df = pd.DataFrame(properties)
properties_df = properties_df.astype(properties['vector_type'].dtype)
layer = Vectors(data, properties=properties_df)
np.testing.assert_equal(layer.properties, properties)
# test adding a dataframe via the properties setter
properties_2 = {'vector_type2': np.array(['A', 'B'] * int(shape[0] / 2))}
properties_df2 = pd.DataFrame(properties_2)
layer.properties = properties_df2
np.testing.assert_equal(layer.properties, properties_2) | 595 |
def cross_entropy(pred, soft_targets):
""" pred: unscaled logits
soft_targets: target-distributions (i.e., sum to 1)
"""
logsoftmax = nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(-soft_targets * logsoftmax(pred), 1)) | 596 |
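A small sanity check with random logits and soft targets; with one-hot targets the value reduces to the usual classification loss, so it should match nn.CrossEntropyLoss up to floating-point error.

import torch
import torch.nn as nn

torch.manual_seed(0)
pred = torch.randn(4, 3)                                # unscaled logits
soft_targets = torch.softmax(torch.randn(4, 3), dim=1)  # rows sum to 1
print(cross_entropy(pred, soft_targets))

labels = torch.tensor([0, 2, 1, 1])
one_hot = nn.functional.one_hot(labels, num_classes=3).float()
print(cross_entropy(pred, one_hot), nn.CrossEntropyLoss()(pred, labels))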
def test_separations():
"""Test if sky separations are the same in all spherical coordinate systems.
This is a simple consistency check.
Sky separations computed between consecutive event positions should
be the same in any spherical coordinate system.
"""
table = Table.read('hess_event_list_2.fits')
def separation(table, lon_colname, lat_colname):
lon = np.array(table[lon_colname], dtype=np.float64)
lat = np.array(table[lat_colname], dtype=np.float64)
pos1 = SkyCoord(lon[:1], lat[:1], unit='deg')
pos2 = SkyCoord(lon[1:], lat[1:], unit='deg')
sep = pos1.separation(pos2).arcsec
res = np.empty(len(table), dtype=np.float64)
res[:-1] = sep
res[-1] = np.nan
return res
table['SEP_RADEC'] = separation(table, 'RA', 'DEC')
table['SEP_RADEC_FOV'] = separation(table, 'FOV_RADEC_LON', 'FOV_RADEC_LAT')
table['SEP_RADEC_FOV_MINUS_SEP_RADEC'] = table['SEP_RADEC_FOV'] - table['SEP_RADEC']
print('Max separation difference RADEC_FOV to RADEC: {} arcsec'.format(np.nanmax(table['SEP_RADEC_FOV_MINUS_SEP_RADEC'])))
# TODO: this currently gives 14.9 arcsec, i.e. there's an issue!
table['SEP_RADEC_FOV_ASTROPY'] = separation(table, 'FOV_RADEC_LON_ASTROPY', 'FOV_RADEC_LAT_ASTROPY')
table['SEP_RADEC_FOV_ASTROPY_MINUS_SEP_RADEC'] = table['SEP_RADEC_FOV_ASTROPY'] - table['SEP_RADEC']
print('Max separation difference RADEC_FOV_ASTROPY to RADEC: {} arcsec'.format(np.nanmax(table['SEP_RADEC_FOV_ASTROPY_MINUS_SEP_RADEC'])))
# 0.02 arcsec => OK
# Note: for ALTAZ this is not expected to match RADEC, because the earth is rotating between events.
# table['SEP_ALTAZ'] = separation(table, 'AZ', 'ALT')
# table['SEP_RADEC_MINUS_SEP_ALTAZ'] = table['SEP_RADEC'] - table['SEP_ALTAZ']
# print('Max separation difference RADEC to ALTAZ: {}'.format(np.nanmax(table['SEP_RADEC_MINUS_SEP_ALTAZ'])))
# table.info('stats')
# table.write('temp.fits', overwrite=True) | 597 |
def turnAwayFrom(speed = SPEED, rotation = 0):
""" Turn robot away from a hazard """
if rotation > 20: # If hazard is >20° to the right:
turn(speed, -1) # - turn right.
elif rotation < -20: # If hazard is >20° to the left:
turn(speed, 1) # - turn left.
else: # If hazard is in front (ish):
turn(speed) # - turn randomly either left or right. | 598 |
def config(path) -> bool:
    """Read and validate the config file at the given path; return True on success, False otherwise."""
logger.debug("Reading config from %s", path)
try:
with open(path, encoding="utf-8") as config_file_object:
# Read into dict
config_json = json.load(config_file_object)
logger.info("Loaded config into dict")
except FileNotFoundError:
logger.critical("Unable to find file %s", path)
stager.utils.dialog.error(
"Config file not found",
f"The config file {path} was not found",
)
return False
except json.JSONDecodeError as exception_:
logger.critical("Unable to parse %s: %s", path, exception_.msg)
stager.utils.dialog.error("Unable to parse config", exception_.msg)
return False
    # Validate the config: every required key must be present
config_json_keys = config_json.keys()
for req in REQUIRED:
if req not in config_json_keys:
logger.critical("Missing required key %s in config", req)
stager.utils.dialog.error(
"Config invalid", f"Missing required key {req} in config"
)
return False
# Fill in optional fields with a default
for opt in OPTIONAL_DEFAULTS: # TODO move to items
if opt not in config_json_keys:
# Add to the config json
config_json[opt] = OPTIONAL_DEFAULTS[opt]["default_value"]
# Reload prefs namespace
config_ns = convert_to_namespace(json.dumps(config_json))
stager.utils.CONFIG = config_ns
return True | 599 |