def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
    return self._raise_or_extract(response)
def get_by_index(self, index):
"""Returns a Volume or Disk by its index."""
try:
return self[index]
except KeyError:
for v in self.get_volumes():
if v.index == str(index):
return v
        raise KeyError(index)
def run_subprocess(executable_command,
command_arguments = [],
timeout=None,
print_process_output=True,
stdout_file=None,
stderr_file=None,
poll_seconds=.100,
buffer_size=-1,
daemon=False,
return_std=False):
"""Create and run a subprocess and return the process and
execution time after it has completed. The execution time
does not include the time taken for file i/o when logging
the output if stdout_file and stderr_file arguments are given.
Positional arguments:
executable_command (str) -- executable command to run
command_arguments (list) -- command line arguments
timeout (int/float) -- how many seconds to allow for process completion
print_process_output (bool) -- whether to print the process' live output
stdout_file (str) -- file to log stdout to
stderr_file (str) -- file to log stderr to
poll_seconds(int/float) -- how often in seconds to poll the subprocess
to check for completion
daemon(bool) -- whether the process is a daemon. If True, returns process
immediately after creation along with start time rather than
execution time.
return_std (bool) -- whether to return a reference to the processes' NBSRW stdout and stderr
"""
# validate arguments
# list
assert_variable_type(command_arguments, list)
# strings
assert_variable_type(executable_command, str)
_string_vars = [stdout_file,
stderr_file]
[assert_variable_type(x, [str, NoneType, unicode]) for x in _string_vars + command_arguments]
# bools
assert_variable_type(print_process_output, bool)
assert_variable_type(return_std, bool)
# floats
_float_vars = [timeout,
poll_seconds]
[assert_variable_type(x, [int, float, NoneType]) for x in _float_vars]
global process, _nbsr_stdout, _nbsr_stderr
process = None
_nbsr_stdout = None
_nbsr_stderr = None
def _exec_subprocess():
# create the subprocess to run the external program
global process, _nbsr_stdout, _nbsr_stderr
process = subprocess.Popen([executable_command] + command_arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=buffer_size, preexec_fn=os.setsid)
# wrap p.stdout with a NonBlockingStreamReader object:
_nbsr_stdout = NBSRW(process.stdout, print_process_output, stdout_file)
_nbsr_stderr = NBSRW(process.stderr, print_process_output, stderr_file)
        # if the process is a daemon, break
# execution time returned is start time
if daemon:
return
# set deadline if timeout was set
_deadline = None
if timeout is not None:
_deadline = timeit.default_timer() + timeout
# poll process while it runs
while process.poll() is None:
# throw TimeoutError if timeout was specified and deadline has passed
if _deadline is not None and timeit.default_timer() > _deadline and process.poll() is None:
os.killpg(process.pid, signal.SIGTERM)
raise TimeoutError("Sub-process did not complete before %.4f seconds elapsed" %(timeout))
# sleep to yield for other processes
time.sleep(poll_seconds)
execution_time = timeit.timeit(_exec_subprocess, number=1)
# return process to allow application to communicate with it
# and extract whatever info like stdout, stderr, returncode
# also return execution_time to allow
if return_std:
return process, execution_time, _nbsr_stdout, _nbsr_stderr
    return process, execution_time
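# A minimal usage sketch for run_subprocess, assuming it is imported together with
# the module-level helpers it relies on (NBSRW, assert_variable_type) and that an
# `ls` binary is available; the command and arguments are illustrative only.
def _demo_run_subprocess():
    # Run `ls -la`, allow up to 30 seconds, and keep references to the stream readers.
    process, execution_time, out_reader, err_reader = run_subprocess(
        "ls",
        command_arguments=["-la"],
        timeout=30,
        print_process_output=False,
        return_std=True)
    print("return code: %s, elapsed: %.3fs" % (process.returncode, execution_time))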
def main():
""" Main entry point - used for command line call
"""
args = _parse_arg(CountryConverter().valid_class)
coco = CountryConverter(additional_data=args.additional_data)
converted_names = coco.convert(
names=args.names,
src=args.src,
to=args.to,
enforce_list=False,
not_found=args.not_found)
print(args.output_sep.join(
[str(etr) for etr in converted_names] if
        isinstance(converted_names, list) else [str(converted_names)]))
def mkdir(dir_path):
# type: (AnyStr) -> None
"""Make directory if not existed"""
if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
        os.makedirs(dir_path)
async def genSchema(self, name, version, attrNames) -> Schema:
"""
Generates and submits Schema.
:param name: schema name
:param version: schema version
:param attrNames: a list of attributes the schema contains
:return: submitted Schema
"""
schema = Schema(name, version, attrNames, self.issuerId)
    return await self.wallet.submitSchema(schema)
def _clean_rule(self, rule):
"""
Cleans a css Rule by removing Selectors without matches on the tree
    Returns None if the whole rule does not match
:param rule: CSS Rule to check
:type rule: A tinycss Rule object
:returns: A cleaned tinycss Rule with only Selectors matching the tree or None
:rtype: tinycss Rule or None
"""
# Always match @ rules
if rule.at_keyword is not None:
return rule
# Clean selectors
cleaned_token_list = []
for token_list in split_on_comma(rule.selector):
# If the token list matches the tree
if self._token_list_matches_tree(token_list):
# Add a Comma if multiple token lists matched
if len(cleaned_token_list) > 0:
cleaned_token_list.append(
cssselect.parser.Token('DELIM', ',', len(cleaned_token_list) + 1))
# Append it to the list of cleaned token list
cleaned_token_list += token_list
# Return None if selectors list is empty
if not cleaned_token_list:
return None
# Update rule token list
rule.selector = cleaned_token_list
# Return cleaned rule
    return rule
def GeneratePassphrase(length=20):
"""Create a 20 char passphrase with easily typeable chars."""
valid_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
valid_chars += "0123456789 ,-_&$#"
return "".join(random.choice(valid_chars) for i in range(length)) | Create a 20 char passphrase with easily typeable chars. |
def element_to_objects(payload: Dict) -> List:
"""
Transform an Element to a list of entities recursively.
"""
entities = []
cls = MAPPINGS.get(payload.get('type'))
if not cls:
return []
transformed = transform_attributes(payload, cls)
entity = cls(**transformed)
if hasattr(entity, "post_receive"):
entity.post_receive()
entities.append(entity)
    return entities
def last_modified(self):
""" Gets the most recent modification time for all entries in the view """
if self.entries:
latest = max(self.entries, key=lambda x: x.last_modified)
return arrow.get(latest.last_modified)
    return arrow.get()
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5']
"""
key = natsort_keygen(key, alg)
    return sorted(seq, reverse=reverse, key=key)
def triggerid_get(hostid=None, trigger_desc=None, priority=4, **kwargs):
'''
.. versionadded:: Fluorine
    Retrieve trigger ID and description based on host ID and trigger description.
.. note::
https://www.zabbix.com/documentation/3.4/manual/api/reference/trigger/get
:param hostid: ID of the host whose trigger we want to find
:param trigger_desc: Description of trigger (trigger name) whose we want to find
:param priority: Priority of trigger (useful if we have same name for more triggers with different priorities)
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Trigger ID and description. False if no trigger found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.triggerid_get 1111 'trigger name to find' 5
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'trigger.get'
if not hostid or not trigger_desc:
return {'result': False, 'comment': 'hostid and trigger_desc params are required'}
params = {'output': ['triggerid', 'description'],
'filter': {'priority': priority}, 'hostids': hostid}
params = _params_extend(params, _ignore_name=True, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
if ret['result']:
for r in ret['result']:
if trigger_desc in r['description']:
ret['result'] = r
return ret
return False
else:
return False
else:
raise KeyError
except KeyError:
        return ret
def remove_hyperedge(self, hyperedge_id):
"""Removes a hyperedge and its attributes from the hypergraph.
:param hyperedge_id: ID of the hyperedge to be removed.
:raises: ValueError -- No such hyperedge exists.
Examples:
::
>>> H = DirectedHypergraph()
>>> xyz = hyperedge_list = ((["A"], ["B", "C"]),
(("A", "B"), ("C"), {'weight': 2}),
(set(["B"]), set(["A", "C"])))
>>> H.add_hyperedges(hyperedge_list)
>>> H.remove_hyperedge(xyz[0])
"""
if not self.has_hyperedge_id(hyperedge_id):
raise ValueError("No such hyperedge exists.")
frozen_tail = \
self._hyperedge_attributes[hyperedge_id]["__frozen_tail"]
frozen_head = \
self._hyperedge_attributes[hyperedge_id]["__frozen_head"]
# Remove this hyperedge from the forward-star of every tail node
for node in frozen_tail:
self._forward_star[node].remove(hyperedge_id)
# Remove this hyperedge from the backward-star of every head node
for node in frozen_head:
self._backward_star[node].remove(hyperedge_id)
# Remove frozen_head as a successor of frozen_tail
del self._successors[frozen_tail][frozen_head]
# If that tail is no longer the tail of any hyperedge, remove it
# from the successors dictionary
if self._successors[frozen_tail] == {}:
del self._successors[frozen_tail]
# Remove frozen_tail as a predecessor of frozen_head
del self._predecessors[frozen_head][frozen_tail]
# If that head is no longer the head of any hyperedge, remove it
# from the predecessors dictionary
if self._predecessors[frozen_head] == {}:
del self._predecessors[frozen_head]
# Remove hyperedge's attributes dictionary
    del self._hyperedge_attributes[hyperedge_id]
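# Usage sketch for remove_hyperedge against the DirectedHypergraph class it belongs
# to (the add_hyperedges/has_hyperedge_id names follow the halp-style API used in
# the docstring above and are assumptions here):
def _demo_remove_hyperedge():
    H = DirectedHypergraph()
    ids = H.add_hyperedges([(["A"], ["B", "C"]), (["B"], ["A", "C"])])
    H.remove_hyperedge(ids[0])                  # drop the first hyperedge by ID
    assert not H.has_hyperedge_id(ids[0])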
def pipeline(self, config, request):
"""
The pipeline() function handles authentication and invocation of
the correct consumer based on the server configuration, that is
provided at initialization time.
When authentication is performed all the authenticators are
executed. If any returns False, authentication fails and a 403
error is raised. If none of them positively succeeds and they all
return None then also authentication fails and a 403 error is
raised. Authentication plugins can add attributes to the request
object for use of authorization or other plugins.
    When authorization is performed, any positive result will cause the
operation to be accepted and any negative result will cause it to
fail. If no authorization plugin returns a positive result a 403
error is returned.
Once authentication and authorization are successful the pipeline
will parse the path component and find the consumer plugin that
handles the provided path walking up the path component by
component until a consumer is found.
Paths are walked up from the leaf to the root, so if two consumers
hang on the same tree, the one closer to the leaf will be used. If
    there is a trailing path when the consumer is selected then it will
    be stored in the request dictionary named 'trail'. The 'trail' is
an ordered list of the path components below the consumer entry
point.
"""
path_chain = request['path_chain']
if not path_chain or path_chain[0] != '':
# no path or not an absolute path
raise HTTPError(400)
# auth framework here
authers = config.get('authenticators')
if authers is None:
raise HTTPError(403)
valid_once = False
for auth in authers:
valid = authers[auth].handle(request)
if valid is False:
raise HTTPError(403)
elif valid is True:
valid_once = True
if valid_once is not True:
self.server.auditlog.svc_access(self.__class__.__name__,
log.AUDIT_SVC_AUTH_FAIL,
request['client_id'], 'No auth')
raise HTTPError(403)
    # authz framework here
authzers = config.get('authorizers')
if authzers is None:
raise HTTPError(403)
authz_ok = None
for authz in authzers:
valid = authzers[authz].handle(request)
if valid is True:
authz_ok = True
elif valid is False:
authz_ok = False
break
if authz_ok is not True:
self.server.auditlog.svc_access(self.__class__.__name__,
log.AUDIT_SVC_AUTHZ_FAIL,
request['client_id'],
path_chain)
raise HTTPError(403)
# Select consumer
trail = []
while path_chain:
if path_chain in config['consumers']:
con = config['consumers'][path_chain]
if len(trail) != 0:
request['trail'] = trail
return con.handle(request)
trail.insert(0, path_chain[-1])
path_chain = path_chain[:-1]
    raise HTTPError(404)
def execute(self, context):
"""
Publish the message to SQS queue
:param context: the context object
:type context: dict
:return: dict with information about the message sent
For details of the returned dict see :py:meth:`botocore.client.SQS.send_message`
:rtype: dict
"""
hook = SQSHook(aws_conn_id=self.aws_conn_id)
result = hook.send_message(queue_url=self.sqs_queue,
message_body=self.message_content,
delay_seconds=self.delay_seconds,
message_attributes=self.message_attributes)
    self.log.info('result of send_message is %s', result)
    return result
def is_me(self): # pragma: no cover, seems not to be used anywhere
"""Check if parameter name if same than name of this object
TODO: is it useful?
:return: true if parameter name if same than this name
:rtype: bool
"""
logger.info("And arbiter is launched with the hostname:%s "
"from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
    return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
    return loss
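# Sketch of a training loop driving step() on this Adam-style optimizer. The
# optimizer is assumed to be constructed elsewhere with the parameter-group keys
# used above (lr, b1, b2, e, weight_decay, max_grad_norm, schedule); model,
# data_loader and loss_fn are placeholders, not part of the original module.
def _demo_optimizer_step(model, data_loader, optimizer, loss_fn):
    model.train()
    for inputs, targets in data_loader:
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), targets)
        loss.backward()                 # populate p.grad for every parameter
        optimizer.step()                # apply the update rule defined above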
def write_hdf5_segmentlist(seglist, output, path=None, **kwargs):
"""Write a `SegmentList` to an HDF5 file/group
Parameters
----------
seglist : :class:`~ligo.segments.segmentlist`
data to write
output : `str`, `h5py.File`, `h5py.Group`
filename or HDF5 object to write to
path : `str`
path to which to write inside the HDF5 file, relative to ``output``
**kwargs
other keyword arguments are passed to
:meth:`~astropy.table.Table.write`
"""
if path is None:
raise ValueError("Please specify the HDF5 path via the "
"``path=`` keyword argument")
# convert segmentlist to Table
data = numpy.zeros((len(seglist), 4), dtype=int)
for i, seg in enumerate(seglist):
start, end = map(LIGOTimeGPS, seg)
data[i, :] = (start.gpsSeconds, start.gpsNanoSeconds,
end.gpsSeconds, end.gpsNanoSeconds)
segtable = Table(data, names=['start_time', 'start_time_ns',
'end_time', 'end_time_ns'])
# write table to HDF5
    return segtable.write(output, path=path, format='hdf5', **kwargs)
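# Usage sketch for write_hdf5_segmentlist, assuming the ligo.segments package that
# provides segment/segmentlist; the GPS times and output filename are illustrative.
def _demo_write_segments():
    from ligo.segments import segment, segmentlist
    seglist = segmentlist([segment(1000000000, 1000000100),
                           segment(1000000200, 1000000300)])
    write_hdf5_segmentlist(seglist, "segments.h5", path="segments/active")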
def unary_from_softmax(sm, scale=None, clip=1e-5):
"""Converts softmax class-probabilities to unary potentials (NLL per node).
Parameters
----------
sm: numpy.array
Output of a softmax where the first dimension is the classes,
        all others will be flattened. This means `sm.shape[0] == n_classes`.
scale: float
The certainty of the softmax output (default is None).
If not None, the softmax outputs are scaled to range from uniform
probability for 0 outputs to `scale` probability for 1 outputs.
clip: float
Minimum value to which probability should be clipped.
This is because the unary is the negative log of the probability, and
log(0) = inf, so we need to clip 0 probabilities to a positive value.
"""
num_cls = sm.shape[0]
if scale is not None:
assert 0 < scale <= 1, "`scale` needs to be in (0,1]"
uniform = np.ones(sm.shape) / num_cls
sm = scale * sm + (1 - scale) * uniform
if clip is not None:
sm = np.clip(sm, clip, 1.0)
    return -np.log(sm).reshape([num_cls, -1]).astype(np.float32)
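# Worked example: turn a (n_classes, H, W) softmax map into the flat
# (n_classes, H*W) unary array that dense-CRF style solvers expect (numpy is
# assumed to be imported as np, as in the function above).
def _demo_unary_from_softmax():
    probs = np.random.rand(3, 4, 5).astype(np.float32)
    probs /= probs.sum(axis=0, keepdims=True)   # normalise over the class axis
    unary = unary_from_softmax(probs, scale=0.9, clip=1e-5)
    assert unary.shape == (3, 20)               # n_classes x flattened pixels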
def init_app(self, app):
"""Flask application initialization.
Initialize the REST endpoints. Connect all signals if
`DEPOSIT_REGISTER_SIGNALS` is True.
:param app: An instance of :class:`flask.Flask`.
"""
self.init_config(app)
blueprint = rest.create_blueprint(
app.config['DEPOSIT_REST_ENDPOINTS']
)
# FIXME: This is a temporary fix. This means that
# invenio-records-rest's endpoint_prefixes cannot be used before
# the first request or in other processes, ex: Celery tasks.
@app.before_first_request
def extend_default_endpoint_prefixes():
"""Extend redirects between PID types."""
endpoint_prefixes = utils.build_default_endpoint_prefixes(
dict(app.config['DEPOSIT_REST_ENDPOINTS'])
)
current_records_rest = app.extensions['invenio-records-rest']
overlap = set(endpoint_prefixes.keys()) & set(
current_records_rest.default_endpoint_prefixes
)
if overlap:
raise RuntimeError(
'Deposit wants to override endpoint prefixes {0}.'.format(
', '.join(overlap)
)
)
current_records_rest.default_endpoint_prefixes.update(
endpoint_prefixes
)
app.register_blueprint(blueprint)
app.extensions['invenio-deposit-rest'] = _DepositState(app)
if app.config['DEPOSIT_REGISTER_SIGNALS']:
post_action.connect(index_deposit_after_publish, sender=app,
                            weak=False)
def range(self, channels=None):
"""
Get the range of the specified channel(s).
The range is a two-element list specifying the smallest and largest
values that an event in a channel should have. Note that with
floating point data, some events could have values outside the
range in either direction due to instrument compensation.
The range should be transformed along with the data when passed
through a transformation function.
The range of channel "n" is extracted from the $PnR parameter as
``[0, $PnR - 1]``.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the range. If None, return a list
with the range of all channels, in the order of
``FCSData.channels``.
Return
------
array or list of arrays
The range of the specified channel(s).
"""
# Check default
if channels is None:
channels = self._channels
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Get the range of the specified channels
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._range[ch] for ch in channels]
else:
        return self._range[channels]
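# Usage sketch against the FCSData-like object this method belongs to; the channel
# name 'FSC-A' is an assumption for illustration only.
def _demo_channel_range(fcs_data):
    all_ranges = fcs_data.range()           # ranges for every channel, in order
    fsc_range = fcs_data.range('FSC-A')     # single channel looked up by name
    return all_ranges, fsc_range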
def feature_match(template, image, options=None):
"""
Match template and image by extracting specified feature
:param template: Template image
:param image: Search image
:param options: Options include
- feature: Feature extractor to use. Default is 'rgb'. Available options are:
'hog', 'lab', 'rgb', 'gray'
:return: Heatmap
"""
op = _DEF_TM_OPT.copy()
if options is not None:
op.update(options)
feat = fe.factory(op['feature'])
tmpl_f = feat(template, op)
img_f = feat(image, op)
scale = image.shape[0] / img_f.shape[0]
heatmap = match_template(tmpl_f, img_f, op)
    return heatmap, scale
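# Usage sketch for feature_match with numpy/OpenCV-style images; it assumes the
# module-level feature factory `fe` and `_DEF_TM_OPT` used above are importable.
def _demo_feature_match(template_img, search_img):
    heatmap, scale = feature_match(template_img, search_img,
                                   options={'feature': 'gray'})
    # The heatmap peak, scaled back by `scale`, approximates the match location.
    return heatmap, scale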
def redirects(self):
""" list: List of all redirects to this page; **i.e.,** the titles \
listed here will redirect to this page title
Note:
Not settable """
if self._redirects is None:
self._redirects = list()
self.__pull_combined_properties()
    return self._redirects
def _configure_device(commands, **kwargs):
'''
Helper function to send configuration commands to the device over a
proxy minion or native minion using NX-API or SSH.
'''
if salt.utils.platform.is_proxy():
return __proxy__['nxos.proxy_config'](commands, **kwargs)
else:
        return _nxapi_config(commands, **kwargs)
def fanout(self, hosts=None, timeout=None, max_concurrency=64,
auto_batch=None):
"""Returns a context manager for a map operation that fans out to
manually specified hosts instead of using the routing system. This
can for instance be used to empty the database on all hosts. The
context manager returns a :class:`FanoutClient`. Example usage::
with cluster.fanout(hosts=[0, 1, 2, 3]) as client:
results = client.info()
for host_id, info in results.value.iteritems():
print '%s -> %s' % (host_id, info['is'])
The promise returned accumulates all results in a dictionary keyed
by the `host_id`.
The `hosts` parameter is a list of `host_id`\s or alternatively the
string ``'all'`` to send the commands to all hosts.
    The fanout API needs to be used with a lot of care as it can cause
a lot of damage when keys are written to hosts that do not expect
them.
"""
return MapManager(self.get_fanout_client(hosts, max_concurrency,
auto_batch),
                      timeout=timeout)
def meta_changed_notify_after(self, state_machine_m, _, info):
"""Handle notification about the change of a state's meta data
The meta data of the affected state(s) are read and the view updated accordingly.
:param StateMachineModel state_machine_m: Always the state machine model belonging to this editor
:param str _: Always "state_meta_signal"
:param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value
"""
meta_signal_message = info['arg']
if meta_signal_message.origin == "graphical_editor_gaphas": # Ignore changes caused by ourself
return
if meta_signal_message.origin == "load_meta_data": # Meta data can't be applied, as the view has not yet
return # been created
notification = meta_signal_message.notification
if not notification: # For changes applied to the root state, there are always two notifications
return # Ignore the one with less information
if self.model.ongoing_complex_actions:
return
model = notification.model
view = self.canvas.get_view_for_model(model)
if meta_signal_message.change == 'show_content':
library_state_m = model
library_state_v = view
if library_state_m.meta['gui']['show_content'] is not library_state_m.show_content():
logger.warning("The content of the LibraryState won't be shown, because "
"MAX_VISIBLE_LIBRARY_HIERARCHY is 1.")
if library_state_m.show_content():
if not library_state_m.state_copy_initialized:
logger.warning("Show library content without initialized state copy does not work {0}"
"".format(library_state_m))
logger.debug("Show content of {}".format(library_state_m.state))
gui_helper_meta_data.scale_library_content(library_state_m)
self.add_state_view_for_model(library_state_m.state_copy, view,
hierarchy_level=library_state_v.hierarchy_level + 1)
else:
logger.debug("Hide content of {}".format(library_state_m.state))
state_copy_v = self.canvas.get_view_for_model(library_state_m.state_copy)
if state_copy_v:
state_copy_v.remove()
else:
if isinstance(view, StateView):
view.apply_meta_data(recursive=meta_signal_message.affects_children)
else:
view.apply_meta_data()
self.canvas.request_update(view, matrix=True)
    self.canvas.wait_for_update()
def _handle_auth(self, dtype, data, ts):
"""Handles authentication responses.
:param dtype:
:param data:
:param ts:
:return:
"""
# Contains keys status, chanId, userId, caps
if dtype == 'unauth':
raise NotImplementedError
channel_id = data.pop('chanId')
user_id = data.pop('userId')
identifier = ('auth', user_id)
self.channel_handlers[identifier] = channel_id
self.channel_directory[identifier] = channel_id
    self.channel_directory[channel_id] = identifier
def addItem(self, item):
"""Adds an item if the tree is mutable"""
try:
self.tree.addItem(item)
    except AttributeError:
        raise VersionError('Saved versions are immutable')
def _iter_rawterms(cls, tree):
"""Iterate through the raw terms (Classes) in the ontology.
"""
for elem in tree.iterfind(OWL_CLASS):
if RDF_ABOUT not in elem.keys(): # This avoids parsing a class
continue # created by restriction
rawterm = cls._extract_resources(elem)
rawterm['id'] = cls._get_id_from_url(elem.get(RDF_ABOUT))
        yield rawterm
def _split_line_with_offsets(line):
"""Split a line by delimiter, but yield tuples of word and offset.
This function works by dropping all the english-like punctuation from
a line (so parenthesis preceded or succeeded by spaces, periods, etc)
and then splitting on spaces.
"""
for delimiter in re.finditer(r"[\.,:\;](?![^\s])", line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"[\"'\)\]\}>](?![^\.,\;:\"'\)\]\}>\s])",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
for delimiter in re.finditer(r"(?<![^\.,\;:\"'\(\[\{<\s])[\"'\(\[\{<]",
line):
span = delimiter.span()
line = line[:span[0]] + " " + line[span[1]:]
# Treat hyphen separated words as separate words
line = line.replace("-", " ")
# Remove backticks
line = line.replace("`", " ")
for match in re.finditer(r"[^\s]+", line):
content = match.group(0)
if content.strip() != "":
            yield (match.span()[0], content)
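# Worked example: list(_split_line_with_offsets("hello, world (test)")) yields
# (0, 'hello'), (7, 'world'), (14, 'test'); offsets index into the original line
# because every punctuation substitution above preserves the line length.
def _demo_split_line_with_offsets():
    pairs = list(_split_line_with_offsets("hello, world (test)"))
    assert pairs == [(0, 'hello'), (7, 'world'), (14, 'test')]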
def find_id_in_folder(self, name, parent_folder_id=0):
"""Find a folder or a file ID from its name, inside a given folder.
Args:
name (str): Name of the folder or the file to find.
parent_folder_id (int): ID of the folder where to search.
Returns:
int. ID of the file or folder found. None if not found.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
if name is None or len(name) == 0:
return parent_folder_id
offset = 0
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
total = int(resp['total_count'])
while offset < total:
found = self.__find_name(resp, name)
if found is not None:
return found
offset += int(len(resp['entries']))
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
    return None
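# Usage sketch, assuming an authenticated instance of the Box client class this
# method belongs to; the folder name is illustrative.
def _demo_find_id_in_folder(box_client):
    folder_id = box_client.find_id_in_folder("reports", parent_folder_id=0)
    if folder_id is None:
        print("'reports' was not found under the root folder")
    return folder_id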
def get_section(self, section_id, params={}):
"""
Return section resource for given canvas section id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.show
"""
url = SECTIONS_API.format(section_id)
    return CanvasSection(data=self._get_resource(url, params=params))
def dump(self):
"""Print a formatted summary of the current solve state."""
from rez.utils.formatting import columnise
rows = []
for i, phase in enumerate(self.phase_stack):
rows.append((self._depth_label(i), phase.status, str(phase)))
print "status: %s (%s)" % (self.status.name, self.status.description)
print "initial request: %s" % str(self.request_list)
print
print "solve stack:"
print '\n'.join(columnise(rows))
if self.failed_phase_list:
rows = []
for i, phase in enumerate(self.failed_phase_list):
rows.append(("#%d" % i, phase.status, str(phase)))
print
print "previous failures:"
        print '\n'.join(columnise(rows))
def load(self, config):
"""Load the web list from the configuration file."""
web_list = []
if config is None:
logger.debug("No configuration file available. Cannot load ports list.")
elif not config.has_section(self._section):
logger.debug("No [%s] section in the configuration file. Cannot load ports list." % self._section)
else:
logger.debug("Start reading the [%s] section in the configuration file" % self._section)
refresh = int(config.get_value(self._section, 'refresh', default=self._default_refresh))
timeout = int(config.get_value(self._section, 'timeout', default=self._default_timeout))
# Read the web/url list
for i in range(1, 256):
new_web = {}
postfix = 'web_%s_' % str(i)
# Read mandatories configuration key: host
new_web['url'] = config.get_value(self._section, '%s%s' % (postfix, 'url'))
if new_web['url'] is None:
continue
url_parse = urlparse(new_web['url'])
if not bool(url_parse.scheme) or not bool(url_parse.netloc):
logger.error('Bad URL (%s) in the [%s] section of configuration file.' % (new_web['url'],
self._section))
continue
# Read optionals configuration keys
# Default description is the URL without the http://
new_web['description'] = config.get_value(self._section,
'%sdescription' % postfix,
default="%s" % url_parse.netloc)
# Default status
new_web['status'] = None
new_web['elapsed'] = 0
# Refresh rate in second
new_web['refresh'] = refresh
# Timeout in second
new_web['timeout'] = int(config.get_value(self._section,
'%stimeout' % postfix,
default=timeout))
# RTT warning
new_web['rtt_warning'] = config.get_value(self._section,
'%srtt_warning' % postfix,
default=None)
if new_web['rtt_warning'] is not None:
# Convert to second
new_web['rtt_warning'] = int(new_web['rtt_warning']) / 1000.0
# Indice
new_web['indice'] = 'web_' + str(i)
# ssl_verify
new_web['ssl_verify'] = config.get_value(self._section,
'%sssl_verify' % postfix,
default=True)
# Proxy
http_proxy = config.get_value(self._section,
'%shttp_proxy' % postfix,
default=None)
https_proxy = config.get_value(self._section,
'%shttps_proxy' % postfix,
default=None)
if https_proxy is None and http_proxy is None:
new_web['proxies'] = None
else:
new_web['proxies'] = {'http' : http_proxy,
'https' : https_proxy }
# Add the server to the list
logger.debug("Add Web URL %s to the static list" % new_web['url'])
web_list.append(new_web)
    # Web list loaded
logger.debug("Web list loaded: %s" % web_list)
    return web_list
def absent(name, **connection_args):
'''
Ensure that the named database is absent
name
The name of the database to remove
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
#check if db exists and remove it
if __salt__['mysql.db_exists'](name, **connection_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = \
'Database {0} is present and needs to be removed'.format(name)
return ret
if __salt__['mysql.db_remove'](name, **connection_args):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = 'Unable to remove database {0} ' \
'({1})'.format(name, err)
ret['result'] = False
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
# fallback
ret['comment'] = ('Database {0} is not present, so it cannot be removed'
).format(name)
    return ret
def main():
# type: () -> typing.Any
"""Parse the command line options and launch the requested command.
If the command is 'help' then print the help message for the subcommand; if
no subcommand is given, print the standard help message.
"""
colorama.init(wrap=six.PY3)
doc = usage.get_primary_command_usage()
allow_subcommands = '<command>' in doc
args = docopt(doc, version=settings.version,
options_first=allow_subcommands)
if sys.excepthook is sys.__excepthook__:
sys.excepthook = log.excepthook
try:
log.enable_logging(log.get_log_level(args))
default_args = sys.argv[2 if args.get('<command>') else 1:]
if (args.get('<command>') == 'help' and
None not in settings.subcommands):
subcommand = next(iter(args.get('<args>', default_args)), None)
return usage.get_help_usage(subcommand)
argv = [args.get('<command>')] + args.get('<args>', default_args)
return _run_command(argv)
except exc.InvalidCliValueError as e:
        return str(e)
def handleMatch(self, m):
"""
Handles user input into [magic] tag, processes it,
and inserts the returned URL into an <img> tag
through a Python ElementTree <img> Element.
"""
userStr = m.group(3)
# print(userStr)
imgURL = processString(userStr)
# print(imgURL)
el = etree.Element('img')
# Sets imgURL to 'src' attribute of <img> tag element
el.set('src', imgURL)
el.set('alt', userStr)
el.set('title', userStr)
    return el
def run(data):
"""Quantitaive isoforms expression by eXpress"""
name = dd.get_sample_name(data)
in_bam = dd.get_transcriptome_bam(data)
config = data['config']
if not in_bam:
logger.info("Transcriptome-mapped BAM file not found, skipping eXpress.")
return data
out_dir = os.path.join(dd.get_work_dir(data), "express", name)
out_file = os.path.join(out_dir, name + ".xprs")
express = config_utils.get_program("express", data['config'])
strand = _set_stranded_flag(in_bam, data)
if not file_exists(out_file):
gtf_fasta = gtf.gtf_to_fasta(dd.get_gtf_file(data), dd.get_ref_file(data))
with tx_tmpdir(data) as tmp_dir:
with file_transaction(data, out_dir) as tx_out_dir:
bam_file = _prepare_bam_file(in_bam, tmp_dir, config)
cmd = ("{express} --no-update-check -o {tx_out_dir} {strand} {gtf_fasta} {bam_file}")
do.run(cmd.format(**locals()), "Run express on %s." % in_bam, {})
shutil.move(os.path.join(out_dir, "results.xprs"), out_file)
eff_count_file = _get_column(out_file, out_file.replace(".xprs", "_eff.counts"), 7, data=data)
tpm_file = _get_column(out_file, out_file.replace("xprs", "tpm"), 14, data=data)
fpkm_file = _get_column(out_file, out_file.replace("xprs", "fpkm"), 10, data=data)
data = dd.set_express_counts(data, eff_count_file)
data = dd.set_express_tpm(data, tpm_file)
data = dd.set_express_fpkm(data, fpkm_file)
    return data
def idle_task(self):
'''called on idle'''
for r in self.repeats:
if r.event.trigger():
            self.mpstate.functions.process_stdin(r.cmd, immediate=True)
def task(name, deps = None, fn = None):
"""Define a new task."""
if callable(deps):
fn = deps
deps = None
if not deps and not fn:
logger.log(logger.red("The task '%s' is empty" % name))
else:
        tasks[name] = [fn, deps]
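# Usage sketch for the task() registrar above (it relies on the module-level
# `tasks` dict and `logger`; the task names and bodies are illustrative):
def _clean():
    print("cleaning build artifacts")

task("clean", fn=_clean)                        # task with no dependencies
task("build", deps=["clean"], fn=lambda: print("building"))
task("noop")                                    # logged as empty: no deps, no fn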
def paginate(self):
"""Make folders where we would like to put results etc."""
project_dir = self.project_dir
raw_dir = self.raw_dir
batch_dir = self.batch_dir
if project_dir is None:
raise UnderDefined("no project directory defined")
if raw_dir is None:
raise UnderDefined("no raw directory defined")
if batch_dir is None:
raise UnderDefined("no batcb directory defined")
# create the folders
if not os.path.isdir(project_dir):
os.mkdir(project_dir)
logging.info(f"created folder {project_dir}")
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
logging.info(f"created folder {batch_dir}")
if not os.path.isdir(raw_dir):
os.mkdir(raw_dir)
logging.info(f"created folder {raw_dir}")
return project_dir, batch_dir, raw_dir | Make folders where we would like to put results etc. |
def detect_direct_function_shadowing(contract):
"""
Detects and obtains functions which are shadowed immediately by the provided ancestor contract.
:param contract: The ancestor contract which we check for function shadowing within.
:return: A list of tuples (overshadowing_function, overshadowed_immediate_base_contract, overshadowed_function)
-overshadowing_function is the function defined within the provided contract that overshadows another
definition.
-overshadowed_immediate_base_contract is the immediate inherited-from contract that provided the shadowed
function (could have provided it through inheritance, does not need to directly define it).
-overshadowed_function is the function definition which is overshadowed by the provided contract's definition.
"""
functions_declared = {function.full_name: function for function in contract.functions_and_modifiers_not_inherited}
results = {}
for base_contract in reversed(contract.immediate_inheritance):
for base_function in base_contract.functions_and_modifiers:
# We already found the most immediate shadowed definition for this function, skip to the next.
if base_function.full_name in results:
continue
# If this function is implemented and it collides with a definition in our immediate contract, we add
# it to our results.
if base_function.is_implemented and base_function.full_name in functions_declared:
results[base_function.full_name] = (functions_declared[base_function.full_name], base_contract, base_function)
    return list(results.values())
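# Usage sketch that runs the shadowing check over every contract in a Slither
# compilation unit (the Slither(...) entry point and the .contracts/.name
# attributes follow the slither API and are assumptions here):
def _demo_detect_shadowing(slither):
    for contract in slither.contracts:
        for shadowing, base, shadowed in detect_direct_function_shadowing(contract):
            print("%s.%s shadows %s.%s" % (contract.name, shadowing.full_name,
                                           base.name, shadowed.full_name))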
def _factory(cls, constraints, op):
""" Factory for joining constraints with a single conjunction """
pieces = []
for i, constraint in enumerate(constraints):
pieces.append(constraint)
if i != len(constraints) - 1:
pieces.append(op)
    return cls(pieces)
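# Minimal illustration of the interleaving performed by _factory, using plain
# strings in place of real constraint objects and a plain list in place of the
# owning class (both are stand-ins, not part of the original module):
def _demo_factory_interleaving():
    constraints, op = ["a = 1", "b = 2", "c = 3"], "AND"
    pieces = []
    for i, constraint in enumerate(constraints):
        pieces.append(constraint)
        if i != len(constraints) - 1:
            pieces.append(op)
    assert pieces == ["a = 1", "AND", "b = 2", "AND", "c = 3"]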
def _handle_chat(self, data):
"""Handle chat messages"""
self.conn.enqueue_data(
"chat", ChatMessage.from_data(self.room, self.conn, data)
    )
def search_upwards(self, fpath=None, repodirname='.svn', upwards={}):
"""
Traverse filesystem upwards, searching for .svn directories
with matching UUIDs (Recursive)
Args:
fpath (str): file path to search upwards from
repodirname (str): directory name to search for (``.svn``)
upwards (dict): dict of already-searched directories
example::
repo/.svn
repo/dir1/.svn
repo/dir1/dir2/.svn
>> search_upwards('repo/')
<< 'repo/'
>> search_upwards('repo/dir1')
<< 'repo/'
>> search_upwards('repo/dir1/dir2')
<< 'repo/'
repo/.svn
repo/dirA/
repo/dirA/dirB/.svn
>> search_upwards('repo/dirA')
<< 'repo/'
>> search_upwards('repo/dirA/dirB')
>> 'repo/dirB')
"""
fpath = fpath or self.fpath
uuid = self.unique_id
last_path = self
path_comp = fpath.split(os.path.sep)
# [0:-1], [0:-2], [0:-1*len(path_comp)]
for n in xrange(1, len(path_comp)-1):
checkpath = os.path.join(*path_comp[0:-1 * n])
repodir = os.path.join(checkpath, repodirname)
upw_uuid = upwards.get(repodir)
if upw_uuid:
if upw_uuid == uuid:
last_path = SvnRepository(checkpath)
continue
else:
break
elif os.path.exists(repodir):
repo = SvnRepository(checkpath)
upw_uuid = repo.unique_id
upwards[repodir] = upw_uuid
# TODO: match on REVISION too
if upw_uuid == uuid:
last_path = repo
continue
else:
break
    return last_path
def pretty_time(timestamp: str):
"""Format timestamp for human consumption."""
try:
parsed = iso_8601.parse_datetime(timestamp)
except ValueError:
now = datetime.utcnow().replace(tzinfo=timezone.utc)
try:
delta = iso_8601.parse_delta(timestamp)
except ValueError:
delta = human_time.parse_timedelta(timestamp)
parsed = now - delta
    echo(human_time.human_timestamp(parsed))
def _sync_io(self):
"""Update the stream with changes to the file object contents."""
if self._file_epoch == self.file_object.epoch:
return
if self._io.binary:
contents = self.file_object.byte_contents
else:
contents = self.file_object.contents
self._set_stream_contents(contents)
    self._file_epoch = self.file_object.epoch
def find_bidi(self, el):
"""Get directionality from element text."""
for node in self.get_children(el, tags=False):
# Analyze child text nodes
if self.is_tag(node):
# Avoid analyzing certain elements specified in the specification.
direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, 'dir', '')), None)
if (
self.get_tag(node) in ('bdi', 'script', 'style', 'textarea', 'iframe') or
not self.is_html_tag(node) or
direction is not None
):
continue # pragma: no cover
# Check directionality of this node's text
value = self.find_bidi(node)
if value is not None:
return value
# Direction could not be determined
continue # pragma: no cover
# Skip `doctype` comments, etc.
if self.is_special_string(node):
continue
# Analyze text nodes for directionality.
for c in node:
bidi = unicodedata.bidirectional(c)
if bidi in ('AL', 'R', 'L'):
return ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
    return None
def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be considered"""
missing = None
locus_count = 0
# Filter out individuals according to missingness
self.genotype_file.seek(0)
for genotypes in self.genotype_file:
genotypes = genotypes.split()
chr, rsid, junk, pos = genotypes[0:4]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
locus_count += 1
allelic_data = numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2)
if missing is None:
missing = numpy.zeros(allelic_data.shape[0], dtype='int8')
missing += (numpy.sum(0+(allelic_data==DataParser.missing_representation), axis=1)/2)
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0+(max_missing<missing)
self.ind_mask[:,0] = self.ind_mask[:,0]|dropped_individuals
self.ind_mask[:,1] = self.ind_mask[:,1]|dropped_individuals
valid_individuals = numpy.sum(self.ind_mask==0)
max_missing = DataParser.snp_miss_tol * valid_individuals
self.locus_count = 0
# We can't merge these two iterations since we need to know which individuals
# to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
for genotypes in self.genotype_file:
genotypes = genotypes.split()
chr, rsid, junk, pos = genotypes[0:4]
chr = int(chr)
pos = int(pos)
if DataParser.boundary.TestBoundary(chr, pos, rsid):
allelic_data = numpy.ma.MaskedArray(numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2), self.ind_mask).compressed()
missing = numpy.sum(0+(allelic_data==DataParser.missing_representation))
if missing > max_missing:
DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
dropped_snps.append(rsid)
else:
                self.locus_count += 1
def add_cmds_cpdir(cpdir,
cmdpkl,
cpfileglob='checkplot*.pkl*',
require_cmd_magcolor=True,
save_cmd_pngs=False):
'''This adds CMDs for each object in cpdir.
Parameters
----------
    cpdir : str
This is the directory to search for checkplot pickles.
cmdpkl : str
This is the filename of the CMD pickle created previously.
cpfileglob : str
The UNIX fileglob to use when searching for checkplot pickles to operate
on.
require_cmd_magcolor : bool
If this is True, a CMD plot will not be made if the color and mag keys
required by the CMD are not present or are nan in each checkplot's
objectinfo dict.
save_cmd_pngs : bool
If this is True, then will save the CMD plots that were generated and
added back to the checkplotdict as PNGs to the same directory as
`cpx`.
Returns
-------
Nothing.
'''
cplist = glob.glob(os.path.join(cpdir, cpfileglob))
return add_cmds_cplist(cplist,
cmdpkl,
require_cmd_magcolor=require_cmd_magcolor,
save_cmd_pngs=save_cmd_pngs) | This adds CMDs for each object in cpdir.
Parameters
----------
cpdir : list of str
This is the directory to search for checkplot pickles.
cmdpkl : str
This is the filename of the CMD pickle created previously.
cpfileglob : str
The UNIX fileglob to use when searching for checkplot pickles to operate
on.
require_cmd_magcolor : bool
If this is True, a CMD plot will not be made if the color and mag keys
required by the CMD are not present or are nan in each checkplot's
objectinfo dict.
save_cmd_pngs : bool
If this is True, then will save the CMD plots that were generated and
added back to the checkplotdict as PNGs to the same directory as
`cpx`.
Returns
-------
Nothing. |
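A hedged usage sketch; the paths are hypothetical and the CMD pickle is assumed to have been produced beforehand by the corresponding color-magnitude-diagram routine:

# glob checkplot pickles under /data/checkplots and attach CMDs from cmd.pkl
add_cmds_cpdir('/data/checkplots',
               '/data/cmd.pkl',
               cpfileglob='checkplot*.pkl*',
               require_cmd_magcolor=True,
               save_cmd_pngs=True)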
def datapoint_indices_for_tensor(self, tensor_index):
""" Returns the indices for all datapoints in the given tensor. """
if tensor_index >= self._num_tensors:
            raise ValueError('Tensor index %d is out of range for the number of tensors (%d)' % (tensor_index, self._num_tensors))
return self._file_num_to_indices[tensor_index] | Returns the indices for all datapoints in the given tensor. |
def marshal(self, values):
"""
Turn a list of entities into a list of dictionaries.
:param values: The entities to serialize.
:type values: List[stravalib.model.BaseEntity]
:return: List of dictionaries of attributes
:rtype: List[Dict[str, Any]]
"""
if values is not None:
return [super(EntityCollection, self).marshal(v) for v in values] | Turn a list of entities into a list of dictionaries.
:param values: The entities to serialize.
:type values: List[stravalib.model.BaseEntity]
:return: List of dictionaries of attributes
:rtype: List[Dict[str, Any]] |
def _search(self, limit, format):
'''
Returns a list of result objects, with the url for the next page MsCognitive search url.
'''
limit = min(limit, self.MAX_SEARCH_PER_QUERY)
payload = {
'q' : self.query,
'count' : limit, #currently 50 is max per search.
'offset': self.current_offset,
}
payload.update(self.CUSTOM_PARAMS)
headers = { 'Ocp-Apim-Subscription-Key' : self.api_key }
if not self.silent_fail:
QueryChecker.check_web_params(payload, headers)
response = requests.get(self.QUERY_URL, params=payload, headers=headers)
json_results = self.get_json_results(response)
packaged_results = [NewsResult(single_result_json) for single_result_json in json_results["value"]]
self.current_offset += min(50, limit, len(packaged_results))
return packaged_results | Returns a list of result objects, with the url for the next page MsCognitive search url. |
def boggle_hill_climbing(board=None, ntimes=100, verbose=True):
"""Solve inverse Boggle by hill-climbing: find a high-scoring board by
starting with a random one and changing it."""
finder = BoggleFinder()
if board is None:
board = random_boggle()
best = len(finder.set_board(board))
for _ in range(ntimes):
i, oldc = mutate_boggle(board)
new = len(finder.set_board(board))
if new > best:
best = new
if verbose: print best, _, board
else:
board[i] = oldc ## Change back
if verbose:
print_boggle(board)
return board, best | Solve inverse Boggle by hill-climbing: find a high-scoring board by
starting with a random one and changing it. |
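The mutate-and-revert loop above is plain hill-climbing; a domain-agnostic sketch of the same scheme, with all names illustrative rather than taken from the Boggle code:

import random

def hill_climb(state, mutate, undo, score, ntimes=100):
    """Greedy local search: keep a mutation only if the score improves."""
    best = score(state)
    for _ in range(ntimes):
        token = mutate(state)        # change state in place, return undo info
        new = score(state)
        if new > best:
            best = new
        else:
            undo(state, token)       # revert the unhelpful change
    return state, best

def bump(xs):
    i = random.randrange(len(xs)); old = xs[i]; xs[i] += random.choice([-1, 1]); return (i, old)

def revert(xs, token):
    i, old = token; xs[i] = old

print(hill_climb([0, 0, 0], bump, revert, sum, ntimes=200))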
def proc_collector(process_map, args, pipeline_string):
"""
Function that collects all processes available and stores a dictionary of
the required arguments of each process class to be passed to
procs_dict_parser
Parameters
----------
process_map: dict
The dictionary with the Processes currently available in flowcraft
and their corresponding classes as values
args: argparse.Namespace
        The arguments passed through argparser that will be accessed to check the
type of list to be printed
pipeline_string: str
the pipeline string
"""
arguments_list = []
# prints a detailed list of the process class arguments
if args.detailed_list:
# list of attributes to be passed to proc_collector
arguments_list += [
"input_type",
"output_type",
"description",
"dependencies",
"conflicts",
"directives"
]
# prints a short list with each process and the corresponding description
if args.short_list:
arguments_list += [
"description"
]
if arguments_list:
# dict to store only the required entries
procs_dict = {}
# loops between all process_map Processes
for name, cls in process_map.items():
# instantiates each Process class
cls_inst = cls(template=name)
# checks if recipe is provided
if pipeline_string:
if name not in pipeline_string:
continue
d = {arg_key: vars(cls_inst)[arg_key] for arg_key in
vars(cls_inst) if arg_key in arguments_list}
procs_dict[name] = d
procs_dict_parser(procs_dict)
sys.exit(0) | Function that collects all processes available and stores a dictionary of
the required arguments of each process class to be passed to
procs_dict_parser
Parameters
----------
process_map: dict
The dictionary with the Processes currently available in flowcraft
and their corresponding classes as values
args: argparse.Namespace
    The arguments passed through argparser that will be accessed to check the
type of list to be printed
pipeline_string: str
the pipeline string |
def _set_interface_brief(self, v, load=False):
"""
Setter method for interface_brief, mapped from YANG variable /isis_state/interface_brief (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_brief is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_brief() directly.
YANG Description: ISIS interface info brief
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_brief.interface_brief, is_container='container', presence=False, yang_name="interface-brief", rest_name="interface-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-port-isis-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_brief must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_brief.interface_brief, is_container='container', presence=False, yang_name="interface-brief", rest_name="interface-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-port-isis-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__interface_brief = t
if hasattr(self, '_set'):
self._set() | Setter method for interface_brief, mapped from YANG variable /isis_state/interface_brief (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_brief is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_brief() directly.
YANG Description: ISIS interface info brief |
def clear_all_events(self):
"""Clear all event queues and their cached events."""
self.lock.acquire()
self.event_dict.clear()
self.lock.release() | Clear all event queues and their cached events. |
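A hedged note on the explicit acquire/release pair above: if clear() ever raised, the lock would stay held. The usual stdlib idiom uses the lock as a context manager, sketched here with a minimal stand-in class:

import threading

class EventStore:
    def __init__(self):
        self.lock = threading.Lock()
        self.event_dict = {}

    def clear_all_events(self):
        """Clear all event queues and their cached events."""
        with self.lock:              # released even if clear() raises
            self.event_dict.clear()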
def loadPng(varNumVol, tplPngSize, strPathPng):
"""Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
        Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data.
"""
print('------Load PNGs')
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng + str(idx01) + '.png')
# Load png files. The png data will be saved in a numpy array of the
# following order: aryPngData[x-pixel, y-pixel, PngNumber]. The
# sp.misc.imread function actually contains three values per pixel (RGB),
# but since the stimuli are black-and-white, any one of these is sufficient
# and we discard the others.
aryPngData = np.zeros((tplPngSize[0],
tplPngSize[1],
varNumVol))
for idx01 in range(0, varNumVol):
aryPngData[:, :, idx01] = np.array(Image.open(lstPngPaths[idx01]))
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 0).astype(int)
return aryPngData | Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
    Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data. |
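A compact, hedged sketch of the same load-and-binarize step with Pillow and numpy, assuming grayscale PNGs named 0.png, 1.png, ... under a prefix path:

import numpy as np
from PIL import Image

def load_binary_pngs(n_vols, png_size, path_prefix):
    """Stack n_vols PNGs into a (x, y, n_vols) array of 0/1 values."""
    data = np.zeros((png_size[0], png_size[1], n_vols))
    for idx in range(n_vols):
        img = Image.open('{}{}.png'.format(path_prefix, idx)).convert('L')
        data[:, :, idx] = np.array(img)
    return (data > 0).astype(int)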
def new_transaction(
vm: VM,
from_: Address,
to: Address,
amount: int=0,
private_key: PrivateKey=None,
gas_price: int=10,
gas: int=100000,
data: bytes=b'') -> BaseTransaction:
"""
Create and return a transaction sending amount from <from_> to <to>.
The transaction will be signed with the given private key.
"""
nonce = vm.state.get_nonce(from_)
tx = vm.create_unsigned_transaction(
nonce=nonce,
gas_price=gas_price,
gas=gas,
to=to,
value=amount,
data=data,
)
return tx.as_signed_transaction(private_key) | Create and return a transaction sending amount from <from_> to <to>.
The transaction will be signed with the given private key. |
def decorator(directname=None):
"""
Attach a class to a parsing decorator and register it to the global
decorator list.
The class is registered with its name unless directname is provided
"""
global _decorators
class_deco_list = _decorators
def wrapper(f):
nonlocal directname
if directname is None:
directname = f.__name__
f.ns_name = directname
        set_one(class_deco_list, directname, f)
        return f
return wrapper | Attach a class to a parsing decorator and register it to the global
decorator list.
The class is registered with its name unless directname is provided |
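A hedged usage sketch of the registration decorator, assuming set_one records the callable in the global decorator list and that wrapper returns the decorated function (as above):

@decorator("my_directive")
def handle_my_directive(node):
    return node

# the function is now registered under "my_directive" and tagged with ns_name
assert handle_my_directive.ns_name == "my_directive"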
def query_by_user(cls, user, **kwargs):
"""Get a user's memberships."""
return cls._filter(
cls.query.filter_by(user_id=user.get_id()),
**kwargs
) | Get a user's memberships. |
def _prepare_transformation_recipe(pattern: str, reduction: str, axes_lengths: Tuple) -> TransformRecipe:
""" Perform initial parsing of pattern and provided supplementary info
axes_lengths is a tuple of tuples (axis_name, axis_length)
"""
left, right = pattern.split('->')
identifiers_left, composite_axes_left = parse_expression(left)
identifiers_rght, composite_axes_rght = parse_expression(right)
# checking that both have similar letters
if reduction == 'rearrange':
difference = set.symmetric_difference(identifiers_left, identifiers_rght)
if len(difference) > 0:
raise EinopsError('Identifiers only on one side of expression (should be on both): {}'.format(difference))
elif reduction in _reductions:
difference = set.difference(identifiers_rght, identifiers_left)
if len(difference) > 0:
raise EinopsError('Unexpected identifiers on the right side of expression: {}'.format(difference))
else:
raise EinopsError('Unknown reduction {}'.format(reduction))
# parsing all dimensions to find out lengths
known_lengths = OrderedDict()
position_lookup = {}
position_lookup_after_reduction = {}
reduced_axes = []
for composite_axis in composite_axes_left:
for axis in composite_axis:
position_lookup[axis] = len(position_lookup)
if axis in identifiers_rght:
position_lookup_after_reduction[axis] = len(position_lookup_after_reduction)
else:
reduced_axes.append(len(known_lengths))
known_lengths[axis] = None
def update_axis_length(axis_name, axis_length):
if known_lengths[axis_name] is not None:
# check is not performed for symbols
if isinstance(axis_length, int) and isinstance(known_lengths[axis_name], int):
if axis_length != known_lengths[axis_name]:
raise RuntimeError('Inferred length for {} is {} not {}'.format(
axis_name, axis_length, known_lengths[axis_name]))
else:
known_lengths[axis_name] = axis_length
for elementary_axis, axis_length in axes_lengths:
if not _check_elementary_axis_name(elementary_axis):
raise EinopsError('Invalid name for an axis', elementary_axis)
if elementary_axis not in known_lengths:
raise EinopsError('Axis {} is not used in transform'.format(elementary_axis))
update_axis_length(elementary_axis, axis_length)
input_axes_known_unknown = []
# inferring rest of sizes from arguments
for composite_axis in composite_axes_left:
known = {axis for axis in composite_axis if known_lengths[axis] is not None}
unknown = {axis for axis in composite_axis if known_lengths[axis] is None}
lookup = dict(zip(list(known_lengths), range(len(known_lengths))))
if len(unknown) > 1:
raise EinopsError('Could not infer sizes for {}'.format(unknown))
assert len(unknown) + len(known) == len(composite_axis)
input_axes_known_unknown.append(([lookup[axis] for axis in known], [lookup[axis] for axis in unknown]))
result_axes_grouping = [[position_lookup_after_reduction[axis] for axis in composite_axis]
for composite_axis in composite_axes_rght]
ellipsis_left = math.inf if _ellipsis not in composite_axes_left else composite_axes_left.index(_ellipsis)
ellipsis_rght = math.inf if _ellipsis not in composite_axes_rght else composite_axes_rght.index(_ellipsis)
return TransformRecipe(elementary_axes_lengths=list(known_lengths.values()),
input_composite_axes=input_axes_known_unknown,
output_composite_axes=result_axes_grouping,
reduction_type=reduction,
reduced_elementary_axes=tuple(reduced_axes),
ellipsis_positions=(ellipsis_left, ellipsis_rght)
) | Perform initial parsing of pattern and provided supplementary info
axes_lengths is a tuple of tuples (axis_name, axis_length) |
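The recipe built above backs the public einops calls; a brief usage sketch, assuming the einops package and numpy are installed:

import numpy as np
from einops import rearrange, reduce

x = np.random.rand(2, 3, 4)               # (batch, height, width)
y = rearrange(x, 'b h w -> b (h w)')      # pure reshape, no axis reduced
z = reduce(x, 'b h w -> b h', 'mean')     # 'w' only appears on the left, so it is averaged out
print(y.shape, z.shape)                   # -> (2, 12) (2, 3)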
def merge_items(from_id, to_id, login_obj, mediawiki_api_url='https://www.wikidata.org/w/api.php',
ignore_conflicts='', user_agent=config['USER_AGENT_DEFAULT']):
"""
A static method to merge two Wikidata items
:param from_id: The QID which should be merged into another item
:type from_id: string with 'Q' prefix
:param to_id: The QID into which another item should be merged
:type to_id: string with 'Q' prefix
:param login_obj: The object containing the login credentials and cookies
:type login_obj: instance of PBB_login.WDLogin
:param mediawiki_api_url: The MediaWiki url which should be used
:type mediawiki_api_url: str
:param ignore_conflicts: A string with the values 'description', 'statement' or 'sitelink', separated
by a pipe ('|') if using more than one of those.
:type ignore_conflicts: str
"""
url = mediawiki_api_url
headers = {
'content-type': 'application/x-www-form-urlencoded',
'charset': 'utf-8',
'User-Agent': user_agent
}
params = {
'action': 'wbmergeitems',
'fromid': from_id,
'toid': to_id,
'token': login_obj.get_edit_token(),
'format': 'json',
'bot': '',
'ignoreconflicts': ignore_conflicts
}
try:
# TODO: should we retry this?
merge_reply = requests.post(url=url, data=params, headers=headers, cookies=login_obj.get_edit_cookie())
merge_reply.raise_for_status()
if 'error' in merge_reply.json():
raise MergeError(merge_reply.json())
except requests.HTTPError as e:
print(e)
# TODO: should we return this?
return {'error': 'HTTPError'}
return merge_reply.json() | A static method to merge two Wikidata items
:param from_id: The QID which should be merged into another item
:type from_id: string with 'Q' prefix
:param to_id: The QID into which another item should be merged
:type to_id: string with 'Q' prefix
:param login_obj: The object containing the login credentials and cookies
:type login_obj: instance of PBB_login.WDLogin
:param mediawiki_api_url: The MediaWiki url which should be used
:type mediawiki_api_url: str
:param ignore_conflicts: A string with the values 'description', 'statement' or 'sitelink', separated
by a pipe ('|') if using more than one of those.
:type ignore_conflicts: str |
def check_config_mode(self, check_string=")#", pattern=""):
"""
Checks if the device is in configuration mode or not.
Cisco IOS devices abbreviate the prompt at 20 chars in config mode
"""
return super(CiscoBaseConnection, self).check_config_mode(
check_string=check_string, pattern=pattern
) | Checks if the device is in configuration mode or not.
Cisco IOS devices abbreviate the prompt at 20 chars in config mode |
def compose_object(self, file_list, destination_file, content_type):
"""COMPOSE multiple objects together.
Using the given list of files, calls the put object with the compose flag.
This call merges all the files into the destination file.
Args:
file_list: list of dicts with the file name.
destination_file: Path to the destination file.
content_type: Content type for the destination file.
"""
xml_setting_list = ['<ComposeRequest>']
for meta_data in file_list:
xml_setting_list.append('<Component>')
for key, val in meta_data.iteritems():
xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
xml_setting_list.append('</Component>')
xml_setting_list.append('</ComposeRequest>')
xml = ''.join(xml_setting_list)
if content_type is not None:
headers = {'Content-Type': content_type}
else:
headers = None
status, resp_headers, content = self.put_object(
api_utils._quote_filename(destination_file) + '?compose',
payload=xml,
headers=headers)
errors.check_status(status, [200], destination_file, resp_headers,
body=content) | COMPOSE multiple objects together.
Using the given list of files, calls the put object with the compose flag.
This call merges all the files into the destination file.
Args:
file_list: list of dicts with the file name.
destination_file: Path to the destination file.
content_type: Content type for the destination file. |
def get_east_asian_width_property(value, is_bytes=False):
"""Get `EAST ASIAN WIDTH` property."""
obj = unidata.ascii_east_asian_width if is_bytes else unidata.unicode_east_asian_width
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['eastasianwidth'].get(negated, negated)
else:
value = unidata.unicode_alias['eastasianwidth'].get(value, value)
return obj[value] | Get `EAST ASIAN WIDTH` property. |
def setup_multiprocessing_logging(queue=None):
'''
This code should be called from within a running multiprocessing
process instance.
'''
from salt.utils.platform import is_windows
global __MP_LOGGING_CONFIGURED
global __MP_LOGGING_QUEUE_HANDLER
if __MP_IN_MAINPROCESS is True and not is_windows():
# We're in the MainProcess, return! No multiprocessing logging setup shall happen
# Windows is the exception where we want to set up multiprocessing
# logging in the MainProcess.
return
try:
logging._acquireLock() # pylint: disable=protected-access
if __MP_LOGGING_CONFIGURED is True:
return
# Let's set it to true as fast as possible
__MP_LOGGING_CONFIGURED = True
if __MP_LOGGING_QUEUE_HANDLER is not None:
return
# The temp null and temp queue logging handlers will store messages.
        # Since no one will process them, memory usage will grow. If they
# exist, remove them.
__remove_null_logging_handler()
__remove_queue_logging_handler()
# Let's add a queue handler to the logging root handlers
__MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
# Set the logging root level to the lowest needed level to get all
# desired messages.
log_level = get_multiprocessing_logging_level()
logging.root.setLevel(log_level)
logging.getLogger(__name__).debug(
'Multiprocessing queue logging configured for the process running '
'under PID: %s at log level %s', os.getpid(), log_level
)
# The above logging call will create, in some situations, a futex wait
# lock condition, probably due to the multiprocessing Queue's internal
# lock and semaphore mechanisms.
# A small sleep will allow us not to hit that futex wait lock condition.
time.sleep(0.0001)
finally:
logging._releaseLock() | This code should be called from within a running multiprocessing
process instance. |
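A stripped-down sketch of the same queue-handler pattern with only the standard library (no Salt specifics); the listener is assumed to run in the parent process:

import logging
import logging.handlers
import multiprocessing

def worker(queue):
    # child process: forward every record to the shared queue
    root = logging.getLogger()
    root.addHandler(logging.handlers.QueueHandler(queue))
    root.setLevel(logging.DEBUG)
    logging.getLogger(__name__).info('hello from %s', multiprocessing.current_process().name)

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    # parent process: drain the queue into an ordinary stream handler
    listener = logging.handlers.QueueListener(queue, logging.StreamHandler())
    listener.start()
    p = multiprocessing.Process(target=worker, args=(queue,))
    p.start()
    p.join()
    listener.stop()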
def process(self, formdata=None, obj=None, data=None, **kwargs):
'''Wrap the process method to store the current object instance'''
self._obj = obj
super(CommonFormMixin, self).process(formdata, obj, data, **kwargs) | Wrap the process method to store the current object instance |
def url(self):
"""
The URL present in this inline results. If you want to "click"
this URL to open it in your browser, you should use Python's
`webbrowser.open(url)` for such task.
"""
if isinstance(self.result, types.BotInlineResult):
return self.result.url | The URL present in this inline results. If you want to "click"
this URL to open it in your browser, you should use Python's
`webbrowser.open(url)` for such task. |
def _prm_write_shared_array(self, key, data, hdf5_group, full_name, flag, **kwargs):
"""Creates and array that can be used with an HDF5 array object"""
if flag == HDF5StorageService.ARRAY:
self._prm_write_into_array(key, data, hdf5_group, full_name, **kwargs)
elif flag in (HDF5StorageService.CARRAY,
HDF5StorageService.EARRAY,
HDF5StorageService.VLARRAY):
self._prm_write_into_other_array(key, data, hdf5_group, full_name,
flag=flag, **kwargs)
else:
raise RuntimeError('Flag `%s` of hdf5 data `%s` of `%s` not understood' %
(flag, key, full_name))
self._hdf5file.flush() | Creates and array that can be used with an HDF5 array object |
def map_components(notsplit_packages, components):
"""
Returns a list of packages to install based on component names
This is done by checking if a component is in notsplit_packages,
if it is, we know we need to install 'ceph' instead of the
raw component name. Essentially, this component hasn't been
'split' from the master 'ceph' package yet.
"""
packages = set()
for c in components:
if c in notsplit_packages:
packages.add('ceph')
else:
packages.add(c)
return list(packages) | Returns a list of packages to install based on component names
This is done by checking if a component is in notsplit_packages,
if it is, we know we need to install 'ceph' instead of the
raw component name. Essentially, this component hasn't been
'split' from the master 'ceph' package yet. |
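A small hedged example of the mapping; the component names here are hypothetical:

notsplit = ['mon', 'osd']                        # components assumed to still ship inside 'ceph'
print(map_components(notsplit, ['mon', 'radosgw']))
# -> ['ceph', 'radosgw'] (set-backed, so order may vary)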
def _dict_to_map_str_str(self, d):
"""
Thrift requires the params and headers dict values to only contain str values.
"""
return dict(map(
lambda (k, v): (k, str(v).lower() if isinstance(v, bool) else str(v)),
d.iteritems()
)) | Thrift requires the params and headers dict values to only contain str values. |
def configure_retrieve(self, ns, definition):
"""
Register a retrieve endpoint.
The definition's func should be a retrieve function, which must:
- accept kwargs for path data
- return an item or falsey
:param ns: the namespace
:param definition: the endpoint definition
"""
request_schema = definition.request_schema or Schema()
@self.add_route(ns.instance_path, Operation.Retrieve, ns)
@qs(request_schema)
@response(definition.response_schema)
@wraps(definition.func)
def retrieve(**path_data):
headers = dict()
request_data = load_query_string_data(request_schema)
response_data = require_response_data(definition.func(**merge_data(path_data, request_data)))
definition.header_func(headers, response_data)
response_format = self.negotiate_response_content(definition.response_formats)
return dump_response_data(
definition.response_schema,
response_data,
headers=headers,
response_format=response_format,
)
retrieve.__doc__ = "Retrieve a {} by id".format(ns.subject_name) | Register a retrieve endpoint.
The definition's func should be a retrieve function, which must:
- accept kwargs for path data
- return an item or falsey
:param ns: the namespace
:param definition: the endpoint definition |
def save_history(self, f):
"""Saves the history of ``NeuralNet`` as a json file. In order
to use this feature, the history must only contain JSON encodable
Python data structures. Numpy and PyTorch types should not
be in the history.
Parameters
----------
f : file-like object or str
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
>>> before.fit(X, y, epoch=2) # Train for 2 epochs
>>> before.save_params('path/to/params')
>>> before.save_history('path/to/history.json')
>>> after = NeuralNetClassifier(mymodule).initialize()
>>> after.load_params('path/to/params')
>>> after.load_history('path/to/history.json')
>>> after.fit(X, y, epoch=2) # Train for another 2 epochs
"""
# TODO: Remove warning in a future release
warnings.warn(
"save_history is deprecated and will be removed in the next "
"release, please use save_params with the f_history keyword",
DeprecationWarning)
self.history.to_file(f) | Saves the history of ``NeuralNet`` as a json file. In order
to use this feature, the history must only contain JSON encodable
Python data structures. Numpy and PyTorch types should not
be in the history.
Parameters
----------
f : file-like object or str
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
>>> before.fit(X, y, epoch=2) # Train for 2 epochs
>>> before.save_params('path/to/params')
>>> before.save_history('path/to/history.json')
>>> after = NeuralNetClassifier(mymodule).initialize()
>>> after.load_params('path/to/params')
>>> after.load_history('path/to/history.json')
>>> after.fit(X, y, epoch=2) # Train for another 2 epochs |
def lessThan(self, leftIndex, rightIndex):
""" Returns true if the value of the item referred to by the given index left is less than
the value of the item referred to by the given index right, otherwise returns false.
"""
leftData = self.sourceModel().data(leftIndex, RegistryTableModel.SORT_ROLE)
rightData = self.sourceModel().data(rightIndex, RegistryTableModel.SORT_ROLE)
return leftData < rightData | Returns true if the value of the item referred to by the given index left is less than
the value of the item referred to by the given index right, otherwise returns false. |
def convert_representation(self, i):
"""
Return the proper representation for the given integer
"""
if self.number_representation == 'unsigned':
return i
elif self.number_representation == 'signed':
if i & (1 << self.interpreter._bit_width - 1):
return -((~i + 1) & (2**self.interpreter._bit_width - 1))
else:
return i
elif self.number_representation == 'hex':
return hex(i) | Return the proper representation for the given integer |
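A worked example of the signed branch, assuming an 8-bit interpreter width: 0xFF has the sign bit set, so it is read back as -1.

bit_width = 8                         # assumed interpreter bit width
i = 0xFF
if i & (1 << (bit_width - 1)):        # sign bit set -> two's-complement negative
    value = -((~i + 1) & (2 ** bit_width - 1))
else:
    value = i
print(value)                          # -> -1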
def rescale_gradients(model: Model, grad_norm: Optional[float] = None) -> Optional[float]:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
"""
if grad_norm:
parameters_to_clip = [p for p in model.parameters()
if p.grad is not None]
return sparse_clip_norm(parameters_to_clip, grad_norm)
return None | Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled. |
def solution(self, expr, v, extra_constraints=(), solver=None, model_callback=None):
"""
Return True if `v` is a solution of `expr` with the extra constraints, False otherwise.
:param expr: An expression (an AST) to evaluate
:param v: The proposed solution (an AST)
:param solver: A solver object, native to the backend, to assist in the evaluation (for example,
a z3.Solver).
:param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
:param model_callback: a function that will be executed with recovered models (if any)
:return: True if `v` is a solution of `expr`, False otherwise
"""
if self._solver_required and solver is None:
raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
return self._solution(self.convert(expr), self.convert(v), extra_constraints=self.convert_list(extra_constraints), solver=solver, model_callback=model_callback) | Return True if `v` is a solution of `expr` with the extra constraints, False otherwise.
:param expr: An expression (an AST) to evaluate
:param v: The proposed solution (an AST)
:param solver: A solver object, native to the backend, to assist in the evaluation (for example,
a z3.Solver).
:param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
:param model_callback: a function that will be executed with recovered models (if any)
:return: True if `v` is a solution of `expr`, False otherwise |
def get_bios_firmware_version(snmp_client):
"""Get bios firmware version of the node.
:param snmp_client: an SNMP client object.
:raises: SNMPFailure if SNMP operation failed.
:returns: a string of bios firmware version.
"""
try:
bios_firmware_version = snmp_client.get(BIOS_FW_VERSION_OID)
return six.text_type(bios_firmware_version)
except SNMPFailure as e:
raise SNMPBIOSFirmwareFailure(
SNMP_FAILURE_MSG % ("GET BIOS FIRMWARE VERSION", e)) | Get bios firmware version of the node.
:param snmp_client: an SNMP client object.
:raises: SNMPFailure if SNMP operation failed.
:returns: a string of bios firmware version. |
def _get_path_pattern_tornado45(self, router=None):
"""Return the path pattern used when routing a request. (Tornado>=4.5)
:param tornado.routing.Router router: (Optional) The router to scan.
Defaults to the application's router.
:rtype: str
"""
if router is None:
router = self.application.default_router
for rule in router.rules:
if rule.matcher.match(self.request) is not None:
if isinstance(rule.matcher, routing.PathMatches):
return rule.matcher.regex.pattern
elif isinstance(rule.target, routing.Router):
return self._get_path_pattern_tornado45(rule.target) | Return the path pattern used when routing a request. (Tornado>=4.5)
:param tornado.routing.Router router: (Optional) The router to scan.
Defaults to the application's router.
:rtype: str |
def print_trace(self, file=sys.stdout, base=10, compact=False):
"""
Prints a list of wires and their current values.
:param int base: the base the values are to be printed in
:param bool compact: whether to omit spaces in output lines
"""
if len(self.trace) == 0:
raise PyrtlError('error, cannot print an empty trace')
if base not in (2, 8, 10, 16):
raise PyrtlError('please choose a valid base (2,8,10,16)')
basekey = {2: 'b', 8: 'o', 10: 'd', 16: 'x'}[base]
ident_len = max(len(w) for w in self.trace)
if compact:
for w in sorted(self.trace, key=_trace_sort_key):
vals = ''.join('{0:{1}}'.format(x, basekey) for x in self.trace[w])
file.write(w.rjust(ident_len) + ' ' + vals + '\n')
else:
maxlenval = max(len('{0:{1}}'.format(x, basekey))
for w in self.trace for x in self.trace[w])
file.write(' ' * (ident_len - 3) + "--- Values in base %d ---\n" % base)
for w in sorted(self.trace, key=_trace_sort_key):
vals = ' '.join('{0:>{1}{2}}'.format(x, maxlenval, basekey) for x in self.trace[w])
file.write(w.ljust(ident_len + 1) + vals + '\n')
file.flush() | Prints a list of wires and their current values.
:param int base: the base the values are to be printed in
:param bool compact: whether to omit spaces in output lines |
def insert(self, song):
"""在当前歌曲后插入一首歌曲"""
if song in self._songs:
return
if self._current_song is None:
self._songs.append(song)
else:
index = self._songs.index(self._current_song)
self._songs.insert(index + 1, song) | 在当前歌曲后插入一首歌曲 |
def start(workflow_name, data=None, object_id=None, **kwargs):
"""Start a workflow by given name for specified data.
The name of the workflow to start is considered unique and it is
equal to the name of a file containing the workflow definition.
The data passed could be a list of Python standard data types such as
strings, dict, integers etc. to run through the workflow. Inside the
workflow tasks, this data is then available through ``obj.data``.
Or alternatively, pass the WorkflowObject to work on via
``object_id`` parameter. NOTE: This will replace any value in ``data``.
This is also a Celery (http://celeryproject.org) task, so you can
access the ``start.delay`` function to enqueue the execution of the
workflow asynchronously.
:param workflow_name: the workflow name to run. Ex: "my_workflow".
:type workflow_name: str
:param data: the workflow name to run. Ex: "my_workflow" (optional if
``object_id`` provided).
:type data: tuple
:param object_id: id of ``WorkflowObject`` to run (optional).
:type object_id: int
:return: UUID of the workflow engine that ran the workflow.
"""
from .proxies import workflow_object_class
from .worker_engine import run_worker
if data is None and object_id is None:
        raise WorkflowsMissingData("No data or object_id passed to task.")
if object_id is not None:
obj = workflow_object_class.get(object_id)
if not obj:
raise WorkflowsMissingObject(
"Cannot find object: {0}".format(object_id)
)
data = [obj]
else:
if not isinstance(data, (list, tuple)):
data = [data]
return text_type(run_worker(workflow_name, data, **kwargs).uuid) | Start a workflow by given name for specified data.
The name of the workflow to start is considered unique and it is
equal to the name of a file containing the workflow definition.
The data passed could be a list of Python standard data types such as
strings, dict, integers etc. to run through the workflow. Inside the
workflow tasks, this data is then available through ``obj.data``.
Or alternatively, pass the WorkflowObject to work on via
``object_id`` parameter. NOTE: This will replace any value in ``data``.
This is also a Celery (http://celeryproject.org) task, so you can
access the ``start.delay`` function to enqueue the execution of the
workflow asynchronously.
:param workflow_name: the workflow name to run. Ex: "my_workflow".
:type workflow_name: str
:param data: the workflow name to run. Ex: "my_workflow" (optional if
``object_id`` provided).
:type data: tuple
:param object_id: id of ``WorkflowObject`` to run (optional).
:type object_id: int
:return: UUID of the workflow engine that ran the workflow. |
def _netbsd_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
gpus = []
try:
pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
line,
re.IGNORECASE
)
if vendor_match:
gpus.append({'vendor': vendor_match.group(1), 'model': vendor_match.group(2)})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains | num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string |
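A hedged illustration of the vendor regex on a made-up pcictl-style line; the real NetBSD output format may differ:

import re

line = '000:01:0: nvidia GeForce GT 710 (VGA display)'   # hypothetical line
m = re.match(r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format('nvidia'), line, re.IGNORECASE)
print(m.group(1), '|', m.group(2))                        # -> nvidia | GeForce GT 710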
def is_ancestor(self, commit1, commit2, patch=False):
"""Returns True if commit1 is a direct ancestor of commit2, or False
otherwise.
This method considers a commit to be a direct ancestor of itself"""
result = self.hg("log", "-r", "first(%s::%s)" % (commit1, commit2),
"--template", "exists", patch=patch)
return "exists" in result | Returns True if commit1 is a direct ancestor of commit2, or False
otherwise.
This method considers a commit to be a direct ancestor of itself |
async def update(self, obj, only=None):
"""Update the object in the database. Optionally, update only
the specified fields. For creating a new object use :meth:`.create()`
:param only: (optional) the list/tuple of fields or
field names to update
"""
field_dict = dict(obj.__data__)
pk_field = obj._meta.primary_key
if only:
self._prune_fields(field_dict, only)
if obj._meta.only_save_dirty:
self._prune_fields(field_dict, obj.dirty_fields)
if obj._meta.composite_key:
for pk_part_name in pk_field.field_names:
field_dict.pop(pk_part_name, None)
else:
field_dict.pop(pk_field.name, None)
query = obj.update(**field_dict).where(obj._pk_expr())
result = await self.execute(query)
obj._dirty.clear()
return result | Update the object in the database. Optionally, update only
the specified fields. For creating a new object use :meth:`.create()`
:param only: (optional) the list/tuple of fields or
field names to update |
def file_content(self, value):
"""The Base64 encoded content of the attachment
:param value: The Base64 encoded content of the attachment
:type value: FileContent, string
"""
if isinstance(value, FileContent):
self._file_content = value
else:
self._file_content = FileContent(value) | The Base64 encoded content of the attachment
:param value: The Base64 encoded content of the attachment
:type value: FileContent, string |
def parse_element(raw_element: str) -> List[Element]:
"""
Parse a raw element into text and indices (integers).
"""
elements = [regex.match("^(([a-zA-Z]+)\(([^;]+),List\(([^;]*)\)\))$",
elem.lstrip().rstrip())
for elem
in raw_element.split(';')]
return [interpret_element(*elem.groups()[1:])
for elem in elements
if elem] | Parse a raw element into text and indices (integers). |
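The pattern expects strings shaped like label(text,List(indices)); a hedged example with a made-up element (interpret_element itself is not shown here, so only the raw capture groups are printed). The original uses the third-party regex module, but re behaves identically for this pattern:

import re

raw = "strArg(fork,List(5))"
m = re.match(r"^(([a-zA-Z]+)\(([^;]+),List\(([^;]*)\)\))$", raw.strip())
print(m.groups()[1:])   # -> ('strArg', 'fork', '5')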
def list_services(request, step):
"""
get the activated services added from the administrator
:param request: request object
:param step: the step which is proceeded
:type request: HttpRequest object
:type step: string
:return the activated services added from the administrator
"""
all_datas = []
if step == '0':
services = ServicesActivated.objects.filter(status=1)
elif step == '3':
services = ServicesActivated.objects.filter(status=1, id__iexact=request.id)
for class_name in services:
all_datas.append({class_name: class_name.name.rsplit('Service', 1)[1]})
return all_datas | get the activated services added from the administrator
:param request: request object
:param step: the step which is proceeded
:type request: HttpRequest object
:type step: string
:return the activated services added from the administrator |
def cd_to(path, mkdir=False):
"""make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/
"""
def cd_to_decorator(func):
@functools.wraps(func)
def _cd_and_exec(*args, **kwargs):
with cd(path, mkdir):
return func(*args, **kwargs)
return _cd_and_exec
return cd_to_decorator | make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/ |
def to_python(self, value):
"""Convert value if needed."""
if isinstance(value, DirDescriptor):
return value
elif isinstance(value, str):
return DirDescriptor(value)
elif isinstance(value, dict):
try:
path = value['dir']
except KeyError:
raise ValidationError("dictionary must contain a 'dir' element")
if not isinstance(path, str):
raise ValidationError("field's dir element must be a string")
size = value.get('size', None)
if size is not None and not isinstance(size, int):
raise ValidationError("field's size element must be an integer")
total_size = value.get('total_size', None)
if total_size is not None and not isinstance(total_size, int):
raise ValidationError("field's total_size element must be an integer")
refs = value.get('refs', None)
if refs is not None and not isinstance(refs, list):
# TODO: Validate that all refs are strings.
raise ValidationError("field's refs element must be a list of strings")
return DirDescriptor(
path,
size=size,
total_size=total_size,
refs=refs,
)
        elif value is not None:
raise ValidationError("field must be a DirDescriptor, string or a dict") | Convert value if needed. |
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "") | Get the Content-Type of the given url, using a HEAD request |
def getFieldMax(self, fieldName):
"""
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get max
:returns: current maximum value for the field ``fieldName``.
"""
stats = self.getStats()
if stats == None:
return None
maxValues = stats.get('max', None)
if maxValues == None:
return None
index = self.getFieldNames().index(fieldName)
return maxValues[index] | If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get max
:returns: current maximum value for the field ``fieldName``. |
def create_dialog_node(self,
workspace_id,
dialog_node,
description=None,
conditions=None,
parent=None,
previous_sibling=None,
output=None,
context=None,
metadata=None,
next_step=None,
title=None,
node_type=None,
event_name=None,
variable=None,
actions=None,
digress_in=None,
digress_out=None,
digress_out_slots=None,
user_label=None,
**kwargs):
"""
Create dialog node.
Create a new dialog node.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str parent: The ID of the parent dialog node. This property is omitted if
the dialog node has no parent.
:param str previous_sibling: The ID of the previous sibling dialog node. This
property is omitted if the dialog node has no previous sibling.
:param DialogNodeOutput output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
:param dict context: The context for the dialog node.
:param dict metadata: The metadata for the dialog node.
:param DialogNodeNextStep next_step: The next step to execute following this
dialog node.
:param str title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str node_type: How the dialog node is processed.
:param str event_name: How an `event_handler` node is processed.
:param str variable: The location in the dialog context where output is stored.
:param list[DialogNodeAction] actions: An array of objects describing any actions
to be invoked by the dialog node.
:param str digress_in: Whether this top-level dialog node can be digressed into.
:param str digress_out: Whether this dialog node can be returned to after a
digression.
:param str digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str user_label: A label that can be displayed externally to describe the
purpose of the node to users. This string must be no longer than 512 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if dialog_node is None:
raise ValueError('dialog_node must be provided')
if output is not None:
output = self._convert_model(output, DialogNodeOutput)
if next_step is not None:
next_step = self._convert_model(next_step, DialogNodeNextStep)
if actions is not None:
actions = [
self._convert_model(x, DialogNodeAction) for x in actions
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1',
'create_dialog_node')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'dialog_node': dialog_node,
'description': description,
'conditions': conditions,
'parent': parent,
'previous_sibling': previous_sibling,
'output': output,
'context': context,
'metadata': metadata,
'next_step': next_step,
'title': title,
'type': node_type,
'event_name': event_name,
'variable': variable,
'actions': actions,
'digress_in': digress_in,
'digress_out': digress_out,
'digress_out_slots': digress_out_slots,
'user_label': user_label
}
url = '/v1/workspaces/{0}/dialog_nodes'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | Create dialog node.
Create a new dialog node.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str parent: The ID of the parent dialog node. This property is omitted if
the dialog node has no parent.
:param str previous_sibling: The ID of the previous sibling dialog node. This
property is omitted if the dialog node has no previous sibling.
:param DialogNodeOutput output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
:param dict context: The context for the dialog node.
:param dict metadata: The metadata for the dialog node.
:param DialogNodeNextStep next_step: The next step to execute following this
dialog node.
:param str title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str node_type: How the dialog node is processed.
:param str event_name: How an `event_handler` node is processed.
:param str variable: The location in the dialog context where output is stored.
:param list[DialogNodeAction] actions: An array of objects describing any actions
to be invoked by the dialog node.
:param str digress_in: Whether this top-level dialog node can be digressed into.
:param str digress_out: Whether this dialog node can be returned to after a
digression.
:param str digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str user_label: A label that can be displayed externally to describe the
purpose of the node to users. This string must be no longer than 512 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse |
def match(self, search, **kwargs):
"""
Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.match_with_http_info(search, **kwargs)
else:
(data) = self.match_with_http_info(search, **kwargs)
return data | Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.match(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread. |
def v1_folder_rename(request, response, kvlclient,
fid_src, fid_dest, sfid_src=None, sfid_dest=None):
'''Rename a folder or a subfolder.
The routes for this endpoint are:
* ``POST /dossier/v1/<fid_src>/rename/<fid_dest>``
* ``POST /dossier/v1/<fid_src>/subfolder/<sfid_src>/rename/
<fid_dest>/subfolder/<sfid_dest>``
'''
src, dest = make_path(fid_src, sfid_src), make_path(fid_dest, sfid_dest)
new_folders(kvlclient, request).move(src, dest)
response.status = 200 | Rename a folder or a subfolder.
The routes for this endpoint are:
* ``POST /dossier/v1/<fid_src>/rename/<fid_dest>``
* ``POST /dossier/v1/<fid_src>/subfolder/<sfid_src>/rename/
<fid_dest>/subfolder/<sfid_dest>`` |
def _bld_pnab_generic(self, funcname, **kwargs):
"""
implement's a generic version of a non-attribute based pandas function
"""
margs = {'mtype': pnab, 'kwargs': kwargs}
setattr(self, funcname, margs) | implement's a generic version of a non-attribute based pandas function |
def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):
"""
    Generator to feed sentences to the doc2vec model.
"""
i = 0
p = Progress()
for path in paths:
with open(path, 'r') as f:
for line in f:
i += 1
p.print_progress(i/n)
# We do minimal pre-processing here so the model can learn
# punctuation
line = line.lower()
if sentences:
for sent in sent_tokenize(line):
tokens = tokenizer(sent)
yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
else:
tokens = tokenizer(line)
                    yield LabeledSentence(tokens, ['SENT_{}'.format(i)]) | Generator to feed sentences to the doc2vec model.
def pbkdf2(digestmod, password, salt, count, dk_length):
"""
PBKDF2, from PKCS #5 v2.0[1].
[1]: http://tools.ietf.org/html/rfc2898
For proper usage, see NIST Special Publication 800-132:
http://csrc.nist.gov/publications/PubsSPs.html
The arguments for this function are:
digestmod
        a cryptographic hash constructor, such as hashlib.sha256
which will be used as an argument to the hmac function.
Note that the performance difference between sha1 and
sha256 is not very big. New applications should choose
sha256 or better.
password
The arbitrary-length password (passphrase) (bytes)
salt
A bunch of random bytes, generated using a cryptographically
strong random number generator (such as os.urandom()). NIST
recommend the salt be _at least_ 128bits (16 bytes) long.
count
The iteration count. Set this value as large as you can
tolerate. NIST recommend that the absolute minimum value
be 1000. However, it should generally be in the range of
tens of thousands, or however many cause about a half-second
delay to the user.
dk_length
        The length of the desired key in bytes. This doesn't need
        to be the same size as the hash function's digest size, but
it makes sense to use a larger digest hash function if your
key size is large.
"""
def pbkdf2_function(pw, salt, count, i):
# in the first iteration, the hmac message is the salt
        # concatenated with the block number in the form of \x00\x00\x00\x01
r = u = hmac.new(pw, salt + struct.pack(">i", i), digestmod).digest()
for i in range(2, count + 1):
# in subsequent iterations, the hmac message is the
# previous hmac digest. The key is always the users password
# see the hmac specification for notes on padding and stretching
u = hmac.new(pw, u, digestmod).digest()
# this is the exclusive or of the two byte-strings
r = bytes(i ^ j for i, j in zip(r, u))
return r
dk, h_length = b'', digestmod().digest_size
# we generate as many blocks as are required to
    # concatenate to the desired key size:
blocks = (dk_length // h_length) + (1 if dk_length % h_length else 0)
for i in range(1, blocks + 1):
dk += pbkdf2_function(password, salt, count, i)
    # The length of the key will be dk_length rounded up to the nearest
    # hash block size, i.e. larger than or equal to it. We
    # slice it to the desired length before returning it.
return dk[:dk_length] | PBKDF2, from PKCS #5 v2.0[1].
[1]: http://tools.ietf.org/html/rfc2898
For proper usage, see NIST Special Publication 800-132:
http://csrc.nist.gov/publications/PubsSPs.html
The arguments for this function are:
digestmod
    a cryptographic hash constructor, such as hashlib.sha256
which will be used as an argument to the hmac function.
Note that the performance difference between sha1 and
sha256 is not very big. New applications should choose
sha256 or better.
password
The arbitrary-length password (passphrase) (bytes)
salt
A bunch of random bytes, generated using a cryptographically
strong random number generator (such as os.urandom()). NIST
recommend the salt be _at least_ 128bits (16 bytes) long.
count
The iteration count. Set this value as large as you can
tolerate. NIST recommend that the absolute minimum value
be 1000. However, it should generally be in the range of
tens of thousands, or however many cause about a half-second
delay to the user.
dk_length
    The length of the desired key in bytes. This doesn't need
    to be the same size as the hash function's digest size, but
it makes sense to use a larger digest hash function if your
key size is large. |
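A hedged usage sketch; given the construction above follows RFC 2898, its output should match the standard library's hashlib.pbkdf2_hmac for the same inputs:

import hashlib
import os

password = b'correct horse battery staple'
salt = os.urandom(16)                      # at least 128 bits of salt, per the notes above
dk = pbkdf2(hashlib.sha256, password, salt, 100000, 32)
# cross-check against the stdlib implementation of the same construction
assert dk == hashlib.pbkdf2_hmac('sha256', password, salt, 100000, dklen=32)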
def find_unpaired_ligand(self):
"""Identify unpaired functional in groups in ligands, involving H-Bond donors, acceptors, halogen bond donors.
"""
unpaired_hba, unpaired_hbd, unpaired_hal = [], [], []
# Unpaired hydrogen bond acceptors/donors in ligand (not used for hydrogen bonds/water, salt bridges/mcomplex)
involved_atoms = [hbond.a.idx for hbond in self.hbonds_pdon] + [hbond.d.idx for hbond in self.hbonds_ldon]
[[involved_atoms.append(atom.idx) for atom in sb.negative.atoms] for sb in self.saltbridge_lneg]
[[involved_atoms.append(atom.idx) for atom in sb.positive.atoms] for sb in self.saltbridge_pneg]
[involved_atoms.append(wb.a.idx) for wb in self.water_bridges if wb.protisdon]
[involved_atoms.append(wb.d.idx) for wb in self.water_bridges if not wb.protisdon]
[involved_atoms.append(mcomplex.target.atom.idx) for mcomplex in self.metal_complexes
if mcomplex.location == 'ligand']
for atom in [hba.a for hba in self.ligand.get_hba()]:
if atom.idx not in involved_atoms:
unpaired_hba.append(atom)
for atom in [hbd.d for hbd in self.ligand.get_hbd()]:
if atom.idx not in involved_atoms:
unpaired_hbd.append(atom)
# unpaired halogen bond donors in ligand (not used for the previous + halogen bonds)
[involved_atoms.append(atom.don.x.idx) for atom in self.halogen_bonds]
for atom in [haldon.x for haldon in self.ligand.halogenbond_don]:
if atom.idx not in involved_atoms:
unpaired_hal.append(atom)
        return unpaired_hba, unpaired_hbd, unpaired_hal | Identify unpaired functional groups in ligands, involving H-Bond donors, acceptors, and halogen bond donors.