code | docstring |
---|---|
def calc_custom(custom, genome, scaffold, sequence, scaffold_coverage, total_bases):
"""
custom = {(reads mapped to scaffold)/(total reads for sample)}/(length of scaffold)
"""
index = 0
if scaffold in scaffold_coverage: # what if the scaffold does not have bases mapped back to it? (this *should* not happen)
if genome not in custom:
custom[genome] = [[] for i in scaffold_coverage[scaffold]]
for cov in scaffold_coverage[scaffold]:
length = float(len(sequence[1]))
bases = cov * length
custom_value = ((bases) / (total_bases[index])) / length
custom[genome][index].append(custom_value)
index += 1
return custom | custom = {(reads mapped to scaffold)/(total reads for sample)}/(length of scaffold) |
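A rough numeric sketch of the normalization the loop above performs, with made-up values. Note that because `bases = cov * length`, the expression reduces to `cov / total_bases[index]`.

```python
# Minimal numeric sketch of the normalization above (illustrative values only).
coverage = 12.5          # mean coverage of the scaffold in one sample
length = 2000.0          # scaffold length in bases
total_bases = 5.0e7      # total bases mapped for that sample

bases = coverage * length                      # bases mapped to this scaffold
custom_value = (bases / total_bases) / length  # the value appended per sample
# Because bases = coverage * length, this reduces to coverage / total_bases.
assert abs(custom_value - coverage / total_bases) < 1e-12
print(custom_value)      # 2.5e-07
```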
def get_reports_by_type(self, account_id, report_type):
"""
Shows all reports of the passed report_type that have been run
for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index
"""
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
reports = []
for datum in self._get_resource(url):
datum["account_id"] = account_id
reports.append(Report(data=datum))
return reports | Shows all reports of the passed report_type that have been run
for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index |
def start(self):
"""
Starts the timer from zero
"""
self.startTime = time.time()
self.configure(text='{0:<d} s'.format(0))
self.update() | Starts the timer from zero |
def finalized_canonical_averages_dtype(spanning_cluster=True):
"""
The NumPy Structured Array type for finalized canonical averages over
several runs
Helper function
Parameters
----------
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
Returns
-------
ret : list of pairs of str
A list of tuples of field names and data types to be used as ``dtype``
argument in numpy ndarray constructors
See Also
--------
http://docs.scipy.org/doc/numpy/user/basics.rec.html
canonical_averages_dtype
"""
fields = list()
fields.extend([
('number_of_runs', 'uint32'),
('p', 'float64'),
('alpha', 'float64'),
])
if spanning_cluster:
fields.extend([
('percolation_probability_mean', 'float64'),
('percolation_probability_std', 'float64'),
('percolation_probability_ci', '(2,)float64'),
])
fields.extend([
('percolation_strength_mean', 'float64'),
('percolation_strength_std', 'float64'),
('percolation_strength_ci', '(2,)float64'),
('moments_mean', '(5,)float64'),
('moments_std', '(5,)float64'),
('moments_ci', '(5,2)float64'),
])
return _ndarray_dtype(fields) | The NumPy Structured Array type for finalized canonical averages over
several runs
Helper function
Parameters
----------
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
Returns
-------
ret : list of pairs of str
A list of tuples of field names and data types to be used as ``dtype``
argument in numpy ndarray constructors
See Also
--------
http://docs.scipy.org/doc/numpy/user/basics.rec.html
canonical_averages_dtype |
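The field list returned above is meant to feed a NumPy structured dtype. A short sketch of how such a list of (name, dtype-string) pairs is used, assuming the `_ndarray_dtype` helper is essentially a thin wrapper around `np.dtype`:

```python
# Sketch: turning a field list like the one above into a structured dtype.
# Assumes _ndarray_dtype is roughly equivalent to np.dtype (not shown in the snippet).
import numpy as np

fields = [
    ('number_of_runs', 'uint32'),
    ('p', 'float64'),
    ('alpha', 'float64'),
    ('moments_mean', '(5,)float64'),
    ('moments_ci', '(5,2)float64'),
]
dtype = np.dtype(fields)
arr = np.zeros(3, dtype=dtype)        # one record per finalized run set
arr['p'] = [0.55, 0.59, 0.61]
print(arr['moments_ci'].shape)        # (3, 5, 2)
```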
async def proxy_new(connection, flags, info, name, object_path, interface_name):
"""Asynchronously call the specified method on a DBus proxy object."""
future = Future()
cancellable = None
Gio.DBusProxy.new(
connection,
flags,
info,
name,
object_path,
interface_name,
cancellable,
gio_callback,
future,
)
result = await future
value = Gio.DBusProxy.new_finish(result)
if value is None:
raise RuntimeError("Failed to connect DBus object!")
return value | Asynchronously call the specified method on a DBus proxy object. |
def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
"""Stochastic gradient descent with momentum.
grad() must have signature grad(x, i), where i is the iteration number."""
velocity = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g)
velocity = mass * velocity - (1.0 - mass) * g
x = x + step_size * velocity
return x | Stochastic gradient descent with momentum.
grad() must have signature grad(x, i), where i is the iteration number. |
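A minimal usage sketch for the `sgd()` routine above, assuming its definition is in scope and `numpy` is available; it minimizes a simple quadratic:

```python
# Usage sketch for sgd() above: minimize f(x) = 0.5 * ||x - target||^2.
import numpy as np

def quad_grad(x, i):
    # Gradient of the quadratic; the iteration index i is unused here.
    target = np.array([3.0, -2.0])
    return x - target

x0 = np.zeros(2)
x_opt = sgd(quad_grad, x0, num_iters=500, step_size=0.1, mass=0.9)
print(x_opt)   # approaches [3.0, -2.0]
```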
def positions_func(inputs, pad=0):
"""
A layer filling i-th column of a 2D tensor with
1+ln(1+i) when it contains a meaningful symbol
and with 0 when it contains PAD
"""
position_inputs = kb.cumsum(kb.ones_like(inputs, dtype="float32"), axis=1)
position_inputs *= kb.cast(kb.not_equal(inputs, pad), "float32")
return kb.log(1.0 + position_inputs) | A layer filling i-th column of a 2D tensor with
1+ln(1+i) when it contains a meaningful symbol
and with 0 when it contains PAD |
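An equivalent NumPy sketch of the position encoding (it mirrors what the backend code computes, i.e. ln(1 + position) with positions counted from 1, and assumes the pad id is 0):

```python
# Equivalent NumPy sketch of the position encoding above (pad id assumed to be 0).
import numpy as np

inputs = np.array([[7, 4, 9, 0, 0],
                   [3, 0, 0, 0, 0]])
positions = np.cumsum(np.ones_like(inputs, dtype="float32"), axis=1)  # 1, 2, 3, ...
positions *= (inputs != 0).astype("float32")                          # zero out PAD columns
encoded = np.log(1.0 + positions)                                     # 0 for PAD, ln(1 + pos) otherwise
print(encoded.round(3))
# [[0.693 1.099 1.386 0.    0.   ]
#  [0.693 0.    0.    0.    0.   ]]
```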
def itertypes(iterable):
"""Iterates over an iterable containing either type objects or tuples of
type objects and yields once for every type object found."""
seen = set()
for entry in iterable:
if isinstance(entry, tuple):
for type_ in entry:
if type_ not in seen:
seen.add(type_)
yield type_
else:
if entry not in seen:
seen.add(entry)
yield entry | Iterates over an iterable containing either type objects or tuples of
type objects and yields once for every type object found. |
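A short usage sketch for `itertypes()` with a mix of bare types and tuples:

```python
# Usage sketch for itertypes(): mixed entries, each type yielded once, first-seen order.
entries = [int, (str, bytes), (int, float), str]
print(list(itertypes(entries)))   # yields int, str, bytes, float -- no repeats
```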
def atlas_peer_update_health( peer_hostport, received_response, peer_table=None ):
"""
Mark the given peer as alive at this time.
Update times at which we contacted it,
and update its health score.
Use the global health table by default,
or use the given health info if set.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# record that we contacted this peer, and whether or not we got useful info from it
now = time_now()
# update timestamps; remove old data
new_times = []
for (t, r) in ptbl[peer_hostport]['time']:
if t + atlas_peer_lifetime_interval() < now:
continue
new_times.append((t, r))
new_times.append((now, received_response))
ptbl[peer_hostport]['time'] = new_times
return True | Mark the given peer as alive at this time.
Update times at which we contacted it,
and update its health score.
Use the global health table by default,
or use the given health info if set. |
def analyze(self):
"""Run analysis."""
precision = 'DP' if self.kernel.datatype == 'double' else 'SP'
self.calculate_cache_access()
self.results['max_perf'] = self.conv_perf(self.machine['clock'] * self.cores * \
self.machine['FLOPs per cycle'][precision]['total']) | Run analysis. |
def close(self):
"""Closes the SSH connection if the connection is UP."""
if not self.connected:
return None
if self.config is not None:
if self.config.changed() and not self.config.committed():
try:
self.config.discard() # if configuration changed and not committed, will rollback
except pyPluribus.exceptions.ConfigurationDiscardError as discarderr: # bad luck.
raise pyPluribus.exceptions.ConnectionError("Could not discard the configuration: \
{err}".format(err=discarderr))
self._connection.close() # close SSH connection
self.config = None # reset config object
self._connection = None #
self.connected = False | Closes the SSH connection if the connection is UP. |
def md5sum(self):
"""
Retrieve the MD5 checksum of the image file on the device.
:return: The checksum string, or None if it cannot be read.
"""
cmd = 'show file {dir}:{bin} md5sum'.format(
dir=self.DESTDIR, bin=self.image)
run = self.device.api.exec_opcmd
try:
got = run(cmd)
return got.get('file_content_md5sum').strip()
except: # NOQA
return None | Retrieve the MD5 checksum of the image file on the device.
:return: The checksum string, or None if it cannot be read. |
def coordinator(self):
"""Get the current coordinator
Returns: the current coordinator id or None if it is unknown
"""
if self.coordinator_id is None:
return None
elif self._client.is_disconnected(self.coordinator_id):
self.coordinator_dead('Node Disconnected')
return None
else:
return self.coordinator_id | Get the current coordinator
Returns: the current coordinator id or None if it is unknown |
def _setup_transport(self):
"""
Wrap the socket in an SSL object, either the
new Python 2.6 version, or the older Python 2.5 and
lower version.
"""
if HAVE_PY26_SSL:
if hasattr(self, 'sslopts'):
self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts)
else:
self.sslobj = ssl.wrap_socket(self.sock)
self.sslobj.do_handshake()
else:
self.sslobj = socket.ssl(self.sock) | Wrap the socket in an SSL object, either the
new Python 2.6 version, or the older Python 2.5 and
lower version. |
def hash(self):
'''
:rtype: int
:return: hash of the field
'''
hashed = super(RandomBits, self).hash()
return khash(hashed, self._min_length, self._max_length, self._num_mutations, self._step, self._seed) | :rtype: int
:return: hash of the field |
def get_jobs_events_from_sequence(user, sequence):
"""Get all the jobs events from a given sequence number."""
args = schemas.args(flask.request.args.to_dict())
if user.is_not_super_admin():
raise dci_exc.Unauthorized()
query = sql.select([models.JOBS_EVENTS]). \
select_from(models.JOBS_EVENTS.join(models.JOBS,
models.JOBS.c.id == models.JOBS_EVENTS.c.job_id)). \
where(_TABLE.c.id >= sequence)
sort_list = v1_utils.sort_query(args['sort'], _JOBS_EVENTS_COLUMNS,
default='created_at')
query = v1_utils.add_sort_to_query(query, sort_list)
if args.get('limit', None):
query = query.limit(args.get('limit'))
if args.get('offset', None):
query = query.offset(args.get('offset'))
rows = flask.g.db_conn.execute(query).fetchall()
query_nb_rows = sql.select([func.count(models.JOBS_EVENTS.c.id)])
nb_rows = flask.g.db_conn.execute(query_nb_rows).scalar()
return json.jsonify({'jobs_events': rows, '_meta': {'count': nb_rows}}) | Get all the jobs events from a given sequence number. |
def id_pools_ipv4_subnets(self):
"""
Gets the IdPoolsIpv4Subnets API client.
Returns:
IdPoolsIpv4Subnets:
"""
if not self.__id_pools_ipv4_subnets:
self.__id_pools_ipv4_subnets = IdPoolsIpv4Subnets(self.__connection)
return self.__id_pools_ipv4_subnets | Gets the IdPoolsIpv4Subnets API client.
Returns:
IdPoolsIpv4Subnets: |
def create_user(user, name, create=None): # noqa: E501
"""Create a new script
Create a new script # noqa: E501
:param user: Get user with this name
:type user: str
:param name: Get status of a driver with this name
:type name: str
:param create: The data needed to create this user
:type create: dict | bytes
:rtype: Response
"""
if connexion.request.is_json:
create = Create.from_dict(connexion.request.get_json()) # noqa: E501
response = errorIfUnauthorized(role='admin')
if response:
return response
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(name)
user: User = mapUserToUser(create.script)
if driver.createApitaxUser(user):
return Response(status=200, body=response.getResponseBody())
return ErrorResponse(status=500, message='Failed to create user') | Create a new script
Create a new script # noqa: E501
:param user: Get user with this name
:type user: str
:param name: Get status of a driver with this name
:type name: str
:param create: The data needed to create this user
:type create: dict | bytes
:rtype: Response |
def _grab_concretization_results(cls, state):
"""
Grabs the concretized result so we can add the constraint ourselves.
"""
# only grab ones that match the constrained addrs
if cls._should_add_constraints(state):
addr = state.inspect.address_concretization_expr
result = state.inspect.address_concretization_result
if result is None:
l.warning("addr concretization result is None")
return
state.preconstrainer.address_concretization.append((addr, result)) | Grabs the concretized result so we can add the constraint ourselves. |
def f_i18n_citation_type(string, lang="eng"):
""" Take a string of the form %citation_type|passage% and format it for humans
:param string: String of the form %citation_type|passage%
:param lang: Language to translate to
:return: Human Readable string
.. note :: To Do : Use i18n tools and provide real i18n
"""
s = " ".join(string.strip("%").split("|"))
return s.capitalize() | Take a string of the form %citation_type|passage% and format it for humans
:param string: String of the form %citation_type|passage%
:param lang: Language to translate to
:return: Human Readable string
.. note :: To Do : Use i18n tools and provide real i18n |
async def get_friendly_name(self) -> Text:
"""
The friendly name is mapped to Facebook's first name. If the first
name is missing, use the last name.
"""
u = await self._get_user()
f = u.get('first_name', '').strip()
l = u.get('last_name', '').strip()
return f or l | The friendly name is mapped to Facebook's first name. If the first
name is missing, use the last name. |
def _adjust_prt_flds(self, kws_xlsx, desc2nts, shade_hdrgos):
"""Print user-requested fields or provided fields minus info fields."""
# Use xlsx prt_flds from the user, if provided
if "prt_flds" in kws_xlsx:
return kws_xlsx["prt_flds"]
# If the user did not provide specific fields to print in an xlsx file:
dont_print = set(['hdr_idx', 'is_hdrgo', 'is_usrgo'])
# Are we printing GO group headers?
# Build new list of xlsx print fields, excluding those which add no new information
prt_flds_adjusted = []
# Get all namedtuple fields
nt_flds = self.sortobj.get_fields(desc2nts)
# Keep fields intended for print and optionally gray-shade field (format_txt)
# print('FFFFFFFFFFFFFFF WrXlsxSortedGos::_adjust_prt_flds:', nt_flds)
for nt_fld in nt_flds:
if nt_fld not in dont_print:
# Only add grey-shade to hdrgo and section name rows if hdrgo_prt=True
if nt_fld == "format_txt":
if shade_hdrgos is True:
prt_flds_adjusted.append(nt_fld)
else:
prt_flds_adjusted.append(nt_fld)
kws_xlsx['prt_flds'] = prt_flds_adjusted | Print user-requested fields or provided fields minus info fields. |
def midi(self):
"""
Return the (nearest) MIDI note to the tone's frequency. This will be an
integer number in the range 0 to 127. If the frequency is outside the
range represented by MIDI notes (which is approximately 8 Hz to 12.5 kHz), a
:exc:`ValueError` exception will be raised.
"""
result = int(round(12 * log2(self.frequency / 440) + 69))
if 0 <= result < 128:
return result
raise ValueError('%f is outside the MIDI note range' % self.frequency) | Return the (nearest) MIDI note to the tone's frequency. This will be an
integer number in the range 0 to 127. If the frequency is outside the
range represented by MIDI notes (which is approximately 8 Hz to 12.5 kHz), a
:exc:`ValueError` exception will be raised. |
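The frequency-to-MIDI mapping above is plain arithmetic; a standalone sketch of the same formula outside the class:

```python
# Standalone sketch of the frequency -> MIDI mapping used above.
from math import log2

def freq_to_midi(frequency):
    result = int(round(12 * log2(frequency / 440) + 69))
    if 0 <= result < 128:
        return result
    raise ValueError('%f is outside the MIDI note range' % frequency)

print(freq_to_midi(440.0))    # 69  (A4, the reference pitch)
print(freq_to_midi(261.63))   # 60  (middle C)
```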
def _send(self, javascript):
"""
Establishes a socket connection to the zombie.js server and sends
Javascript instructions.
:param js: the Javascript string to execute
"""
# Prepend JS to switch to the proper client context.
message = """
var _ctx = ctx_switch('%s'),
browser = _ctx[0],
ELEMENTS = _ctx[1];
%s
""" % (id(self), javascript)
response = self.connection.send(message)
return self._handle_response(response) | Establishes a socket connection to the zombie.js server and sends
Javascript instructions.
:param js: the Javascript string to execute |
def refresh(self):
"""Refresh the dev_info data used by get_value.
Only needed if you're not using subscriptions.
"""
j = self.vera_request(id='sdata', output_format='json').json()
devices = j.get('devices')
for device_data in devices:
if device_data.get('id') == self.device_id:
self.update(device_data) | Refresh the dev_info data used by get_value.
Only needed if you're not using subscriptions. |
def similarity_by_path(sense1: "wn.Synset", sense2: "wn.Synset", option: str = "path") -> float:
"""
Returns maximum path similarity between two senses.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('path', 'wup', 'lch').
:return: A float, similarity measurement.
"""
if option.lower() in ["path", "path_similarity"]: # Path similarities.
return max(wn.path_similarity(sense1, sense2, if_none_return=0),
wn.path_similarity(sense2, sense1, if_none_return=0))
elif option.lower() in ["wup", "wupa", "wu-palmer", "wu-palmer"]: # Wu-Palmer
return max(wn.wup_similarity(sense1, sense2, if_none_return=0),
wn.wup_similarity(sense2, sense1, if_none_return=0))
elif option.lower() in ['lch', "leacock-chordorow"]: # Leacock-Chodorow
if sense1.pos != sense2.pos: # lch can't do diff POS
return 0
return wn.lch_similarity(sense1, sense2, if_none_return=0) | Returns maximum path similarity between two senses.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('path', 'wup', 'lch').
:return: A float, similarity measurement. |
def markdown_changelog(version: str, changelog: dict, header: bool = False) -> str:
"""
Generates a markdown version of the changelog. Takes a parsed changelog dict from
generate_changelog.
:param version: A string with the version number.
:param changelog: A dict from generate_changelog.
:param header: A boolean that decides whether a header should be included or not.
:return: The markdown formatted changelog.
"""
debug('markdown_changelog(version="{}", header={}, changelog=...)'.format(version, header))
output = ''
if header:
output += '## v{0}\n'.format(version)
for section in CHANGELOG_SECTIONS:
if not changelog[section]:
continue
output += '\n### {0}\n'.format(section.capitalize())
for item in changelog[section]:
output += '* {0} ({1})\n'.format(item[1], item[0])
return output | Generates a markdown version of the changelog. Takes a parsed changelog dict from
generate_changelog.
:param version: A string with the version number.
:param changelog: A dict from generate_changelog.
:param header: A boolean that decides whether a header should be included or not.
:return: The markdown formatted changelog. |
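The changelog dict consumed above maps section names (taken from `CHANGELOG_SECTIONS`, which is not shown in this snippet) to lists of (commit hash, message) pairs. A rough sketch of that shape and the resulting markdown, with `'feature'` and `'fix'` as assumed section names:

```python
# Illustrative sketch of the changelog dict shape consumed above. The real section
# names come from CHANGELOG_SECTIONS (not shown here); 'feature' and 'fix' are assumptions.
changelog = {
    'feature': [('abc1234', 'add CSV export')],
    'fix': [('def5678', 'handle empty input')],
}
# With header=True and version '1.2.0', the output would look roughly like:
#
# ## v1.2.0
#
# ### Feature
# * add CSV export (abc1234)
#
# ### Fix
# * handle empty input (def5678)
```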
def lastmod(self, tag):
"""Return the last modification of the entry."""
lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date')
return lastitems[0].modification_date | Return the last modification of the entry. |
def MeshArrows(*inputobj, **options):
"""
Build arrows representing displacements.
:param float s: cross-section size of the arrow
:param float scale: apply a rescaling factor to the length
"""
s = options.pop("s", None)
scale = options.pop("scale", 1)
c = options.pop("c", "gray")
alpha = options.pop("alpha", 1)
res = options.pop("res", 12)
mesh, u = _inputsort(inputobj)
startPoints = mesh.coordinates()
u_values = np.array([u(p) for p in mesh.coordinates()])
if not utils.isSequence(u_values[0]):
printc("~times Error: cannot show Arrows for 1D scalar values!", c=1)
exit()
endPoints = mesh.coordinates() + u_values
if u_values.shape[1] == 2: # u_values is 2D
u_values = np.insert(u_values, 2, 0, axis=1) # make it 3d
startPoints = np.insert(startPoints, 2, 0, axis=1) # make it 3d
endPoints = np.insert(endPoints, 2, 0, axis=1) # make it 3d
actor = shapes.Arrows(
startPoints, endPoints, s=s, scale=scale, c=c, alpha=alpha, res=res
)
actor.mesh = mesh
actor.u = u
actor.u_values = u_values
return actor | Build arrows representing displacements.
:param float s: cross-section size of the arrow
:param float scale: apply a rescaling factor to the length |
def start_capture(self, adapter_number, output_file):
"""
Starts a packet capture.
:param adapter_number: adapter number
:param output_file: PCAP destination file for the capture
"""
try:
adapter = self._ethernet_adapters[adapter_number]
except IndexError:
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
nio = adapter.get_nio(0)
if not nio:
raise QemuError("Adapter {} is not connected".format(adapter_number))
if nio.capturing:
raise QemuError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
nio.startPacketCapture(output_file)
if self.ubridge:
yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="QEMU-{}-{}".format(self._id, adapter_number),
output_file=output_file))
log.info("QEMU VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name,
id=self.id,
adapter_number=adapter_number)) | Starts a packet capture.
:param adapter_number: adapter number
:param output_file: PCAP destination file for the capture |
def snapshotToMovie(snap,filename,*args,**kwargs):
"""
NAME:
snapshotToMovie
PURPOSE:
turn a list of snapshots into a movie
INPUT:
snap - the snapshots (list)
filename - name of the file to save the movie to
framerate= in fps
bitrate= ?
thumbnail=False : create thumbnail image (filename-extension+.jpg)
thumbsize= size of thumbnail
+Snapshot.plot args and kwargs
OUTPUT:
movie is saved to file
DEPENDENCIES:
this procedure uses ffmpeg and convert
BUGS:
matplotlib's 'Agg' backend has a memory leak that prevents it from
creating hundreds of figures. It is recommended to call
import matplotlib
matplotlib.use('PDF')
at the beginning of the movie creating script as the PDF backend does
not have the same memory leak.
HISTORY:
2011-02-06 - Written - Bovy (NYU)
"""
if kwargs.has_key('tmpdir'):
tmpdir= kwargs['tmpdir']
kwargs.pop('tmpdir')
else:
tmpdir= '/tmp'
if kwargs.has_key('framerate'):
framerate= kwargs['framerate']
kwargs.pop('framerate')
else:
framerate= 25
if kwargs.has_key('bitrate'):
bitrate= kwargs['bitrate']
kwargs.pop('bitrate')
else:
bitrate= 1000
if kwargs.has_key('thumbnail') and kwargs['thumbnail']:
thumbnail= True
kwargs.pop('thumbnail')
elif kwargs.has_key('thumbnail'):
kwargs.pop('thumbnail')
thumbnail= False
else:
thumbnail= False
if kwargs.has_key('thumbsize'):
thumbsize= kwargs['thumbsize']
else:
thumbsize= 300
#Create all of the files
tempdir= tempfile.mkdtemp(dir=tmpdir) #Temporary directory
tmpfiles= []
nsnap= len(snap)
file_length= int(m.ceil(m.log10(nsnap)))
#Determine good xrange BOVY TO DO
if not kwargs.has_key('xrange'):
pass
if not kwargs.has_key('yrange'):
pass
for ii in range(nsnap):
tmpfiles.append(os.path.join(tempdir,
str(ii).zfill(file_length)))
bovy_plot.bovy_print()
snap[ii].plot(*args,**kwargs)
bovy_plot.bovy_end_print(tmpfiles[ii]+'.pdf')
#Convert to jpeg
try:
subprocess.check_call(['convert',
tmpfiles[ii]+'.pdf',
tmpfiles[ii]+'.jpg'])
except subprocess.CalledProcessError:
print("'convert' failed")
raise
#turn them into a movie
try:
subprocess.check_call(['ffmpeg',
'-r',str(framerate),
'-b', str(bitrate),
'-i',
os.path.join(tempdir,
'%'+'0%id.jpg' % file_length),
'-y',
filename])
if thumbnail:
thumbnameTemp= re.split(r'\.',filename)
thumbnameTemp= thumbnameTemp[0:len(thumbnameTemp)-1]
thumbname= ''
for t in thumbnameTemp:
thumbname+= t
thumbname+= '.jpg'
subprocess.check_call(['ffmpeg',
'-itsoffset','-4','-y',
'-i',filename,
'-vcodec',
'mjpeg',
'-vframes','1',
'-an',
'-f',
'rawvideo',
'-s', '%ix%i' % (thumbsize,thumbsize),
thumbname])
except subprocess.CalledProcessError:
print("'ffmpeg' failed")
_cleanupMovieTempdir(tempdir)
raise
finally:
_cleanupMovieTempdir(tempdir) | NAME:
snapshotToMovie
PURPOSE:
turn a list of snapshots into a movie
INPUT:
snap - the snapshots (list)
filename - name of the file to save the movie to
framerate= in fps
bitrate= ?
thumbnail=False : create thumbnail image (filename-extension+.jpg)
thumbsize= size of thumbnail
+Snapshot.plot args and kwargs
OUTPUT:
movie is saved to file
DEPENDENCIES:
this procedure uses ffmpeg and convert
BUGS:
matplotlib's 'Agg' backend has a memory leak that prevents it from
creating hundreds of figures. It is recommended to call
import matplotlib
matplotlib.use('PDF')
at the beginning of the movie creating script as the PDF backend does
not have the same memory leak.
HISTORY:
2011-02-06 - Written - Bovy (NYU) |
def cp(source, bucket, checksum, key_prefix):
"""Create new bucket from all files in directory."""
from .models import Bucket
from .helpers import populate_from_path
for object_version in populate_from_path(
Bucket.get(bucket), source, checksum=checksum,
key_prefix=key_prefix):
click.secho(str(object_version))
db.session.commit() | Create new bucket from all files in directory. |
def parse_release_id(release_id):
"""
Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict
"""
if "@" in release_id:
release, base_product = release_id.split("@")
else:
release = release_id
base_product = None
result = _parse_release_id_part(release)
if base_product is not None:
result.update(_parse_release_id_part(base_product, prefix="bp_"))
return result | Parse release_id to parts:
{short, version, type}
or
{short, version, type, bp_short, bp_version, bp_type}
:param release_id: Release ID string
:type release_id: str
:rtype: dict |
def member(Imported, **Config):
r"""Helps with adding imported members to Scripts.
Note:
Config depends upon the Imported. It could be that of a **task** or a **group**.
"""
__ec_member__ = Imported.__ec_member__
__ec_member__.Config.update(**Config)
state.ActiveModuleMemberQ.insert(0, __ec_member__) | r"""Helps with adding imported members to Scripts.
Note:
Config depends upon the Imported. It could be that of a **task** or a **group**. |
def dumps(data, escape=False, **kwargs):
"""A wrapper around `json.dumps` that can handle objects that the json
module is not aware of.
This function is aware of a list of custom serializers that can be
registered by the API user, making it possible to convert any kind
of object to types that the json library can handle.
"""
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = True
converted = json.dumps(data, default=_converter, **kwargs)
if escape:
# We're escaping the whole dumped string here cause there's no (easy)
# way to hook into the native json library and change how they process
# values like strings, None objects and some other "literal" stuff.
#
# Also, we're not escaping quotes here cause they're escaped by the
# native json library already. So, we just escape basic html entities,
# like <, > and &;
return cgi.escape(converted)
return converted | A wrapper around `json.dumps` that can handle objects that the json
module is not aware of.
This function is aware of a list of custom serializers that can be
registered by the API user, making it possible to convert any kind
of object to types that the json library can handle. |
def keys(self, name_start, name_end, limit=10):
"""
Return a list of the top ``limit`` keys between ``name_start`` and
``name_end``
Similar to **Redis.KEYS**
.. note:: The range is (``name_start``, ``name_end``]. ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of keys to be
returned, empty string ``''`` means -inf
:param string name_end: The upper bound(included) of keys to be
returned, empty string ``''`` means +inf
:param int limit: number of elements will be returned.
:return: a list of keys
:rtype: list
>>> ssdb.keys('set_x1', 'set_x3', 10)
['set_x2', 'set_x3']
>>> ssdb.keys('set_x ', 'set_xx', 3)
['set_x1', 'set_x2', 'set_x3']
>>> ssdb.keys('set_x ', '', 3)
['set_x1', 'set_x2', 'set_x3', 'set_x4']
>>> ssdb.keys('set_zzzzz ', '', )
[]
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('keys', name_start, name_end, limit) | Return a list of the top ``limit`` keys between ``name_start`` and
``name_end``
Similar to **Redis.KEYS**
.. note:: The range is (``name_start``, ``name_end``]. ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of keys to be
returned, empty string ``''`` means -inf
:param string name_end: The upper bound(included) of keys to be
returned, empty string ``''`` means +inf
:param int limit: number of elements will be returned.
:return: a list of keys
:rtype: list
>>> ssdb.keys('set_x1', 'set_x3', 10)
['set_x2', 'set_x3']
>>> ssdb.keys('set_x ', 'set_xx', 3)
['set_x1', 'set_x2', 'set_x3']
>>> ssdb.keys('set_x ', '', 3)
['set_x1', 'set_x2', 'set_x3', 'set_x4']
>>> ssdb.keys('set_zzzzz ', '', )
[] |
def decrypt(self, key, data, mode, padding):
# pylint: disable=unused-argument,no-self-use
"""Decrypt data using the supplied values.
:param bytes key: Loaded decryption key
:param bytes data: IV prepended to encrypted data
:param JavaMode mode: Decryption mode to use (not used by :class:`JavaAsymmetricEncryptionAlgorithm`)
:param JavaPadding padding: Padding mode to use
:returns: Decrypted data
:rtype: bytes
"""
if hasattr(key, "public_bytes"):
raise NotImplementedError('"decrypt" is not supported by public keys')
try:
return key.decrypt(data, padding.build())
except Exception:
error_message = "Decryption failed"
_LOGGER.exception(error_message)
raise DecryptionError(error_message) | Decrypt data using the supplied values.
:param bytes key: Loaded decryption key
:param bytes data: IV prepended to encrypted data
:param JavaMode mode: Decryption mode to use (not used by :class:`JavaAsymmetricEncryptionAlgorithm`)
:param JavaPadding padding: Padding mode to use
:returns: Decrypted data
:rtype: bytes |
def _choose_random_direction(current_state_parts, batch_rank, seed=None):
"""Chooses a random direction in the event space."""
seed_gen = distributions.SeedStream(seed, salt='_choose_random_direction')
# Chooses the random directions across each of the input components.
rnd_direction_parts = [
tf.random.normal(
tf.shape(input=current_state_part), dtype=tf.float32, seed=seed_gen())
for current_state_part in current_state_parts
]
# Sum squares over all of the input components. Note this takes all
# components into account.
sum_squares = sum(
tf.reduce_sum(
input_tensor=rnd_direction**2.,
axis=tf.range(batch_rank, tf.rank(rnd_direction)),
keepdims=True) for rnd_direction in rnd_direction_parts)
# Normalizes the random direction fragments.
rnd_direction_parts = [rnd_direction / tf.sqrt(sum_squares)
for rnd_direction in rnd_direction_parts]
return rnd_direction_parts | Chooses a random direction in the event space. |
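A plain-NumPy stand-in for the normalization step above (ignoring batch dimensions): draw a Gaussian fragment per state part, then rescale so the concatenated direction has unit Euclidean norm. This is a sketch of the idea, not the TensorFlow routine itself.

```python
# NumPy stand-in for the direction-normalization step above (no batch dimensions).
import numpy as np

rng = np.random.default_rng(0)
state_parts = [np.zeros(3), np.zeros(2)]            # two state "parts"
rnd_parts = [rng.normal(size=p.shape) for p in state_parts]
sum_squares = sum((r ** 2).sum() for r in rnd_parts)
rnd_parts = [r / np.sqrt(sum_squares) for r in rnd_parts]
total_norm = np.sqrt(sum((r ** 2).sum() for r in rnd_parts))
print(total_norm)   # 1.0 (up to floating-point error)
```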
def data(self):
"""The data dictionary for this entity.
"""
return self.model.state.entity_data(
self.entity_type, self.entity_id, self._history_index) | The data dictionary for this entity. |
def add_metric(self, labels, value, created=None, timestamp=None):
"""Add a metric to the metric family.
Args:
labels: A list of label values
value: The value of the metric
created: Optional unix timestamp the child was created at.
"""
self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp))
if created is not None:
self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp)) | Add a metric to the metric family.
Args:
labels: A list of label values
value: The value of the metric
created: Optional unix timestamp the child was created at. |
def silence(cls, *modules, **kwargs):
"""
Args:
*modules: Modules, or names of modules to silence (by setting their log level to WARNING or above)
**kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise
"""
level = kwargs.pop("level", logging.WARNING)
for mod in modules:
name = mod.__name__ if hasattr(mod, "__name__") else mod
logging.getLogger(name).setLevel(level) | Args:
*modules: Modules, or names of modules to silence (by setting their log level to WARNING or above)
**kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise |
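A minimal standalone sketch of the effect `silence()` has: raising a module logger's level so its DEBUG/INFO output is suppressed.

```python
# Minimal standalone sketch of what silence() does: raise a module logger's level.
import logging

logging.basicConfig(level=logging.DEBUG)
noisy = logging.getLogger("some.noisy.module")
noisy.debug("you will see this")          # emitted at DEBUG

logging.getLogger("some.noisy.module").setLevel(logging.WARNING)
noisy.debug("you will NOT see this")      # suppressed below WARNING
noisy.warning("warnings still get through")
```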
def get_state_in_ec_string(self, ec_index, add_colour=True):
'''Get the state of the component in an execution context as a string.
@param ec_index The index of the execution context to check the state
in. This index is into the total array of contexts,
that is both owned and participating contexts. If the
value of ec_index is greater than the length of @ref
owned_ecs, that length is subtracted from ec_index and
the result used as an index into @ref
participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
state = self.participating_ec_states[ec_index]
else:
state = self.owned_ec_states[ec_index]
if state == self.INACTIVE:
result = 'Inactive', ['bold', 'blue']
elif state == self.ACTIVE:
result = 'Active', ['bold', 'green']
elif state == self.ERROR:
result = 'Error', ['bold', 'white', 'bgred']
elif state == self.UNKNOWN:
result = 'Unknown', ['bold', 'red']
elif state == self.CREATED:
result = 'Created', ['reset']
if add_colour:
return utils.build_attr_string(result[1], supported=add_colour) + \
result[0] + utils.build_attr_string('reset',
supported=add_colour)
else:
return result[0] | Get the state of the component in an execution context as a string.
@param ec_index The index of the execution context to check the state
in. This index is into the total array of contexts,
that is both owned and participating contexts. If the
value of ec_index is greater than the length of @ref
owned_ecs, that length is subtracted from ec_index and
the result used as an index into @ref
participating_ecs. |
def purge_docs(cls, app, env, docname): # pragma: no cover
"""Handler for Sphinx's env-purge-doc event.
This event is emitted when all traces of a source file should be cleaned
from the environment (that is, if the source file is removed, or before
it is freshly read). This is for extensions that keep their own caches
in attributes of the environment.
For example, there is a cache of all modules on the environment. When a
source file has been changed, the cache's entries for the file are
cleared, since the module declarations could have been removed from the
file.
"""
state = getattr(env, cls.directive_name, None)
if state and docname in state.doc_names:
state.doc_names.remove(docname) | Handler for Sphinx's env-purge-doc event.
This event is emitted when all traces of a source file should be cleaned
from the environment (that is, if the source file is removed, or before
it is freshly read). This is for extensions that keep their own caches
in attributes of the environment.
For example, there is a cache of all modules on the environment. When a
source file has been changed, the cache's entries for the file are
cleared, since the module declarations could have been removed from the
file. |
def remove_tags(self, server, tags):
"""
Remove tags from a server.
- server: Server object or UUID string
- tags: list of Tag objects or strings
"""
uuid = str(server)
tags = [str(tag) for tag in tags]
url = '/server/{0}/untag/{1}'.format(uuid, ','.join(tags))
return self.post_request(url) | Remove tags from a server.
- server: Server object or UUID string
- tags: list of Tag objects or strings |
def add_template_filter(self, func: Callable, name: Optional[str]=None) -> None:
"""Add a template filter.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def to_upper(value):
return value.upper()
app.add_template_filter(to_upper)
Arguments:
func: The function that is the filter.
name: The filter name (defaults to function name).
"""
self.jinja_env.filters[name or func.__name__] = func | Add a template filter.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def to_upper(value):
return value.upper()
app.add_template_filter(to_upper)
Arguments:
func: The function that is the filter.
name: The filter name (defaults to function name). |
def update(self, date, data=None, inow=None):
"""
Update strategy. Updates prices, values, weight, etc.
"""
# resolve stale state
self.root.stale = False
# update helpers on date change
# also set newpt flag
newpt = False
if self.now == 0:
newpt = True
elif date != self.now:
self._net_flows = 0
self._last_price = self._price
self._last_value = self._value
self._last_fee = 0.0
newpt = True
# update now
self.now = date
if inow is None:
if self.now == 0:
inow = 0
else:
inow = self.data.index.get_loc(date)
# update children if any and calculate value
val = self._capital # default if no children
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
c.update(date, data, inow)
val += c.value
if self.root == self:
if (val < 0) and not self.bankrupt:
# Declare a bankruptcy
self.bankrupt = True
self.flatten()
# update data if this value is different or
# if now has changed - avoid all this if not since it
# won't change
if newpt or self._value != val:
self._value = val
self._values.values[inow] = val
bottom = self._last_value + self._net_flows
if bottom != 0:
ret = self._value / (self._last_value + self._net_flows) - 1
else:
if self._value == 0:
ret = 0
else:
raise ZeroDivisionError(
'Could not update %s. Last value '
'was %s and net flows were %s. Current '
'value is %s. Therefore, '
'we are dividing by zero to obtain the return '
'for the period.' % (self.name,
self._last_value,
self._net_flows,
self._value))
self._price = self._last_price * (1 + ret)
self._prices.values[inow] = self._price
# update children weights
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
if val != 0:
c._weight = c.value / val
else:
c._weight = 0.0
# if we have strategy children, we will need to update them in universe
if self._has_strat_children:
for c in self._strat_children:
# TODO: optimize ".loc" here as well
self._universe.loc[date, c] = self.children[c].price
# Cash should track the unallocated capital at the end of the day, so
# we should update it every time we call "update".
# Same for fees
self._cash.values[inow] = self._capital
self._fees.values[inow] = self._last_fee
# update paper trade if necessary
if newpt and self._paper_trade:
self._paper.update(date)
self._paper.run()
self._paper.update(date)
# update price
self._price = self._paper.price
self._prices.values[inow] = self._price | Update strategy. Updates prices, values, weight, etc. |
def _load_plt(self, filename):
"""Initialize Grid from gOpenMol plt file."""
g = gOpenMol.Plt()
g.read(filename)
grid, edges = g.histogramdd()
self.__init__(grid=grid, edges=edges, metadata=self.metadata) | Initialize Grid from gOpenMol plt file. |
def push(self, remote, branch=None):
'''Push a repository
:param remote: git-remote instance
:param branch: name of the branch to push
:return: PushInfo, git push output lines
'''
pb = ProgressBar()
pb.setup(self.name, ProgressBar.Action.PUSH)
if branch:
result = remote.push(branch, progress=pb)
else: #pragma: no cover
result = remote.push(progress=pb)
print()
return result, pb.other_lines | Push a repository
:param remote: git-remote instance
:param branch: name of the branch to push
:return: PushInfo, git push output lines |
def scroll_one_line_up(event):
"""
scroll_offset -= 1
"""
w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
b = event.cli.current_buffer
if w:
# When the cursor is at the bottom, move to the previous line. (Otherwise, only scroll.)
if w.render_info:
info = w.render_info
if w.vertical_scroll > 0:
first_line_height = info.get_height_for_line(info.first_visible_line())
cursor_up = info.cursor_position.y - (info.window_height - 1 - first_line_height -
info.configured_scroll_offsets.bottom)
# Move cursor up, as many steps as the height of the first line.
# TODO: not entirely correct yet, in case of line wrapping and many long lines.
for _ in range(max(0, cursor_up)):
b.cursor_position += b.document.get_cursor_up_position()
# Scroll window
w.vertical_scroll -= 1 | scroll_offset -= 1 |
def _iter_interleaved_items(self, elements):
"""Generate element or subtotal items in interleaved order.
This ordering corresponds to how value "rows" (or columns) are to
appear after subtotals have been inserted at their anchor locations.
Where more than one subtotal is anchored to the same location, they
appear in their document order in the cube response.
Only elements in the passed *elements* collection appear, which
allows control over whether missing elements are included by choosing
`.all_elements` or `.valid_elements`.
"""
subtotals = self._subtotals
for subtotal in subtotals.iter_for_anchor("top"):
yield subtotal
for element in elements:
yield element
for subtotal in subtotals.iter_for_anchor(element.element_id):
yield subtotal
for subtotal in subtotals.iter_for_anchor("bottom"):
yield subtotal | Generate element or subtotal items in interleaved order.
This ordering corresponds to how value "rows" (or columns) are to
appear after subtotals have been inserted at their anchor locations.
Where more than one subtotal is anchored to the same location, they
appear in their document order in the cube response.
Only elements in the passed *elements* collection appear, which
allows control over whether missing elements are included by choosing
`.all_elements` or `.valid_elements`. |
def parse_component_reference(self, node):
"""
Parses <ComponentReference>
@param node: Node containing the <ComponentTypeRef> element
@type node: xml.etree.Element
"""
if 'name' in node.lattrib:
name = node.lattrib['name']
else:
self.raise_error('<ComponentReference> must specify a name for the ' +
'reference.')
if 'type' in node.lattrib:
type_ = node.lattrib['type']
else:
self.raise_error('<ComponentReference> must specify a type for the ' +
'reference.')
if 'local' in node.lattrib:
local = node.lattrib['local']
else:
local = None
self.current_component_type.add_component_reference(ComponentReference(name, type_, local)) | Parses <ComponentReference>
@param node: Node containing the <ComponentTypeRef> element
@type node: xml.etree.Element |
def qos_map_cos_mutation_cos5(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
map = ET.SubElement(qos, "map")
cos_mutation = ET.SubElement(map, "cos-mutation")
name_key = ET.SubElement(cos_mutation, "name")
name_key.text = kwargs.pop('name')
cos5 = ET.SubElement(cos_mutation, "cos5")
cos5.text = kwargs.pop('cos5')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def findunique(lst, key):
"""
Find all unique key values for items in lst.
Parameters
----------
lst: list
A list of composite dictionaries e.g. ``layers``, ``classes``
key: string
The key name to search each dictionary in the list
Returns
-------
list
A sorted Python list of unique keys in the list
Example
-------
To find all ``GROUP`` values for ``CLASS`` in a ``LAYER``::
s = '''
LAYER
CLASS
GROUP "group1"
NAME "Class1"
COLOR 0 0 0
END
CLASS
GROUP "group2"
NAME "Class2"
COLOR 0 0 0
END
CLASS
GROUP "group1"
NAME "Class3"
COLOR 0 0 0
END
END
'''
d = mappyfile.loads(s)
groups = mappyfile.findunique(d["classes"], "group")
assert groups == ["group1", "group2"]
"""
return sorted(set([item[key.lower()] for item in lst])) | Find all unique key values for items in lst.
Parameters
----------
lst: list
A list of composite dictionaries e.g. ``layers``, ``classes``
key: string
The key name to search each dictionary in the list
Returns
-------
list
A sorted Python list of unique keys in the list
Example
-------
To find all ``GROUP`` values for ``CLASS`` in a ``LAYER``::
s = '''
LAYER
CLASS
GROUP "group1"
NAME "Class1"
COLOR 0 0 0
END
CLASS
GROUP "group2"
NAME "Class2"
COLOR 0 0 0
END
CLASS
GROUP "group1"
NAME "Class3"
COLOR 0 0 0
END
END
'''
d = mappyfile.loads(s)
groups = mappyfile.findunique(d["classes"], "group")
assert groups == ["group1", "group2"] |
def force_clean(self, remove_rw=False, allow_lazy=False, retries=5, sleep_interval=0.5):
"""Attempts to call the clean method, but will retry automatically if an error is raised. When the attempts
run out, it will raise the last error.
Note that the method will only catch :class:`ImageMounterError` exceptions.
:param bool remove_rw: indicates whether a read-write cache should be removed
:param bool allow_lazy: indicates whether lazy unmounting is allowed
:param retries: Maximum amount of retries while unmounting
:param sleep_interval: The sleep interval between attempts.
:raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
:raises CleanupError: when actual cleanup fails. Some are swallowed.
"""
while True:
try:
self.clean(remove_rw=remove_rw, allow_lazy=allow_lazy)
except ImageMounterError:
if retries == 0:
raise
retries -= 1
time.sleep(sleep_interval)
else:
return | Attempts to call the clean method, but will retry automatically if an error is raised. When the attempts
run out, it will raise the last error.
Note that the method will only catch :class:`ImageMounterError` exceptions.
:param bool remove_rw: indicates whether a read-write cache should be removed
:param bool allow_lazy: indicates whether lazy unmounting is allowed
:param retries: Maximum amount of retries while unmounting
:param sleep_interval: The sleep interval between attempts.
:raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
:raises CleanupError: when actual cleanup fails. Some are swallowed. |
def generate_iv_for_export(self, client_random, server_random,
con_end, read_or_write, req_len):
"""
Generate IV for EXPORT ciphersuite, i.e. weakens it.
An export IV generation example is given in section 6.3.1 of RFC 2246.
See also page 86 of EKR's book.
"""
s = con_end + read_or_write
s = (s == "clientwrite" or s == "serverread")
if self.tls_version < 0x0300:
return None
elif self.tls_version == 0x0300:
if s:
tbh = client_random + server_random
else:
tbh = server_random + client_random
iv = _tls_hash_algs["MD5"]().digest(tbh)[:req_len]
else:
iv_block = self.prf("",
b"IV block",
client_random + server_random,
2 * req_len)
if s:
iv = iv_block[:req_len]
else:
iv = iv_block[req_len:]
return iv | Generate IV for EXPORT ciphersuite, i.e. weakens it.
An export IV generation example is given in section 6.3.1 of RFC 2246.
See also page 86 of EKR's book. |
def run(self):
"""Create a type list."""
config = self.state.document.settings.env.config
# Group processes by category
processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url)
processes.sort(key=itemgetter('type'))
processes_by_types = {k: list(g) for k, g in groupby(processes, itemgetter('type'))}
listnode = nodes.bullet_list()
for typ in sorted(processes_by_types.keys()):
par = nodes.paragraph()
par += nodes.literal(typ, typ)
par += nodes.Text(' - ')
processes = sorted(processes_by_types[typ], key=itemgetter('name'))
last_process = processes[-1]
for process in processes:
node = nodes.reference('', process['name'], internal=True)
node['refuri'] = config.autoprocess_definitions_uri + '#process-' + process['slug']
node['reftitle'] = process['name']
par += node
if process != last_process:
par += nodes.Text(', ')
listnode += nodes.list_item('', par)
return [listnode] | Create a type list. |
def _debug_dump_dom(el):
"""Debugging helper. Prints out `el` contents."""
import xml.dom.minidom
s = [el.nodeName]
att_container = el.attributes
for i in range(att_container.length):
attr = att_container.item(i)
s.append(' @{a}="{v}"'.format(a=attr.name, v=attr.value))
for c in el.childNodes:
if c.nodeType == xml.dom.minidom.Node.TEXT_NODE:
s.append(' {a} type="TEXT" data="{d}"'.format(a=c.nodeName, d=c.data))
else:
s.append(' {a} child'.format(a=c.nodeName))
return '\n'.join(s) | Debugging helper. Prints out `el` contents. |
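A small usage sketch for the helper above, run on a tiny parsed document (assuming the function is importable at module level):

```python
# Usage sketch for _debug_dump_dom() on a tiny parsed document.
import xml.dom.minidom

doc = xml.dom.minidom.parseString('<item id="42">hello<child/></item>')
print(_debug_dump_dom(doc.documentElement))
# item
#  @id="42"
#  #text type="TEXT" data="hello"
#  child child
```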
def analyze(self, text, tokenizer=str.split):
"""Analyze text and return pretty format.
Args:
text: string, the input text.
tokenizer: Tokenize input sentence. Default tokenizer is `str.split`.
Returns:
res: dict.
"""
if not self.tagger:
self.tagger = Tagger(self.model,
preprocessor=self.p,
tokenizer=tokenizer)
return self.tagger.analyze(text) | Analyze text and return pretty format.
Args:
text: string, the input text.
tokenizer: Tokenize input sentence. Default tokenizer is `str.split`.
Returns:
res: dict. |
def projective_measurement_constraints(*parties):
"""Return a set of constraints that define projective measurements.
:param parties: Measurements of different parties.
:type A: list or tuple of list of list of
:class:`sympy.physics.quantum.operator.HermitianOperator`.
:returns: substitutions containing idempotency, orthogonality and
commutation relations.
"""
substitutions = {}
# Idempotency and orthogonality of projectors
if isinstance(parties[0][0][0], list):
parties = parties[0]
for party in parties:
for measurement in party:
for projector1 in measurement:
for projector2 in measurement:
if projector1 == projector2:
substitutions[projector1**2] = projector1
else:
substitutions[projector1*projector2] = 0
substitutions[projector2*projector1] = 0
# Projectors commute between parties in a partition
for n1 in range(len(parties)):
for n2 in range(n1+1, len(parties)):
for measurement1 in parties[n1]:
for measurement2 in parties[n2]:
for projector1 in measurement1:
for projector2 in measurement2:
substitutions[projector2*projector1] = \
projector1*projector2
return substitutions | Return a set of constraints that define projective measurements.
:param parties: Measurements of different parties.
:type A: list or tuple of list of list of
:class:`sympy.physics.quantum.operator.HermitianOperator`.
:returns: substitutions containing idempotency, orthogonality and
commutation relations. |
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability) | The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate |
def Read(self, timeout=None):
'''
Reads the context menu
:param timeout: Optional. Any value other than None indicates a non-blocking read
:return:
'''
if not self.Shown:
self.Shown = True
self.TrayIcon.show()
if timeout is None:
self.App.exec_()
elif timeout == 0:
self.App.processEvents()
else:
self.timer = start_systray_read_timer(self, timeout)
self.App.exec_()
if self.timer:
stop_timer(self.timer)
item = self.MenuItemChosen
self.MenuItemChosen = TIMEOUT_KEY
return item | Reads the context menu
:param timeout: Optional. Any value other than None indicates a non-blocking read
:return: |
def system_info(query):
"""system_info(query) -- print system specific information like OS, kernel,
architecture etc.
"""
proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "operating system : "+str(out),
proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel : "+str(out),
proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel release : "+str(out),
proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "architecture : "+str(out),
proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "network node name : "+str(out), | system_info(query) -- print system specific information like OS, kernel,
architecture etc. |
def put(self, key, value, cache=None, options={}):
"""Query the server to set the key specified to the value specified in
the specified cache.
Keyword arguments:
key -- the name of the key to be set. Required.
value -- the value to set key to. Must be a string or JSON
serialisable. Required.
cache -- the cache to store the item in. Defaults to None, which uses
self.name. If no name is set, raises a ValueError.
options -- a dict of arguments to send with the request. See
http://dev.iron.io/cache/reference/api/#put_item for more
information on defaults and possible values.
"""
if cache is None:
cache = self.name
if cache is None:
raise ValueError("Cache name must be set")
if not isinstance(value, str_type) and not isinstance(value, int_types):
value = json.dumps(value)
options["value"] = value
body = json.dumps(options)
cache = quote_plus(cache)
key = quote_plus(key)
result = self.client.put("caches/%s/items/%s" % (cache, key), body,
{"Content-Type": "application/json"})
return Item(cache=cache, key=key, value=value) | Query the server to set the key specified to the value specified in
the specified cache.
Keyword arguments:
key -- the name of the key to be set. Required.
value -- the value to set key to. Must be a string or JSON
serialisable. Required.
cache -- the cache to store the item in. Defaults to None, which uses
self.name. If no name is set, raises a ValueError.
options -- a dict of arguments to send with the request. See
http://dev.iron.io/cache/reference/api/#put_item for more
information on defaults and possible values. |
def analyze_theory(V, x0list=[], plot=False):
""" Extract ground-state energy E0 and psi**2 for potential V. """
# initialize path integral
T = 4.
ndT = 8. # use larger ndT to reduce discretization error (goes like 1/ndT**2)
neval = 3e5 # should probably use more evaluations (10x?)
nitn = 6
alpha = 0.1 # damp adaptation
# create integrator and train it (no x0list)
integrand = PathIntegrand(V=V, T=T, ndT=ndT)
integ = vegas.Integrator(integrand.region, alpha=alpha)
integ(integrand, neval=neval, nitn=nitn / 2, alpha=2 * alpha)
# evaluate path integral with trained integrator and x0list
integrand = PathIntegrand(V=V, x0list=x0list, T=T, ndT=ndT)
results = integ(integrand, neval=neval, nitn=nitn, alpha=alpha)
print(results.summary())
E0 = -np.log(results['exp(-E0*T)']) / T
print('Ground-state energy = %s Q = %.2f\n' % (E0, results.Q))
if len(x0list) <= 0:
return E0
psi2 = results['exp(-E0*T) * psi(x0)**2'] / results['exp(-E0*T)']
print('%5s %-12s %-10s' % ('x', 'psi**2', 'sho-exact'))
print(27 * '-')
for i, (x0i, psi2i) in enumerate(zip(x0list, psi2)):
exact = np.exp(- x0i ** 2) / np.sqrt(np.pi) #* np.exp(-T / 2.)
print(
"%5.1f %-12s %-10.5f"
% (x0i, psi2i, exact)
)
if plot:
plot_results(E0, x0list, psi2, T)
return E0 | Extract ground-state energy E0 and psi**2 for potential V. |
def makeLinearxFunc(self,mLvl,pLvl,MedShk,xLvl):
'''
Constructs the (unconstrained) expenditure function for this period using
bilinear interpolation (over permanent income and the medical shock) among
an array of linear interpolations over market resources.
Parameters
----------
mLvl : np.array
Corresponding market resource points for interpolation.
pLvl : np.array
Corresponding permanent income level points for interpolation.
MedShk : np.array
Corresponding medical need shocks for interpolation.
xLvl : np.array
Expenditure points for interpolation, corresponding to those in mLvl,
pLvl, and MedShk.
Returns
-------
xFuncUnc : BilinearInterpOnInterp1D
Unconstrained total expenditure function for this period.
'''
# Get state dimensions
pCount = mLvl.shape[1]
MedCount = mLvl.shape[0]
# Loop over each permanent income level and medical shock and make a linear xFunc
xFunc_by_pLvl_and_MedShk = [] # Initialize the empty list of lists of 1D xFuncs
for i in range(pCount):
temp_list = []
pLvl_i = pLvl[0,i,0]
mLvlMin_i = self.BoroCnstNat(pLvl_i)
for j in range(MedCount):
m_temp = mLvl[j,i,:] - mLvlMin_i
x_temp = xLvl[j,i,:]
temp_list.append(LinearInterp(m_temp,x_temp))
xFunc_by_pLvl_and_MedShk.append(deepcopy(temp_list))
# Combine the nested list of linear xFuncs into a single function
pLvl_temp = pLvl[0,:,0]
MedShk_temp = MedShk[:,0,0]
xFuncUncBase = BilinearInterpOnInterp1D(xFunc_by_pLvl_and_MedShk,pLvl_temp,MedShk_temp)
xFuncUnc = VariableLowerBoundFunc3D(xFuncUncBase,self.BoroCnstNat)
return xFuncUnc | Constructs the (unconstrained) expenditure function for this period using
bilinear interpolation (over permanent income and the medical shock) among
an array of linear interpolations over market resources.
Parameters
----------
mLvl : np.array
Corresponding market resource points for interpolation.
pLvl : np.array
Corresponding permanent income level points for interpolation.
MedShk : np.array
Corresponding medical need shocks for interpolation.
xLvl : np.array
Expenditure points for interpolation, corresponding to those in mLvl,
pLvl, and MedShk.
Returns
-------
xFuncUnc : BilinearInterpOnInterp1D
Unconstrained total expenditure function for this period. |
def split_by_percent(self, spin_systems_list):
"""Split list of spin systems by specified percentages.
:param list spin_systems_list: List of spin systems.
:return: List of spin systems divided into sub-lists corresponding to specified split percentages.
:rtype: :py:class:`list`
"""
chunk_sizes = [int((i*len(spin_systems_list))/100) for i in self.plsplit]
if sum(chunk_sizes) < len(spin_systems_list):
difference = len(spin_systems_list) - sum(chunk_sizes)
chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += difference
assert sum(chunk_sizes) == len(spin_systems_list), \
"sum of chunk sizes must be equal to spin systems list length."
intervals = self.calculate_intervals(chunk_sizes)
chunks_of_spin_systems_by_percentage = [itertools.islice(spin_systems_list, *interval) for interval in intervals]
return chunks_of_spin_systems_by_percentage | Split list of spin systems by specified percentages.
:param list spin_systems_list: List of spin systems.
:return: List of spin systems divided into sub-lists corresponding to specified split percentages.
:rtype: :py:class:`list` |
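A standalone sketch of the chunk-size arithmetic above, assuming `self.plsplit` were `[60, 20, 20]` and eleven spin systems were passed in:

```python
# Standalone sketch of the chunk-size arithmetic above (plsplit = [60, 20, 20] assumed).
plsplit = [60, 20, 20]
spin_systems = list(range(11))            # 11 items to divide

chunk_sizes = [int((p * len(spin_systems)) / 100) for p in plsplit]   # [6, 2, 2]
if sum(chunk_sizes) < len(spin_systems):
    # rounding left one item unassigned; give it to the smallest chunk
    chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += len(spin_systems) - sum(chunk_sizes)
print(chunk_sizes)                        # [6, 3, 2]
assert sum(chunk_sizes) == len(spin_systems)
```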
def forward_committor(T, A, B):
r"""Forward committor between given sets.
The forward committor u(x) between sets A and B is the probability
for the chain starting in x to reach B before reaching A.
Parameters
----------
T : (M, M) scipy.sparse matrix
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
Vector of forward committor probabilities
Notes
-----
The forward committor is a solution to the following
boundary-value problem
.. math::
        \sum_j L_{ij} u_{j} = 0 \quad \text{for } i \in X \setminus (A \cup B) \quad \text{(I)}
        u_{i} = 0 \quad \text{for } i \in A \quad \text{(II)}
        u_{i} = 1 \quad \text{for } i \in B \quad \text{(III)}
with generator matrix L=(P-I).
"""
X = set(range(T.shape[0]))
A = set(A)
B = set(B)
AB = A.intersection(B)
notAB = X.difference(A).difference(B)
if len(AB) > 0:
raise ValueError("Sets A and B have to be disjoint")
L = T - eye(T.shape[0], T.shape[0])
"""Assemble left hand-side W for linear system"""
"""Equation (I)"""
W = 1.0 * L
"""Equation (II)"""
W = W.todok()
W[list(A), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(A)), (list(A), list(A))), shape=W.shape).tocsr()
"""Equation (III)"""
W = W.todok()
W[list(B), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(B)), (list(B), list(B))), shape=W.shape).tocsr()
"""Assemble right hand side r for linear system"""
"""Equation (I+II)"""
r = np.zeros(T.shape[0])
"""Equation (III)"""
r[list(B)] = 1.0
u = spsolve(W, r)
return u | r"""Forward committor between given sets.
The forward committor u(x) between sets A and B is the probability
for the chain starting in x to reach B before reaching A.
Parameters
----------
T : (M, M) scipy.sparse matrix
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
Vector of forward committor probabilities
Notes
-----
The forward committor is a solution to the following
boundary-value problem
.. math::
\sum_j L_{ij} u_{j} = 0 \quad \text{for } i \in X \setminus (A \cup B) \quad \text{(I)}
u_{i} = 0 \quad \text{for } i \in A \quad \text{(II)}
u_{i} = 1 \quad \text{for } i \in B \quad \text{(III)}
with generator matrix L=(P-I). |
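A dense NumPy sketch of the same boundary-value construction on a hypothetical 3-state chain (not the sparse implementation above): rows for A and B are replaced by identity rows and the right-hand side is 1 on B.

import numpy as np

def forward_committor_dense(P, A, B):
    # Generator L = P - I; overwrite boundary rows and solve W u = r.
    n = P.shape[0]
    W = P - np.eye(n)
    r = np.zeros(n)
    for i in A:
        W[i, :] = 0.0
        W[i, i] = 1.0          # u_i = 0 on A
    for i in B:
        W[i, :] = 0.0
        W[i, i] = 1.0
        r[i] = 1.0             # u_i = 1 on B
    return np.linalg.solve(W, r)

P = np.array([[0.80, 0.15, 0.05],
              [0.10, 0.80, 0.10],
              [0.05, 0.15, 0.80]])
print(forward_committor_dense(P, A=[0], B=[2]))   # [0.  0.5 1. ]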
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
"""Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML).
"""
self._logger.info('Loading configuration from file: %s', yamlfile)
try:
parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())
except self._modules['yaml'].YAMLError:
self._logger.exception('Problem parsing YAML')
raise self.ConfigurationInvalidError(
'Failed to load from %s as YAML' % yamlfile)
if not isinstance(parsed_yaml, dict):
# Parsed YAML, but it's not a dict.
raise self.ConfigurationInvalidError(
'YAML parsed, but wrong type, should be dict', parsed_yaml)
self._logger.debug('Configuration loaded from file: %s', parsed_yaml)
self.load_from_dict(
parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared) | Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML). |
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
        of an input sample represents the proportion of estimators predicting
each class.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
# X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) # Dont in version 0.15
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1}."
"".format(self.n_features_, X.shape[1]))
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X,
self.n_classes_,
self.combination,
self.estimators_weight_[starts[i]:starts[i + 1]])
for i in range(n_jobs))
# Reduce
if self.combination in ['majority_voting', 'majority_bmr']:
proba = sum(all_proba) / self.n_estimators
elif self.combination in ['weighted_voting', 'weighted_bmr']:
proba = sum(all_proba)
elif self.combination in ['stacking', 'stacking_proba', 'stacking_bmr', 'stacking_proba_bmr']:
X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_,
self.estimators_weight_, X, self.combination)
proba = self.f_staking.predict_proba(X_stacking)
return proba | Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
of an input sample represents the proportion of estimators predicting
each class.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`. |
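A stripped-down sketch of just the majority_voting reduction on precomputed per-estimator probability arrays (hypothetical inputs; the parallel partitioning, weighting, and stacking branches above are omitted):

import numpy as np

def average_proba(per_estimator_proba):
    # per_estimator_proba: list of (n_samples, n_classes) arrays, one per base estimator.
    stacked = np.stack(per_estimator_proba)   # (n_estimators, n_samples, n_classes)
    return stacked.mean(axis=0)               # simple average == the majority_voting reduction

p1 = np.array([[0.9, 0.1], [0.2, 0.8]])
p2 = np.array([[0.7, 0.3], [0.4, 0.6]])
print(average_proba([p1, p2]))   # [[0.8 0.2] [0.3 0.7]]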
def generateMethods(self):
"""Generate some member functions
"""
for i in range(1, 5):
# adds member function grid_ixi_slot(self)
self.make_grid_slot(i, i)
for cl in self.mvision_classes:
self.make_mvision_slot(cl) | Generate some member functions |
def _generate_signature(self, nonce, method, path, data):
"""Generate the call signature
:param path:
:param data:
:param nonce:
:return: signature string
"""
data_json = ""
endpoint = path
if method == "get":
if data:
query_string = self._get_params_for_sig(data)
endpoint = "{}?{}".format(path, query_string)
elif data:
data_json = compact_json_dict(data)
sig_str = ("{}{}{}{}".format(nonce, method.upper(), endpoint, data_json)).encode('utf-8')
m = hmac.new(self.API_SECRET.encode('utf-8'), sig_str, hashlib.sha256)
return base64.b64encode(m.digest()) | Generate the call signature
:param path:
:param data:
:param nonce:
:return: signature string |
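A self-contained sketch of the same signing recipe — nonce + upper-cased method + endpoint + compact JSON body, run through HMAC-SHA256 and base64 — with a hypothetical secret and payload; compact_json_dict and the GET query-string helper above are approximated with json.dumps, so this is a sketch of the pattern, not the exchange's exact scheme.

import base64
import hashlib
import hmac
import json

def sign_request(secret, nonce, method, path, data=None):
    # GET requests sign the path only in this sketch; other methods sign a compact JSON body.
    body = "" if method.lower() == "get" or not data else json.dumps(data, separators=(",", ":"))
    msg = "{}{}{}{}".format(nonce, method.upper(), path, body).encode("utf-8")
    digest = hmac.new(secret.encode("utf-8"), msg, hashlib.sha256).digest()
    return base64.b64encode(digest)

print(sign_request("my-secret", 1590000000000, "post", "/api/v1/orders", {"size": 1}))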
def map_collection(func, collection):
"""
Apply func to each element of a collection, or value of a dictionary.
If the value is not a collection, return it unmodified
"""
datatype = type(collection)
if isinstance(collection, Mapping):
return datatype((key, func(val)) for key, val in collection.items())
if is_string(collection):
return collection
elif isinstance(collection, Iterable):
return datatype(map(func, collection))
else:
return collection | Apply func to each element of a collection, or value of a dictionary.
If the value is not a collection, return it unmodified |
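Usage sketch, assuming map_collection above is importable together with its Mapping/Iterable imports and an is_string helper:

print(map_collection(str.upper, ["a", "b"]))        # ['A', 'B'] - list type preserved
print(map_collection(lambda v: v * 2, {"x": 1}))    # {'x': 2}   - dict values mapped
print(map_collection(str.upper, "abc"))             # 'abc'      - strings returned unmodified
print(map_collection(abs, -3))                      # -3         - scalars returned unmodified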
def getfigsize(self, opt):
'''calculate appropriate sizes for the subfigures
'''
if opt.xmin is None:
opt.xmin = self.plotman.grid.grid['x'].min()
if opt.xmax is None:
opt.xmax = self.plotman.grid.grid['x'].max()
if opt.zmin is None:
opt.zmin = self.plotman.grid.grid['z'].min()
if opt.zmax is None:
opt.zmax = self.plotman.grid.grid['z'].max()
if np.abs(opt.zmax - opt.zmin) < np.abs(opt.xmax - opt.xmin):
self.sizex = 2 / 2.54
self.sizez = self.sizex * (
np.abs(opt.zmax - opt.zmin) / np.abs(opt.xmax - opt.xmin))
else:
self.sizez = 2 / 2.54
self.sizex = 0.5 * self.sizez * (
np.abs(opt.xmax - opt.xmin) / np.abs(opt.zmax - opt.zmin))
print('schmal')
# add 1 inch to accommodate colorbar
self.sizex += 4 * .5
self.sizex *= 4
self.sizez *= self.rows
self.sizez += 5 | calculate appropriate sizes for the subfigures |
def __purge():
"""Remove all dead signal receivers from the global receivers collection.
Note:
It is assumed that the caller holds the __lock.
"""
global __receivers
newreceivers = collections.defaultdict(list)
for signal, receivers in six.iteritems(__receivers):
alive = [x for x in receivers if not __is_dead(x)]
newreceivers[signal] = alive
__receivers = newreceivers | Remove all dead signal receivers from the global receivers collection.
Note:
It is assumed that the caller holds the __lock. |
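A self-contained sketch of the same pruning pattern using weakref directly (hypothetical names; the module-level lock and the __is_dead helper above are not reproduced):

import collections
import weakref

receivers = collections.defaultdict(list)

def connect(signal, func):
    # Store only a weak reference so receivers can be garbage collected.
    receivers[signal].append(weakref.ref(func))

def purge():
    # Rebuild the registry, keeping only references whose target is still alive.
    global receivers
    alive = collections.defaultdict(list)
    for signal, refs in receivers.items():
        alive[signal] = [r for r in refs if r() is not None]
    receivers = alive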
def _fillVolumesAndPaths(self, paths):
""" Fill in paths.
:arg paths: = { Store.Volume: ["linux path",]}
"""
self.diffs = collections.defaultdict((lambda: []))
self.extraKeys = {}
for key in self.bucket.list():
if key.name.startswith(theTrashPrefix):
continue
keyInfo = self._parseKeyName(key.name)
if keyInfo is None:
if key.name[-1:] != '/':
logger.warning("Ignoring '%s' in S3", key.name)
continue
if keyInfo['type'] == 'info':
stream = io.BytesIO()
key.get_contents_to_file(stream)
Store.Volume.readInfo(stream)
continue
if keyInfo['from'] == 'None':
keyInfo['from'] = None
path = self._relativePath("/" + keyInfo['fullpath'])
if path is None:
continue
diff = Store.Diff(self, keyInfo['to'], keyInfo['from'], key.size)
logger.debug("Adding %s in %s", diff, path)
self.diffs[diff.fromVol].append(diff)
paths[diff.toVol].append(path)
self.extraKeys[diff] = path | Fill in paths.
:arg paths: = { Store.Volume: ["linux path",]} |
def fcoe_get_interface_output_fcoe_intf_total_interfaces(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_total_interfaces = ET.SubElement(output, "fcoe-intf-total-interfaces")
fcoe_intf_total_interfaces.text = kwargs.pop('fcoe_intf_total_interfaces')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def grouper(n, iterable, padvalue=None):
"grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
return zip_longest(*[iter(iterable)]*n, fillvalue=padvalue) | grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x') |
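Usage sketch, assuming the Python 3 itertools import the one-liner relies on:

from itertools import zip_longest   # required by grouper above

print(list(grouper(3, 'abcdefg', 'x')))   # [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]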
def dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,
interaction_index="auto",
color="#1E88E5", axis_color="#333333", cmap=colors.red_blue,
dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, show=True):
""" Create a SHAP dependence plot, colored by an interaction feature.
Plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
    richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects. Grey ticks along the y-axis are data
points where the feature's value was NaN.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to plot. If this is a string it is
either the name of the feature to plot, or it can have the form "rank(int)" to specify
the feature with that rank (ordered by mean absolute SHAP value over all the samples).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features).
feature_names : list
Names of the features (length # features).
display_features : numpy.array or pandas.DataFrame
Matrix of feature values for visual display (such as strings instead of coded values).
interaction_index : "auto", None, int, or string
The index of the feature used to color the plot. The name of a feature can also be passed
as a string. If "auto" then shap.common.approximate_interactions is used to pick what
        seems to be the strongest interaction (note that to find the true strongest interaction you
need to compute the SHAP interaction values).
x_jitter : float (0 - 1)
Adds random jitter to feature values. May increase plot readability when feature
is discrete.
alpha : float
        The transparency of the data points (between 0 and 1). This can be useful to
        show the density of the data points when using a large dataset.
xmin : float or string
Represents the lower bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
xmax : float or string
Represents the upper bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
"""
# convert from DataFrames if we got any
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
if str(type(display_features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = display_features.columns
display_features = display_features.values
elif display_features is None:
display_features = features
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
# allow vectors to be passed
if len(shap_values.shape) == 1:
        shap_values = np.reshape(shap_values, (len(shap_values), 1))
if len(features.shape) == 1:
        features = np.reshape(features, (len(features), 1))
ind = convert_name(ind, shap_values, feature_names)
# plotting SHAP interaction values
if len(shap_values.shape) == 3 and len(ind) == 2:
ind1 = convert_name(ind[0], shap_values, feature_names)
ind2 = convert_name(ind[1], shap_values, feature_names)
if ind1 == ind2:
proj_shap_values = shap_values[:, ind2, :]
else:
proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half
# TODO: remove recursion; generally the functions should be shorter for more maintainable code
dependence_plot(
ind1, proj_shap_values, features, feature_names=feature_names,
interaction_index=ind2, display_features=display_features, show=False,
xmin=xmin, xmax=xmax
)
if ind1 == ind2:
pl.ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])
else:
pl.ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))
if show:
pl.show()
return
assert shap_values.shape[0] == features.shape[0], \
"'shap_values' and 'features' values must have the same number of rows!"
assert shap_values.shape[1] == features.shape[1], \
"'shap_values' must have the same number of columns as 'features'!"
# get both the raw and display feature values
oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering
np.random.shuffle(oinds)
xv = features[oinds, ind].astype(np.float64)
xd = display_features[oinds, ind]
s = shap_values[oinds, ind]
if type(xd[0]) == str:
name_map = {}
for i in range(len(xv)):
name_map[xd[i]] = xv[i]
xnames = list(name_map.keys())
# allow a single feature name to be passed alone
if type(feature_names) == str:
feature_names = [feature_names]
name = feature_names[ind]
    # guess which other feature has the strongest interaction with the plotted feature
if interaction_index == "auto":
interaction_index = approximate_interactions(ind, shap_values, features)[0]
interaction_index = convert_name(interaction_index, shap_values, feature_names)
categorical_interaction = False
# get both the raw and display color values
color_norm = None
if interaction_index is not None:
cv = features[:, interaction_index]
cd = display_features[:, interaction_index]
clow = np.nanpercentile(cv.astype(np.float), 5)
chigh = np.nanpercentile(cv.astype(np.float), 95)
if type(cd[0]) == str:
cname_map = {}
for i in range(len(cv)):
cname_map[cd[i]] = cv[i]
cnames = list(cname_map.keys())
categorical_interaction = True
elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:
categorical_interaction = True
        # discretize colors for categorical features
if categorical_interaction and clow != chigh:
clow = np.nanmin(cv.astype(np.float))
chigh = np.nanmax(cv.astype(np.float))
bounds = np.linspace(clow, chigh, int(chigh - clow + 2))
color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)
# optionally add jitter to feature values
if x_jitter > 0:
if x_jitter > 1: x_jitter = 1
xvals = xv.copy()
if isinstance(xvals[0], float):
xvals = xvals.astype(np.float)
xvals = xvals[~np.isnan(xvals)]
xvals = np.unique(xvals)
if len(xvals) >= 2:
smallest_diff = np.min(np.diff(np.sort(xvals)))
jitter_amount = x_jitter * smallest_diff
xv += (np.random.ranf(size = len(xv))*jitter_amount) - (jitter_amount/2)
# the actual scatter plot, TODO: adapt the dot_size to the number of data points?
xv_nan = np.isnan(xv)
xv_notnan = np.invert(xv_nan)
if interaction_index is not None:
# plot the nan values in the interaction feature as grey
cvals = features[oinds, interaction_index].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0
cvals[cvals_imp > chigh] = chigh
cvals[cvals_imp < clow] = clow
p = pl.scatter(
xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],
cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,
norm=color_norm, rasterized=len(xv) > 500
)
p.set_array(cvals[xv_notnan])
else:
pl.scatter(xv, s, s=dot_size, linewidth=0, color=color,
alpha=alpha, rasterized=len(xv) > 500)
if interaction_index != ind and interaction_index is not None:
# draw the color bar
if type(cd[0]) == str:
tick_positions = [cname_map[n] for n in cnames]
if len(tick_positions) == 2:
tick_positions[0] -= 0.25
tick_positions[1] += 0.25
cb = pl.colorbar(ticks=tick_positions)
cb.set_ticklabels(cnames)
else:
cb = pl.colorbar()
cb.set_label(feature_names[interaction_index], size=13)
cb.ax.tick_params(labelsize=11)
if categorical_interaction:
cb.ax.tick_params(length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
# handles any setting of xmax and xmin
# note that we handle None,float, or "percentile(float)" formats
if xmin is not None or xmax is not None:
if type(xmin) == str and xmin.startswith("percentile"):
xmin = np.nanpercentile(xv, float(xmin[11:-1]))
if type(xmax) == str and xmax.startswith("percentile"):
xmax = np.nanpercentile(xv, float(xmax[11:-1]))
if xmin is None or xmin == np.nanmin(xv):
xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20
if xmax is None or xmax == np.nanmax(xv):
xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20
pl.xlim(xmin, xmax)
# plot any nan feature values as tick marks along the y-axis
xlim = pl.xlim()
if interaction_index is not None:
p = pl.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,
vmin=clow, vmax=chigh
)
p.set_array(cvals[xv_nan])
else:
pl.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, color=color, alpha=alpha
)
pl.xlim(*xlim)
# make the plot more readable
if interaction_index != ind:
pl.gcf().set_size_inches(7.5, 5)
else:
pl.gcf().set_size_inches(6, 5)
pl.xlabel(name, color=axis_color, fontsize=13)
pl.ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)
if title is not None:
pl.title(title, color=axis_color, fontsize=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)
for spine in pl.gca().spines.values():
spine.set_edgecolor(axis_color)
if type(xd[0]) == str:
pl.xticks([name_map[n] for n in xnames], xnames, rotation='vertical', fontsize=11)
if show:
with warnings.catch_warnings(): # ignore expected matplotlib warnings
warnings.simplefilter("ignore", RuntimeWarning)
pl.show() | Create a SHAP dependence plot, colored by an interaction feature.
Plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects. Grey ticks along the y-axis are data
points where the feature's value was NaN.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to plot. If this is a string it is
either the name of the feature to plot, or it can have the form "rank(int)" to specify
the feature with that rank (ordered by mean absolute SHAP value over all the samples).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features).
feature_names : list
Names of the features (length # features).
display_features : numpy.array or pandas.DataFrame
Matrix of feature values for visual display (such as strings instead of coded values).
interaction_index : "auto", None, int, or string
The index of the feature used to color the plot. The name of a feature can also be passed
as a string. If "auto" then shap.common.approximate_interactions is used to pick what
seems to be the strongest interaction (note that to find the true strongest interaction you
need to compute the SHAP interaction values).
x_jitter : float (0 - 1)
Adds random jitter to feature values. May increase plot readability when feature
is discrete.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to
show the density of the data points when using a large dataset.
xmin : float or string
Represents the lower bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
xmax : float or string
Represents the upper bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis. |
def _ed25519_key_from_file(fn, path):
"""Create an ed25519 key from the contents of ``path``.
``path`` is a filepath containing a base64-encoded ed25519 key seed.
Args:
fn (callable): the function to call with the contents from ``path``
path (str): the file path to the base64-encoded key seed.
Returns:
obj: the appropriate key type from ``path``
Raises:
ScriptWorkerEd25519Error
"""
try:
return fn(read_from_file(path, exception=ScriptWorkerEd25519Error))
except ScriptWorkerException as exc:
raise ScriptWorkerEd25519Error("Failed calling {} for {}: {}!".format(fn, path, str(exc))) | Create an ed25519 key from the contents of ``path``.
``path`` is a filepath containing a base64-encoded ed25519 key seed.
Args:
fn (callable): the function to call with the contents from ``path``
path (str): the file path to the base64-encoded key seed.
Returns:
obj: the appropriate key type from ``path``
Raises:
ScriptWorkerEd25519Error |
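A hedged sketch of the same idea using the cryptography package (an assumed backend choice, not necessarily the one the original library uses): read a base64-encoded seed from disk and build the private key from the raw bytes.

import base64
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

def ed25519_private_key_from_file(path):
    # path holds a base64-encoded 32-byte seed (hypothetical file layout).
    with open(path, "r") as f:
        seed = base64.b64decode(f.read().strip())
    return Ed25519PrivateKey.from_private_bytes(seed)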
def histogram(a, bins=10, range=None, **kwargs):
"""Compute the histogram of the input data.
Parameters
----------
a : NDArray
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars
If bins is an int, it defines the number of equal-width bins in the
given range (10, by default). If bins is a sequence, it defines the bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), required if bins is an integer
The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()).
Values outside the range are ignored. The first element of the range must be less than or
equal to the second. range affects the automatic bin computation as well, the range will
be equally divided by the number of bins.
Returns
-------
out : Symbol
The created Symbol
"""
if isinstance(bins, Symbol):
return _internal._histogram(data=a, bins=bins, **kwargs)
elif isinstance(bins, integer_types):
if range is None:
raise ValueError("null range is not supported in symbol mode")
return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs)
raise ValueError("bins argument should be either an integer or an NDArray") | Compute the histogram of the input data.
Parameters
----------
a : NDArray
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars
If bins is an int, it defines the number of equal-width bins in the
given range (10, by default). If bins is a sequence, it defines the bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), required if bins is an integer
The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()).
Values outside the range are ignored. The first element of the range must be less than or
equal to the second. range affects the automatic bin computation as well, the range will
be equally divided by the number of bins.
Returns
-------
out : Symbol
The created Symbol |
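For comparison, the eager NumPy equivalent of the two supported call shapes (integer bin count with an explicit range, and explicit bin edges); this is plain numpy.histogram, not the symbol API above.

import numpy as np

a = np.array([0.1, 0.4, 0.5, 2.5, 3.0])

counts, edges = np.histogram(a, bins=4, range=(0.0, 4.0))   # 4 equal-width bins over [0, 4]
print(counts, edges)                                        # [3 0 1 1] [0. 1. 2. 3. 4.]

counts, edges = np.histogram(a, bins=[0.0, 0.5, 1.0, 4.0])  # explicit, non-uniform bin edges
print(counts)                                               # [2 1 2]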
def init_default(required, default, optional_default):
"""
Returns optional default if field is not required and
default was not provided.
:param bool required: whether the field is required in a given model.
:param default: default provided by creator of field.
:param optional_default: default for the data type if none provided.
:return: default or optional default based on inputs
"""
if not required and default == NOTHING:
default = optional_default
return default | Returns optional default if field is not required and
default was not provided.
:param bool required: whether the field is required in a given model.
:param default: default provided by creator of field.
:param optional_default: default for the data type if none provided.
:return: default or optional default based on inputs |
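A self-contained usage sketch with a hypothetical NOTHING sentinel standing in for the library's own:

NOTHING = object()   # hypothetical "no default provided" sentinel

def init_default_sketch(required, default, optional_default):
    # Same rule as above: only non-required fields without an explicit
    # default fall back to the type-specific optional_default.
    if not required and default is NOTHING:
        default = optional_default
    return default

print(init_default_sketch(False, NOTHING, []))   # [] - type default used
print(init_default_sketch(False, 42, 0))         # 42 - explicit default kept
print(init_default_sketch(True, NOTHING, 0))     # the sentinel passes through for required fields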
def size(self, store_hashes=True):
"""
Retrieves the size in bytes of this ZIP content.
:return: Size of the zip content in bytes
"""
if self.modified:
self.__cache_content(store_hashes)
return len(self.cached_content) | Retrieves the size in bytes of this ZIP content.
:return: Size of the zip content in bytes |
def create_fake_mirror(src, dst):
"""Copy all dir, files from ``src`` to ``dst``. But only create a empty file
with same file name. Of course, the tree structure doesn't change.
A recipe gadget to create some test data set.
Make sure to use absolute path.
**中文文档**
复制整个src目录下的文件树结构到dst目录。但实际上并不复制内容, 只复制
文件名。即, 全是空文件, 但目录结构一致。
"""
src = os.path.abspath(src)
if not (os.path.exists(src) and (not os.path.exists(dst)) ):
raise Exception("source not exist or distination already exist")
folder_to_create = list()
file_to_create = list()
for current_folder, _, file_list in os.walk(src):
new_folder = os.path.join(dst, os.path.relpath(current_folder, src))
folder_to_create.append(new_folder)
for basename in file_list:
file_to_create.append(os.path.join(new_folder, basename))
for abspath in folder_to_create:
os.mkdir(abspath)
for abspath in file_to_create:
with open(abspath, "w") as _:
            pass | Copy all dirs and files from ``src`` to ``dst``, but only create an empty file
with the same file name. Of course, the tree structure doesn't change.
A recipe gadget to create some test data set.
Make sure to use absolute path.
**中文文档**
复制整个src目录下的文件树结构到dst目录。但实际上并不复制内容, 只复制
文件名。即, 全是空文件, 但目录结构一致。 |
def skyimage_figure(cluster):
"""
Given a cluster create a Bokeh plot figure using the
cluster's image.
"""
pf_image = figure(x_range=(0, 1), y_range=(0, 1),
title='Image of {0}'.format(cluster.name))
pf_image.image_url(url=[cluster.image_path],
x=0, y=0, w=1, h=1, anchor='bottom_left')
pf_image.toolbar_location = None
pf_image.axis.visible = False
return pf_image | Given a cluster create a Bokeh plot figure using the
cluster's image. |
def argmax(self, axis=None, skipna=True):
"""
Return an ndarray of the maximum argument indexer.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series
skipna : bool, default True
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_minmax_axis(axis)
return nanops.nanargmax(self._values, skipna=skipna) | Return an ndarray of the maximum argument indexer.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series
skipna : bool, default True
See Also
--------
numpy.ndarray.argmax |
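The underlying reduction is a NaN-aware argmax; a plain NumPy illustration (not the pandas nanops path above):

import numpy as np

values = np.array([1.0, np.nan, 3.5, 2.0])
print(np.nanargmax(values))   # 2 - NaNs are skipped, index of 3.5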
def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
cells=None, linestarts=None, line_offset=0):
"""Iterate over the instructions in a bytecode string.
Generates a sequence of Instruction namedtuples giving the details of each
opcode. Additional information about the code's runtime environment
(e.g. variable names, constants) can be specified using optional
arguments.
"""
labels = dis.findlabels(code)
extended_arg = 0
starts_line = None
free = None
# enumerate() is not an option, since we sometimes process
# multiple elements on a single pass through the loop
n = len(code)
i = 0
while i < n:
op = code[i]
offset = i
if linestarts is not None:
starts_line = linestarts.get(i, None)
if starts_line is not None:
starts_line += line_offset
is_jump_target = i in labels
i = i + 1
arg = None
argval = None
argrepr = ''
if op >= dis.HAVE_ARGUMENT:
arg = code[i] + code[i + 1] * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == dis.EXTENDED_ARG:
extended_arg = arg * 65536
# Set argval to the dereferenced value of the argument when
            # available, and argrepr to the string representation of argval.
# _disassemble_bytes needs the string repr of the
# raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
argval = arg
if op in dis.hasconst:
argval, argrepr = dis._get_const_info(arg, constants)
elif op in dis.hasname:
argval, argrepr = dis._get_name_info(arg, names)
elif op in dis.hasjrel:
argval = i + arg
argrepr = "to " + repr(argval)
elif op in dis.haslocal:
argval, argrepr = dis._get_name_info(arg, varnames)
elif op in dis.hascompare:
argval = dis.cmp_op[arg]
argrepr = argval
elif op in dis.hasfree:
argval, argrepr = dis._get_name_info(arg, cells)
elif op in dis.hasnargs:
argrepr = "%d positional, %d keyword pair" % (code[i - 2], code[i - 1])
yield dis.Instruction(dis.opname[op], op,
arg, argval, argrepr,
offset, starts_line, is_jump_target) | Iterate over the instructions in a bytecode string.
Generates a sequence of Instruction namedtuples giving the details of each
opcode. Additional information about the code's runtime environment
(e.g. variable names, constants) can be specified using optional
arguments. |
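On Python 3.4+ the standard library exposes the same record type directly; a usage sketch with dis.get_instructions, which yields Instruction namedtuples without hand-decoding the bytecode:

import dis

def sample(x):
    return x + 1

for ins in dis.get_instructions(sample):
    print(ins.offset, ins.opname, ins.argrepr)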
def create(klass, account, name):
"""
Creates a new tailored audience.
"""
audience = klass(account)
getattr(audience, '__create_audience__')(name)
try:
return audience.reload()
except BadRequest as e:
audience.delete()
raise e | Creates a new tailored audience. |
def make_at_least_n_items_valid(flag_list, n):
"""
    tries to make at least min(len(flag_list), n) items True in flag_list
Args:
flag_list (list): list of booleans
n (int): number of items to ensure are True
CommandLine:
python -m utool.util_dev --test-make_at_least_n_items_valid
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> # build test data
>>> flag_list = [False, True, False, False, False, False, False, True]
>>> n = 5
>>> # execute function
>>> flag_list = make_at_least_n_items_valid(flag_list, n)
>>> # verify results
>>> result = str(flag_list)
>>> print(result)
[ True True True True False False False True]
"""
flag_list = np.array(flag_list)
num_valid = flag_list.sum()
# Find how many places we need to make true
num_extra = min(len(flag_list) - num_valid, n - num_valid)
# make_at_least_n_items_valid
# Add in some extra daids to show if there are not enough
for index in range(len(flag_list)):
if num_extra <= 0:
break
if not flag_list[index]:
flag_list[index] = True
num_extra -= 1
    return flag_list | tries to make at least min(len(flag_list), n) items True in flag_list
Args:
flag_list (list): list of booleans
n (int): number of items to ensure are True
CommandLine:
python -m utool.util_dev --test-make_at_least_n_items_valid
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> # build test data
>>> flag_list = [False, True, False, False, False, False, False, True]
>>> n = 5
>>> # execute function
>>> flag_list = make_at_least_n_items_valid(flag_list, n)
>>> # verify results
>>> result = str(flag_list)
>>> print(result)
[ True True True True False False False True] |
def run(self):
"""
Runs its worker method.
This method will be terminated once its parent's is_running
property turns False.
"""
while self._base.is_running:
if self._worker:
self._worker()
time.sleep(self._sleep_duration) | Runs its worker method.
This method will be terminated once its parent's is_running
property turns False. |
def cmdloop(self, intro: Optional[str] = None) -> None:
"""This is an outer wrapper around _cmdloop() which deals with extra features provided by cmd2.
_cmdloop() provides the main loop equivalent to cmd.cmdloop(). This is a wrapper around that which deals with
the following extra features provided by cmd2:
- commands at invocation
- transcript testing
- intro banner
:param intro: if provided this overrides self.intro and serves as the intro banner printed once at start
"""
# cmdloop() expects to be run in the main thread to support extensive use of KeyboardInterrupts throughout the
# other built-in functions. You are free to override cmdloop, but much of cmd2's features will be limited.
if not threading.current_thread() is threading.main_thread():
raise RuntimeError("cmdloop must be run in the main thread")
# Register a SIGINT signal handler for Ctrl+C
import signal
original_sigint_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self.sigint_handler)
if self.allow_cli_args:
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--test', action="store_true",
help='Test against transcript(s) in FILE (wildcards OK)')
callopts, callargs = parser.parse_known_args()
# If transcript testing was called for, use other arguments as transcript files
if callopts.test:
self._transcript_files = callargs
# If commands were supplied at invocation, then add them to the command queue
if callargs:
self.cmdqueue.extend(callargs)
# Grab terminal lock before the prompt has been drawn by readline
self.terminal_lock.acquire()
# Always run the preloop first
for func in self._preloop_hooks:
func()
self.preloop()
# If transcript-based regression testing was requested, then do that instead of the main loop
if self._transcript_files is not None:
self.run_transcript_tests([os.path.expanduser(tf) for tf in self._transcript_files])
else:
# If an intro was supplied in the method call, allow it to override the default
if intro is not None:
self.intro = intro
# Print the intro, if there is one, right after the preloop
if self.intro is not None:
self.poutput(str(self.intro) + "\n")
# And then call _cmdloop() to enter the main loop
self._cmdloop()
# Run the postloop() no matter what
for func in self._postloop_hooks:
func()
self.postloop()
# Release terminal lock now that postloop code should have stopped any terminal updater threads
# This will also zero the lock count in case cmdloop() is called again
self.terminal_lock.release()
# Restore the original signal handler
signal.signal(signal.SIGINT, original_sigint_handler)
if self.exit_code is not None:
sys.exit(self.exit_code) | This is an outer wrapper around _cmdloop() which deals with extra features provided by cmd2.
_cmdloop() provides the main loop equivalent to cmd.cmdloop(). This is a wrapper around that which deals with
the following extra features provided by cmd2:
- commands at invocation
- transcript testing
- intro banner
:param intro: if provided this overrides self.intro and serves as the intro banner printed once at start |
def _execute_get_url(self, request_url, append_sid=True):
"""Function to execute and handle a GET request"""
# Prepare Request
self._debuglog("Requesting URL: '" + request_url + "'")
if append_sid:
self._debuglog("Appending access_token (SID: " +
self.access_token + ") to url")
request_url = "%s&_sid=%s" % (
request_url, self.access_token)
# Execute Request
try:
resp = self._session.get(request_url)
self._debuglog("Request executed: " + str(resp.status_code))
if resp.status_code == 200:
# We got a response
json_data = json.loads(resp.text)
if json_data["success"]:
self._debuglog("Succesfull returning data")
self._debuglog(str(json_data))
return json_data
else:
if json_data["error"]["code"] in {105, 106, 107, 119}:
self._debuglog("Session error: " +
str(json_data["error"]["code"]))
self._session_error = True
else:
self._debuglog("Failed: " + resp.text)
else:
# We got a 404 or 401
return None
#pylint: disable=bare-except
except:
return None | Function to execute and handle a GET request |
def _cleanup_ca_temp_file(self):
"""
Function to clean up ca temp file for requests.
**Returns:** Removes TEMP ca file, no return
"""
if os.name == 'nt':
if isinstance(self.ca_verify_filename, (binary_type, text_type)):
# windows requires file to be closed for access. Have to manually remove
os.unlink(self.ca_verify_filename)
else:
# other OS's allow close and delete of file.
self._ca_verify_file_handle.close() | Function to clean up ca temp file for requests.
**Returns:** Removes TEMP ca file, no return |
def define_task(name,
tick_script,
task_type='stream',
database=None,
retention_policy='default',
dbrps=None):
'''
Define a task. Serves as both create/update.
name
Name of the task.
tick_script
Path to the TICK script for the task. Can be a salt:// source.
task_type
Task type. Defaults to 'stream'
dbrps
A list of databases and retention policies in "dbname"."rpname" format
to fetch data from. For backward compatibility, the value of
'database' and 'retention_policy' will be merged as part of dbrps.
.. versionadded:: 2019.2.0
database
Which database to fetch data from.
retention_policy
Which retention policy to fetch data from. Defaults to 'default'.
CLI Example:
.. code-block:: bash
salt '*' kapacitor.define_task cpu salt://kapacitor/cpu.tick database=telegraf
'''
if not database and not dbrps:
log.error("Providing database name or dbrps is mandatory.")
return False
if version() < '0.13':
cmd = 'kapacitor define -name {0}'.format(name)
else:
cmd = 'kapacitor define {0}'.format(name)
if tick_script.startswith('salt://'):
tick_script = __salt__['cp.cache_file'](tick_script, __env__)
cmd += ' -tick {0}'.format(tick_script)
if task_type:
cmd += ' -type {0}'.format(task_type)
if not dbrps:
dbrps = []
if database and retention_policy:
dbrp = '{0}.{1}'.format(database, retention_policy)
dbrps.append(dbrp)
if dbrps:
for dbrp in dbrps:
cmd += ' -dbrp {0}'.format(dbrp)
return _run_cmd(cmd) | Define a task. Serves as both create/update.
name
Name of the task.
tick_script
Path to the TICK script for the task. Can be a salt:// source.
task_type
Task type. Defaults to 'stream'
dbrps
A list of databases and retention policies in "dbname"."rpname" format
to fetch data from. For backward compatibility, the value of
'database' and 'retention_policy' will be merged as part of dbrps.
.. versionadded:: 2019.2.0
database
Which database to fetch data from.
retention_policy
Which retention policy to fetch data from. Defaults to 'default'.
CLI Example:
.. code-block:: bash
salt '*' kapacitor.define_task cpu salt://kapacitor/cpu.tick database=telegraf |
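A sketch of just the command-string assembly for a recent kapacitor (hypothetical helper, no salt modules), showing how dbrps and the legacy database/retention_policy arguments are merged:

def build_define_cmd(name, tick_script, task_type='stream',
                     database=None, retention_policy='default', dbrps=None):
    cmd = 'kapacitor define {0} -tick {1}'.format(name, tick_script)
    if task_type:
        cmd += ' -type {0}'.format(task_type)
    dbrps = list(dbrps or [])
    if database and retention_policy:
        dbrps.append('{0}.{1}'.format(database, retention_policy))
    for dbrp in dbrps:
        cmd += ' -dbrp {0}'.format(dbrp)
    return cmd

print(build_define_cmd('cpu', '/tmp/cpu.tick', database='telegraf'))
# kapacitor define cpu -tick /tmp/cpu.tick -type stream -dbrp telegraf.default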
def createDirStruct(paths, verbose=True):
'''Loops ait.config._datapaths from AIT_CONFIG and creates a directory.
Replaces year and doy with the respective year and day-of-year.
If neither are given as arguments, current UTC day and year are used.
Args:
paths:
[optional] list of directory paths you would like to create.
doy and year will be replaced by the datetime day and year, respectively.
datetime:
UTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ
'''
for k, path in paths.items():
p = None
try:
pathlist = path if type(path) is list else [ path ]
for p in pathlist:
os.makedirs(p)
if verbose:
log.info('Creating directory: ' + p)
        except OSError as e:
#print path
if e.errno == errno.EEXIST and os.path.isdir(p):
pass
else:
raise
return True | Loops ait.config._datapaths from AIT_CONFIG and creates a directory.
Replaces year and doy with the respective year and day-of-year.
If neither are given as arguments, current UTC day and year are used.
Args:
paths:
[optional] list of directory paths you would like to create.
doy and year will be replaced by the datetime day and year, respectively.
datetime:
UTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ |
def order_upgrades(self, upgrades, history=None):
"""Order upgrades according to their dependencies.
(topological sort using
Kahn's algorithm - http://en.wikipedia.org/wiki/Topological_sorting).
:param upgrades: Dict of upgrades
:param history: Dict of applied upgrades
"""
history = history or {}
graph_incoming, graph_outgoing = self._create_graph(upgrades, history)
# Removed already applied upgrades (assumes all dependencies prior to
# this upgrade has been applied).
for node_id in six.iterkeys(history):
start_nodes = [node_id, ]
while start_nodes:
node = start_nodes.pop()
# Remove from direct dependents
try:
for d in graph_outgoing[node]:
graph_incoming[d] = [x for x in graph_incoming[d]
if x != node]
except KeyError:
warnings.warn("Ghost upgrade %s detected" % node)
# Remove all prior dependencies
if node in graph_incoming:
# Get dependencies, remove node, and recursively
# remove all dependencies.
depends_on = graph_incoming[node]
# Add dependencies to check
for d in depends_on:
graph_outgoing[d] = [x for x in graph_outgoing[d]
if x != node]
start_nodes.append(d)
del graph_incoming[node]
# Check for missing dependencies
for node_id, depends_on in six.iteritems(graph_incoming):
for d in depends_on:
if d not in graph_incoming:
raise RuntimeError("Upgrade %s depends on an unknown"
" upgrade %s" % (node_id, d))
# Nodes with no incoming edges
start_nodes = [x for x in six.iterkeys(graph_incoming)
if len(graph_incoming[x]) == 0]
topo_order = []
while start_nodes:
# Append node_n to list (it has no incoming edges)
node_n = start_nodes.pop()
topo_order.append(node_n)
# For each node m with and edge from n to m
for node_m in graph_outgoing[node_n]:
# Remove the edge n to m
graph_incoming[node_m] = [x for x in graph_incoming[node_m]
if x != node_n]
# If m has no incoming edges, add it to start_nodes.
if not graph_incoming[node_m]:
start_nodes.append(node_m)
for node, edges in six.iteritems(graph_incoming):
if edges:
raise RuntimeError("The upgrades have at least one cyclic "
"dependency involving %s." % node)
return map(lambda x: upgrades[x], topo_order) | Order upgrades according to their dependencies.
(topological sort using
Kahn's algorithm - http://en.wikipedia.org/wiki/Topological_sorting).
:param upgrades: Dict of upgrades
:param history: Dict of applied upgrades |
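The core of the ordering is plain Kahn's algorithm; a self-contained sketch on a small dependency dict mapping node -> prerequisites, without the history pruning and ghost-upgrade handling above:

import collections

def topological_order(depends_on):
    # depends_on: {node: [prerequisite, ...]}
    incoming = {n: set(deps) for n, deps in depends_on.items()}
    outgoing = collections.defaultdict(set)
    for node, deps in depends_on.items():
        for d in deps:
            outgoing[d].add(node)
    ready = [n for n, deps in incoming.items() if not deps]
    order = []
    while ready:
        n = ready.pop()
        order.append(n)
        for m in outgoing[n]:
            incoming[m].discard(n)
            if not incoming[m]:
                ready.append(m)
    if any(incoming.values()):
        raise RuntimeError("cyclic dependency detected")
    return order

print(topological_order({'a': [], 'b': ['a'], 'c': ['a', 'b']}))   # ['a', 'b', 'c']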
def native_decode_source(text):
"""Use codec specified in file to decode to unicode
Then, encode unicode to native str:
Python 2: bytes
Python 3: unicode
"""
if ((only_python3 and isinstance(text, bytes))
or (only_python2 and isinstance(text, str))):
text = decode_source_to_unicode(text)
if only_python2:
return text.encode('ascii', 'replace')
return text | Use codec specified in file to decode to unicode
Then, encode unicode to native str:
Python 2: bytes
Python 3: unicode |
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error) | Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error |
def write_java_message(key,val,text_file):
"""
Loop through all java messages that are not associated with a unit test and
write them into a log file.
Parameters
----------
key : str
9.general_bad_java_messages
val : list of list of str
contains the bad java messages and the message types.
:return: none
"""
text_file.write(key)
text_file.write('\n')
if (len(val[0]) > 0) and (len(val) >= 3):
for index in range(len(val[0])):
text_file.write("Java Message Type: ")
text_file.write(val[1][index])
text_file.write('\n')
text_file.write("Java Message: ")
for jmess in val[2][index]:
text_file.write(jmess)
text_file.write('\n')
text_file.write('\n \n') | Loop through all java messages that are not associated with a unit test and
write them into a log file.
Parameters
----------
key : str
9.general_bad_java_messages
val : list of list of str
contains the bad java messages and the message types.
:return: none |
def random_str(size=10):
"""
create random string of selected size
:param size: int, length of the string
:return: the string
"""
return ''.join(random.choice(string.ascii_lowercase) for _ in range(size)) | create random string of selected size
:param size: int, length of the string
:return: the string |
def serialize(self, method="urlencoded", lev=0, **kwargs):
"""
Convert this instance to another representation. Which representation
is given by the choice of serialization method.
:param method: A serialization method. Presently 'urlencoded', 'json',
        'jwt' and 'dict' are supported.
:param lev:
:param kwargs: Extra key word arguments
        :return: The content of this message serialized using a chosen method
"""
return getattr(self, "to_%s" % method)(lev=lev, **kwargs) | Convert this instance to another representation. Which representation
is given by the choice of serialization method.
:param method: A serialization method. Presently 'urlencoded', 'json',
'jwt' and 'dict' are supported.
:param lev:
:param kwargs: Extra key word arguments
:return: The content of this message serialized using a chosen method
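The dispatch is a plain getattr lookup on to_<method>; a minimal sketch with a hypothetical message class:

class Message(object):
    def __init__(self, **kwargs):
        self._dict = kwargs

    def to_dict(self, lev=0):
        return dict(self._dict)

    def to_urlencoded(self, lev=0):
        return "&".join("{0}={1}".format(k, v) for k, v in sorted(self._dict.items()))

    def serialize(self, method="urlencoded", lev=0, **kwargs):
        # Route to the matching to_<method> serializer.
        return getattr(self, "to_%s" % method)(lev=lev, **kwargs)

msg = Message(grant_type="authorization_code", code="abc")
print(msg.serialize())                 # code=abc&grant_type=authorization_code
print(msg.serialize(method="dict"))    # {'grant_type': 'authorization_code', 'code': 'abc'}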