code | code_sememe | token_type | code_dependency |
---|---|---|---|
def cone(self, plunge, bearing, angle, segments=100, bidirectional=True,
**kwargs):
"""
Plot a polygon of a small circle (a.k.a. a cone) with an angular radius
of *angle* centered at a p/b of *plunge*, *bearing*. Additional keyword
arguments are passed on to the ``PathCollection``. (e.g. to have an
unfilled small circle, pass "facecolor='none'".)
Parameters
----------
plunge : number or sequence of numbers
The plunge of the center of the cone in degrees.
bearing : number or sequence of numbers
The bearing of the center of the cone in degrees.
angle : number or sequence of numbers
The angular radius of the cone in degrees.
segments : int, optional
The number of vertices to use for the cone. Defaults to 100.
bidirectional : boolean, optional
Whether or not to draw two patches (the one given and its antipode)
for each measurement. Defaults to True.
**kwargs
Additional parameters are ``matplotlib.collections.PatchCollection``
properties.
Returns
-------
collection : ``matplotlib.collections.PathCollection``
Notes
-----
If *bidirectional* is ``True``, two circles will be plotted, even if
only one of each pair is visible. This is the default behavior.
"""
plunge, bearing, angle = np.atleast_1d(plunge, bearing, angle)
patches = []
lons, lats = stereonet_math.cone(plunge, bearing, angle, segments)
codes = mpath.Path.LINETO * np.ones(segments, dtype=np.uint8)
codes[0] = mpath.Path.MOVETO
if bidirectional:
p, b = -plunge, bearing + 180
alons, alats = stereonet_math.cone(p, b, angle, segments)
codes = np.hstack([codes, codes])
lons = np.hstack([lons, alons])
lats = np.hstack([lats, alats])
for lon, lat in zip(lons, lats):
xy = np.vstack([lon, lat]).T
path = mpath.Path(xy, codes)
patches.append(mpatches.PathPatch(path))
col = mcollections.PatchCollection(patches, **kwargs)
self.add_collection(col)
return col | def function[cone, parameter[self, plunge, bearing, angle, segments, bidirectional]]:
constant[
Plot a polygon of a small circle (a.k.a. a cone) with an angular radius
of *angle* centered at a p/b of *plunge*, *bearing*. Additional keyword
arguments are passed on to the ``PathCollection``. (e.g. to have an
unfilled small circle, pass "facecolor='none'".)
Parameters
----------
plunge : number or sequence of numbers
The plunge of the center of the cone in degrees.
bearing : number or sequence of numbers
The bearing of the center of the cone in degrees.
angle : number or sequence of numbers
The angular radius of the cone in degrees.
segments : int, optional
The number of vertices to use for the cone. Defaults to 100.
bidirectional : boolean, optional
Whether or not to draw two patches (the one given and its antipode)
for each measurement. Defaults to True.
**kwargs
Additional parameters are ``matplotlib.collections.PatchCollection``
properties.
Returns
-------
collection : ``matplotlib.collections.PathCollection``
Notes
-----
If *bidirectional* is ``True``, two circles will be plotted, even if
only one of each pair is visible. This is the default behavior.
]
<ast.Tuple object at 0x7da20e74b220> assign[=] call[name[np].atleast_1d, parameter[name[plunge], name[bearing], name[angle]]]
variable[patches] assign[=] list[[]]
<ast.Tuple object at 0x7da20e74a3b0> assign[=] call[name[stereonet_math].cone, parameter[name[plunge], name[bearing], name[angle], name[segments]]]
variable[codes] assign[=] binary_operation[name[mpath].Path.LINETO * call[name[np].ones, parameter[name[segments]]]]
call[name[codes]][constant[0]] assign[=] name[mpath].Path.MOVETO
if name[bidirectional] begin[:]
<ast.Tuple object at 0x7da20cabdd80> assign[=] tuple[[<ast.UnaryOp object at 0x7da20cabe9b0>, <ast.BinOp object at 0x7da20cabf640>]]
<ast.Tuple object at 0x7da20cabee90> assign[=] call[name[stereonet_math].cone, parameter[name[p], name[b], name[angle], name[segments]]]
variable[codes] assign[=] call[name[np].hstack, parameter[list[[<ast.Name object at 0x7da20cabcb20>, <ast.Name object at 0x7da20cabd480>]]]]
variable[lons] assign[=] call[name[np].hstack, parameter[list[[<ast.Name object at 0x7da20cabe860>, <ast.Name object at 0x7da20cabf9a0>]]]]
variable[lats] assign[=] call[name[np].hstack, parameter[list[[<ast.Name object at 0x7da20cabcb50>, <ast.Name object at 0x7da20cabda50>]]]]
for target[tuple[[<ast.Name object at 0x7da20cabc6d0>, <ast.Name object at 0x7da20cabe410>]]] in starred[call[name[zip], parameter[name[lons], name[lats]]]] begin[:]
variable[xy] assign[=] call[name[np].vstack, parameter[list[[<ast.Name object at 0x7da20cabee00>, <ast.Name object at 0x7da20cabf1c0>]]]].T
variable[path] assign[=] call[name[mpath].Path, parameter[name[xy], name[codes]]]
call[name[patches].append, parameter[call[name[mpatches].PathPatch, parameter[name[path]]]]]
variable[col] assign[=] call[name[mcollections].PatchCollection, parameter[name[patches]]]
call[name[self].add_collection, parameter[name[col]]]
return[name[col]] | keyword[def] identifier[cone] ( identifier[self] , identifier[plunge] , identifier[bearing] , identifier[angle] , identifier[segments] = literal[int] , identifier[bidirectional] = keyword[True] ,
** identifier[kwargs] ):
literal[string]
identifier[plunge] , identifier[bearing] , identifier[angle] = identifier[np] . identifier[atleast_1d] ( identifier[plunge] , identifier[bearing] , identifier[angle] )
identifier[patches] =[]
identifier[lons] , identifier[lats] = identifier[stereonet_math] . identifier[cone] ( identifier[plunge] , identifier[bearing] , identifier[angle] , identifier[segments] )
identifier[codes] = identifier[mpath] . identifier[Path] . identifier[LINETO] * identifier[np] . identifier[ones] ( identifier[segments] , identifier[dtype] = identifier[np] . identifier[uint8] )
identifier[codes] [ literal[int] ]= identifier[mpath] . identifier[Path] . identifier[MOVETO]
keyword[if] identifier[bidirectional] :
identifier[p] , identifier[b] =- identifier[plunge] , identifier[bearing] + literal[int]
identifier[alons] , identifier[alats] = identifier[stereonet_math] . identifier[cone] ( identifier[p] , identifier[b] , identifier[angle] , identifier[segments] )
identifier[codes] = identifier[np] . identifier[hstack] ([ identifier[codes] , identifier[codes] ])
identifier[lons] = identifier[np] . identifier[hstack] ([ identifier[lons] , identifier[alons] ])
identifier[lats] = identifier[np] . identifier[hstack] ([ identifier[lats] , identifier[alats] ])
keyword[for] identifier[lon] , identifier[lat] keyword[in] identifier[zip] ( identifier[lons] , identifier[lats] ):
identifier[xy] = identifier[np] . identifier[vstack] ([ identifier[lon] , identifier[lat] ]). identifier[T]
identifier[path] = identifier[mpath] . identifier[Path] ( identifier[xy] , identifier[codes] )
identifier[patches] . identifier[append] ( identifier[mpatches] . identifier[PathPatch] ( identifier[path] ))
identifier[col] = identifier[mcollections] . identifier[PatchCollection] ( identifier[patches] ,** identifier[kwargs] )
identifier[self] . identifier[add_collection] ( identifier[col] )
keyword[return] identifier[col] | def cone(self, plunge, bearing, angle, segments=100, bidirectional=True, **kwargs):
"""
Plot a polygon of a small circle (a.k.a. a cone) with an angular radius
of *angle* centered at a p/b of *plunge*, *bearing*. Additional keyword
arguments are passed on to the ``PathCollection``. (e.g. to have an
unfilled small circle, pass "facecolor='none'".)
Parameters
----------
plunge : number or sequence of numbers
The plunge of the center of the cone in degrees.
bearing : number or sequence of numbers
The bearing of the center of the cone in degrees.
angle : number or sequence of numbers
The angular radius of the cone in degrees.
segments : int, optional
The number of vertices to use for the cone. Defaults to 100.
bidirectional : boolean, optional
Whether or not to draw two patches (the one given and its antipode)
for each measurement. Defaults to True.
**kwargs
Additional parameters are ``matplotlib.collections.PatchCollection``
properties.
Returns
-------
collection : ``matplotlib.collections.PathCollection``
Notes
-----
If *bidirectional* is ``True``, two circles will be plotted, even if
only one of each pair is visible. This is the default behavior.
"""
(plunge, bearing, angle) = np.atleast_1d(plunge, bearing, angle)
patches = []
(lons, lats) = stereonet_math.cone(plunge, bearing, angle, segments)
codes = mpath.Path.LINETO * np.ones(segments, dtype=np.uint8)
codes[0] = mpath.Path.MOVETO
if bidirectional:
(p, b) = (-plunge, bearing + 180)
(alons, alats) = stereonet_math.cone(p, b, angle, segments)
codes = np.hstack([codes, codes])
lons = np.hstack([lons, alons])
lats = np.hstack([lats, alats]) # depends on [control=['if'], data=[]]
for (lon, lat) in zip(lons, lats):
xy = np.vstack([lon, lat]).T
path = mpath.Path(xy, codes)
patches.append(mpatches.PathPatch(path)) # depends on [control=['for'], data=[]]
col = mcollections.PatchCollection(patches, **kwargs)
self.add_collection(col)
return col |
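For context, a minimal usage sketch for the cone() method above. It assumes the method lives on an mplstereonet-style stereonet axes; the mplstereonet import and the 'stereonet' projection name are assumptions, not stated in the row itself.

    # Hypothetical usage; requires matplotlib and the mplstereonet package.
    import matplotlib.pyplot as plt
    import mplstereonet  # assumed dependency registering the 'stereonet' projection

    fig, ax = plt.subplots(subplot_kw=dict(projection='stereonet'))
    # Unfilled small circle with a 15-degree angular radius around a
    # plunge/bearing of 30/110; facecolor/edgecolor are forwarded to the
    # PatchCollection via **kwargs, as the docstring describes.
    ax.cone(30, 110, 15, facecolor='none', edgecolor='black')
    plt.show()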
def container_unfreeze(name, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Unfreeze a container
name :
Name of the container to unfreeze
remote_addr :
A URL to a remote server. You also have to give cert and key if
you provide remote_addr and it is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
container.unfreeze(wait=True)
return _pylxd_model_to_dict(container) | def function[container_unfreeze, parameter[name, remote_addr, cert, key, verify_cert]]:
constant[
Unfreeze a container
name :
Name of the container to unfreeze
remote_addr :
A URL to a remote server. You also have to give cert and key if
you provide remote_addr and it is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
]
variable[container] assign[=] call[name[container_get], parameter[name[name], name[remote_addr], name[cert], name[key], name[verify_cert]]]
call[name[container].unfreeze, parameter[]]
return[call[name[_pylxd_model_to_dict], parameter[name[container]]]] | keyword[def] identifier[container_unfreeze] ( identifier[name] , identifier[remote_addr] = keyword[None] ,
identifier[cert] = keyword[None] , identifier[key] = keyword[None] , identifier[verify_cert] = keyword[True] ):
literal[string]
identifier[container] = identifier[container_get] (
identifier[name] , identifier[remote_addr] , identifier[cert] , identifier[key] , identifier[verify_cert] , identifier[_raw] = keyword[True]
)
identifier[container] . identifier[unfreeze] ( identifier[wait] = keyword[True] )
keyword[return] identifier[_pylxd_model_to_dict] ( identifier[container] ) | def container_unfreeze(name, remote_addr=None, cert=None, key=None, verify_cert=True):
"""
Unfreeze a container
name :
Name of the container to unfreeze
remote_addr :
A URL to a remote server. You also have to give cert and key if
you provide remote_addr and it is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
"""
container = container_get(name, remote_addr, cert, key, verify_cert, _raw=True)
container.unfreeze(wait=True)
return _pylxd_model_to_dict(container) |
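A hedged call sketch for container_unfreeze(), reusing the docstring's own example values; the container name is a placeholder.

    # All values are placeholders taken from or modeled on the docstring above.
    result = container_unfreeze(
        'my-container',
        remote_addr='https://myserver.lan:8443',  # TCP address, so cert/key are required
        cert='~/.config/lxc/client.crt',
        key='~/.config/lxc/client.key',
        verify_cert=False,  # LXD commonly uses self-signed certificates
    )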
def _GetRoutingMap(self, router):
"""Returns a routing map for a given router instance."""
try:
routing_map = self._routing_maps_cache.Get(router.__class__)
except KeyError:
routing_map = self._BuildHttpRoutingMap(router.__class__)
self._routing_maps_cache.Put(router.__class__, routing_map)
return routing_map | def function[_GetRoutingMap, parameter[self, router]]:
constant[Returns a routing map for a given router instance.]
<ast.Try object at 0x7da1b1c3f160>
return[name[routing_map]] | keyword[def] identifier[_GetRoutingMap] ( identifier[self] , identifier[router] ):
literal[string]
keyword[try] :
identifier[routing_map] = identifier[self] . identifier[_routing_maps_cache] . identifier[Get] ( identifier[router] . identifier[__class__] )
keyword[except] identifier[KeyError] :
identifier[routing_map] = identifier[self] . identifier[_BuildHttpRoutingMap] ( identifier[router] . identifier[__class__] )
identifier[self] . identifier[_routing_maps_cache] . identifier[Put] ( identifier[router] . identifier[__class__] , identifier[routing_map] )
keyword[return] identifier[routing_map] | def _GetRoutingMap(self, router):
"""Returns a routing map for a given router instance."""
try:
routing_map = self._routing_maps_cache.Get(router.__class__) # depends on [control=['try'], data=[]]
except KeyError:
routing_map = self._BuildHttpRoutingMap(router.__class__)
self._routing_maps_cache.Put(router.__class__, routing_map) # depends on [control=['except'], data=[]]
return routing_map |
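The method above is a per-class look-aside cache: try the cache, build on miss, store, return. A generic sketch of the same pattern with a plain dict standing in for the cache object (whose Get/Put API is an assumption of that codebase):

    class RoutingMapCache:
        """Dict-backed sketch of the try/except-KeyError caching pattern."""

        def __init__(self, build_map):
            self._cache = {}
            self._build_map = build_map  # e.g. a _BuildHttpRoutingMap-like callable

        def get(self, router):
            cls = router.__class__
            try:
                return self._cache[cls]  # hit: reuse the map built earlier
            except KeyError:
                routing_map = self._build_map(cls)  # miss: build once per class
                self._cache[cls] = routing_map
                return routing_map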
async def deregister(self, service):
"""Deregisters a local service
Parameters:
service (ObjectID): Service ID
Returns:
bool: ``True`` on success
The deregister endpoint is used to remove a service from the local
agent. The agent will take care of deregistering the service with the
Catalog. If there is an associated check, that is also deregistered.
"""
service_id = extract_attr(service, keys=["ServiceID", "ID"])
response = await self._api.get(
"/v1/agent/service/deregister", service_id)
return response.status == 200 | <ast.AsyncFunctionDef object at 0x7da2054a71c0> | keyword[async] keyword[def] identifier[deregister] ( identifier[self] , identifier[service] ):
literal[string]
identifier[service_id] = identifier[extract_attr] ( identifier[service] , identifier[keys] =[ literal[string] , literal[string] ])
identifier[response] = keyword[await] identifier[self] . identifier[_api] . identifier[get] (
literal[string] , identifier[service_id] )
keyword[return] identifier[response] . identifier[status] == literal[int] | async def deregister(self, service):
"""Deregisters a local service
Parameters:
service (ObjectID): Service ID
Returns:
bool: ``True`` on success
The deregister endpoint is used to remove a service from the local
agent. The agent will take care of deregistering the service with the
Catalog. If there is an associated check, that is also deregistered.
"""
service_id = extract_attr(service, keys=['ServiceID', 'ID'])
response = await self._api.get('/v1/agent/service/deregister', service_id)
return response.status == 200 |
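A hedged asyncio sketch of driving the coroutine above; the agent object and service ID are placeholders. Note that extract_attr lets either a plain ID string or an object carrying ServiceID/ID be passed.

    import asyncio

    async def remove_service(agent):
        # 'redis-1' is a placeholder service ID; an object with a
        # ServiceID/ID attribute would work too, per extract_attr above.
        ok = await agent.deregister('redis-1')
        print('deregistered' if ok else 'deregister failed')

    # asyncio.run(remove_service(agent))  # with a previously constructed agent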
def root_open(filename, mode=''):
"""
Open a ROOT file via ROOT's static ROOT.TFile.Open [1] function and return
an asrootpy'd File.
Parameters
----------
filename : string
The absolute or relative path to the ROOT file.
mode : string, optional (default='')
Mode indicating how the file is to be opened. This can be either one
of the options supported by ROOT.TFile.Open [2], or one of `a`, `a+`,
`r`, `r+`, `w` or `w+`, with meanings as for the built-in `open()`
function [3].
Returns
-------
root_file : File
an instance of rootpy's File subclass of ROOT's TFile.
References
----------
.. [1] http://root.cern.ch/root/html/TFile.html#TFile:Open
.. [2] http://root.cern.ch/root/html/TFile.html#TFile:TFile@2
.. [3] https://docs.python.org/2/library/functions.html#open
"""
mode_map = {'a': 'UPDATE',
'a+': 'UPDATE',
'r': 'READ',
'r+': 'UPDATE',
'w': 'RECREATE',
'w+': 'RECREATE'}
if mode in mode_map:
mode = mode_map[mode]
filename = expand_path(filename)
prev_dir = ROOT.gDirectory
root_file = ROOT.R.TFile.Open(filename, mode)
if not root_file:
raise IOError("could not open file: '{0}'".format(filename))
root_file.__class__ = File
root_file._path = filename
root_file._parent = root_file
root_file._prev_dir = prev_dir
# give Python ownership of the TFile so we can delete it
ROOT.SetOwnership(root_file, True)
return root_file | def function[root_open, parameter[filename, mode]]:
constant[
Open a ROOT file via ROOT's static ROOT.TFile.Open [1] function and return
an asrootpy'd File.
Parameters
----------
filename : string
The absolute or relative path to the ROOT file.
mode : string, optional (default='')
Mode indicating how the file is to be opened. This can be either one
of the options supported by ROOT.TFile.Open [2], or one of `a`, `a+`,
`r`, `r+`, `w` or `w+`, with meanings as for the built-in `open()`
function [3].
Returns
-------
root_file : File
an instance of rootpy's File subclass of ROOT's TFile.
References
----------
.. [1] http://root.cern.ch/root/html/TFile.html#TFile:Open
.. [2] http://root.cern.ch/root/html/TFile.html#TFile:TFile@2
.. [3] https://docs.python.org/2/library/functions.html#open
]
variable[mode_map] assign[=] dictionary[[<ast.Constant object at 0x7da1b1193dc0>, <ast.Constant object at 0x7da1b11916c0>, <ast.Constant object at 0x7da1b11925f0>, <ast.Constant object at 0x7da1b11927a0>, <ast.Constant object at 0x7da1b11bd3f0>, <ast.Constant object at 0x7da1b11bf040>], [<ast.Constant object at 0x7da1b11bf5e0>, <ast.Constant object at 0x7da1b11bdc30>, <ast.Constant object at 0x7da1b11bc0d0>, <ast.Constant object at 0x7da1b11bd720>, <ast.Constant object at 0x7da1b11bf400>, <ast.Constant object at 0x7da1b11be200>]]
if compare[name[mode] in name[mode_map]] begin[:]
variable[mode] assign[=] call[name[mode_map]][name[mode]]
variable[filename] assign[=] call[name[expand_path], parameter[name[filename]]]
variable[prev_dir] assign[=] name[ROOT].gDirectory
variable[root_file] assign[=] call[name[ROOT].R.TFile.Open, parameter[name[filename], name[mode]]]
if <ast.UnaryOp object at 0x7da1b11bfeb0> begin[:]
<ast.Raise object at 0x7da1b11bd210>
name[root_file].__class__ assign[=] name[File]
name[root_file]._path assign[=] name[filename]
name[root_file]._parent assign[=] name[root_file]
name[root_file]._prev_dir assign[=] name[prev_dir]
call[name[ROOT].SetOwnership, parameter[name[root_file], constant[True]]]
return[name[root_file]] | keyword[def] identifier[root_open] ( identifier[filename] , identifier[mode] = literal[string] ):
literal[string]
identifier[mode_map] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[if] identifier[mode] keyword[in] identifier[mode_map] :
identifier[mode] = identifier[mode_map] [ identifier[mode] ]
identifier[filename] = identifier[expand_path] ( identifier[filename] )
identifier[prev_dir] = identifier[ROOT] . identifier[gDirectory]
identifier[root_file] = identifier[ROOT] . identifier[R] . identifier[TFile] . identifier[Open] ( identifier[filename] , identifier[mode] )
keyword[if] keyword[not] identifier[root_file] :
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[filename] ))
identifier[root_file] . identifier[__class__] = identifier[File]
identifier[root_file] . identifier[_path] = identifier[filename]
identifier[root_file] . identifier[_parent] = identifier[root_file]
identifier[root_file] . identifier[_prev_dir] = identifier[prev_dir]
identifier[ROOT] . identifier[SetOwnership] ( identifier[root_file] , keyword[True] )
keyword[return] identifier[root_file] | def root_open(filename, mode=''):
"""
Open a ROOT file via ROOT's static ROOT.TFile.Open [1] function and return
an asrootpy'd File.
Parameters
----------
filename : string
The absolute or relative path to the ROOT file.
mode : string, optional (default='')
Mode indicating how the file is to be opened. This can be either one
of the options supported by ROOT.TFile.Open [2], or one of `a`, `a+`,
`r`, `r+`, `w` or `w+`, with meanings as for the built-in `open()`
function [3].
Returns
-------
root_file : File
an instance of rootpy's File subclass of ROOT's TFile.
References
----------
.. [1] http://root.cern.ch/root/html/TFile.html#TFile:Open
.. [2] http://root.cern.ch/root/html/TFile.html#TFile:TFile@2
.. [3] https://docs.python.org/2/library/functions.html#open
"""
mode_map = {'a': 'UPDATE', 'a+': 'UPDATE', 'r': 'READ', 'r+': 'UPDATE', 'w': 'RECREATE', 'w+': 'RECREATE'}
if mode in mode_map:
mode = mode_map[mode] # depends on [control=['if'], data=['mode', 'mode_map']]
filename = expand_path(filename)
prev_dir = ROOT.gDirectory
root_file = ROOT.R.TFile.Open(filename, mode)
if not root_file:
raise IOError("could not open file: '{0}'".format(filename)) # depends on [control=['if'], data=[]]
root_file.__class__ = File
root_file._path = filename
root_file._parent = root_file
root_file._prev_dir = prev_dir
# give Python ownership of the TFile so we can delete it
ROOT.SetOwnership(root_file, True)
return root_file |
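A short usage sketch for root_open(); the file name and object key are placeholders, and the import path and context-manager form are assumptions about rootpy's layout.

    # Requires ROOT and rootpy to be installed; 'data.root' is a placeholder path.
    from rootpy.io import root_open  # import path assumed from rootpy's layout

    with root_open('data.root', 'r') as f:   # 'r' is mapped to ROOT's 'READ' mode
        obj = f.Get('my_histogram')          # hypothetical key inside the file
        print(obj)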
def from_yang(self, text: str) -> ScalarValue:
"""Parse value specified in a YANG module.
Args:
text: String representation of the value.
Raises:
InvalidArgument: If the receiver type cannot parse the text.
"""
res = self.parse_value(text)
if res is None:
raise InvalidArgument(text)
return res | def function[from_yang, parameter[self, text]]:
constant[Parse value specified in a YANG module.
Args:
text: String representation of the value.
Raises:
InvalidArgument: If the receiver type cannot parse the text.
]
variable[res] assign[=] call[name[self].parse_value, parameter[name[text]]]
if compare[name[res] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b05394b0>
return[name[res]] | keyword[def] identifier[from_yang] ( identifier[self] , identifier[text] : identifier[str] )-> identifier[ScalarValue] :
literal[string]
identifier[res] = identifier[self] . identifier[parse_value] ( identifier[text] )
keyword[if] identifier[res] keyword[is] keyword[None] :
keyword[raise] identifier[InvalidArgument] ( identifier[text] )
keyword[return] identifier[res] | def from_yang(self, text: str) -> ScalarValue:
"""Parse value specified in a YANG module.
Args:
text: String representation of the value.
Raises:
InvalidArgument: If the receiver type cannot parse the text.
"""
res = self.parse_value(text)
if res is None:
raise InvalidArgument(text) # depends on [control=['if'], data=[]]
return res |
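from_yang() is a thin parse-or-raise wrapper around parse_value(), which signals failure by returning None. A self-contained sketch of the same contract, with made-up int64 bounds and ValueError standing in for InvalidArgument:

    class Int64Type:
        """Illustrative scalar type following the parse_value/from_yang contract."""

        def parse_value(self, text):
            try:
                val = int(text)
            except ValueError:
                return None  # unparsable input -> None, never an exception
            return val if -2**63 <= val < 2**63 else None

        def from_yang(self, text):
            res = self.parse_value(text)
            if res is None:
                raise ValueError('invalid argument: %r' % text)
            return res

    assert Int64Type().from_yang('42') == 42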
def construct_txt_file(self):
"""Construct the header of the txt file"""
textlines = ['Prediction of noncovalent interactions for PDB structure %s' % self.mol.pymol_name.upper(), ]
textlines.append("=" * len(textlines[0]))
textlines.append('Created on %s using PLIP v%s\n' % (time.strftime("%Y/%m/%d"), __version__))
textlines.append('If you are using PLIP in your work, please cite:')
textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.')
textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n')
if len(self.excluded) != 0:
textlines.append('Excluded molecules as ligands: %s\n' % ','.join([lig for lig in self.excluded]))
if config.DNARECEPTOR:
textlines.append('DNA/RNA in structure was chosen as the receptor part.\n')
return textlines | def function[construct_txt_file, parameter[self]]:
constant[Construct the header of the txt file]
variable[textlines] assign[=] list[[<ast.BinOp object at 0x7da18f00d150>]]
call[name[textlines].append, parameter[binary_operation[constant[=] * call[name[len], parameter[call[name[textlines]][constant[0]]]]]]]
call[name[textlines].append, parameter[binary_operation[constant[Created on %s using PLIP v%s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f00c580>, <ast.Name object at 0x7da18f00e5c0>]]]]]
call[name[textlines].append, parameter[constant[If you are using PLIP in your work, please cite:]]]
call[name[textlines].append, parameter[constant[Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.]]]
call[name[textlines].append, parameter[constant[Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315
]]]
if compare[call[name[len], parameter[name[self].excluded]] not_equal[!=] constant[0]] begin[:]
call[name[textlines].append, parameter[binary_operation[constant[Excluded molecules as ligands: %s
] <ast.Mod object at 0x7da2590d6920> call[constant[,].join, parameter[<ast.ListComp object at 0x7da18f00fe50>]]]]]
if name[config].DNARECEPTOR begin[:]
call[name[textlines].append, parameter[constant[DNA/RNA in structure was chosen as the receptor part.
]]]
return[name[textlines]] | keyword[def] identifier[construct_txt_file] ( identifier[self] ):
literal[string]
identifier[textlines] =[ literal[string] % identifier[self] . identifier[mol] . identifier[pymol_name] . identifier[upper] (),]
identifier[textlines] . identifier[append] ( literal[string] * identifier[len] ( identifier[textlines] [ literal[int] ]))
identifier[textlines] . identifier[append] ( literal[string] %( identifier[time] . identifier[strftime] ( literal[string] ), identifier[__version__] ))
identifier[textlines] . identifier[append] ( literal[string] )
identifier[textlines] . identifier[append] ( literal[string] )
identifier[textlines] . identifier[append] ( literal[string] )
keyword[if] identifier[len] ( identifier[self] . identifier[excluded] )!= literal[int] :
identifier[textlines] . identifier[append] ( literal[string] % literal[string] . identifier[join] ([ identifier[lig] keyword[for] identifier[lig] keyword[in] identifier[self] . identifier[excluded] ]))
keyword[if] identifier[config] . identifier[DNARECEPTOR] :
identifier[textlines] . identifier[append] ( literal[string] )
keyword[return] identifier[textlines] | def construct_txt_file(self):
"""Construct the header of the txt file"""
textlines = ['Prediction of noncovalent interactions for PDB structure %s' % self.mol.pymol_name.upper()]
textlines.append('=' * len(textlines[0]))
textlines.append('Created on %s using PLIP v%s\n' % (time.strftime('%Y/%m/%d'), __version__))
textlines.append('If you are using PLIP in your work, please cite:')
textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.')
textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n')
if len(self.excluded) != 0:
textlines.append('Excluded molecules as ligands: %s\n' % ','.join([lig for lig in self.excluded])) # depends on [control=['if'], data=[]]
if config.DNARECEPTOR:
textlines.append('DNA/RNA in structure was chosen as the receptor part.\n') # depends on [control=['if'], data=[]]
return textlines |
def _reset_seaborn(gallery_conf, fname):
"""Reset seaborn."""
# Horrible code to 'unload' seaborn, so that it resets
# its default when is load
# Python does not support unloading of modules
# https://bugs.python.org/issue9072
for module in list(sys.modules.keys()):
if 'seaborn' in module:
del sys.modules[module] | def function[_reset_seaborn, parameter[gallery_conf, fname]]:
constant[Reset seaborn.]
for target[name[module]] in starred[call[name[list], parameter[call[name[sys].modules.keys, parameter[]]]]] begin[:]
if compare[constant[seaborn] in name[module]] begin[:]
<ast.Delete object at 0x7da18eb54400> | keyword[def] identifier[_reset_seaborn] ( identifier[gallery_conf] , identifier[fname] ):
literal[string]
keyword[for] identifier[module] keyword[in] identifier[list] ( identifier[sys] . identifier[modules] . identifier[keys] ()):
keyword[if] literal[string] keyword[in] identifier[module] :
keyword[del] identifier[sys] . identifier[modules] [ identifier[module] ] | def _reset_seaborn(gallery_conf, fname):
"""Reset seaborn."""
# Horrible code to 'unload' seaborn, so that it resets
# its default when is load
# Python does not support unloading of modules
# https://bugs.python.org/issue9072
for module in list(sys.modules.keys()):
if 'seaborn' in module:
del sys.modules[module] # depends on [control=['if'], data=['module']] # depends on [control=['for'], data=['module']] |
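The loop above works around Python's lack of real module unloading (the bpo-9072 limitation cited in the comments) by evicting entries from sys.modules so the next import re-executes seaborn's import-time defaults. A generalized sketch:

    import sys

    def unload_modules(substring):
        """Evict every cached module whose name contains substring."""
        for name in list(sys.modules):  # copy: we mutate sys.modules while iterating
            if substring in name:
                del sys.modules[name]

    unload_modules('seaborn')
    # import seaborn  # would now re-run seaborn's module-level setup (if installed)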
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[(self.cur % self.num_image) - 1]
else:
idx = (self.cur % self.num_image) - 1
if self.imglist is not None:
_, fname = self.imglist[idx]
msg = "filename: {}".format(fname)
else:
msg = "index: {}".format(idx)
return "Broken image " + msg
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError("{}, {}".format(locate(), e))
return img | def function[imdecode, parameter[self, s]]:
constant[Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details.]
def function[locate, parameter[]]:
constant[Locate the image file/index if decode fails.]
if compare[name[self].seq is_not constant[None]] begin[:]
variable[idx] assign[=] call[name[self].seq][binary_operation[binary_operation[name[self].cur <ast.Mod object at 0x7da2590d6920> name[self].num_image] - constant[1]]]
if compare[name[self].imglist is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da20c76cbe0> assign[=] call[name[self].imglist][name[idx]]
variable[msg] assign[=] call[constant[filename: {}].format, parameter[name[fname]]]
return[binary_operation[constant[Broken image ] + name[msg]]]
<ast.Try object at 0x7da18fe930d0>
return[name[img]] | keyword[def] identifier[imdecode] ( identifier[self] , identifier[s] ):
literal[string]
keyword[def] identifier[locate] ():
literal[string]
keyword[if] identifier[self] . identifier[seq] keyword[is] keyword[not] keyword[None] :
identifier[idx] = identifier[self] . identifier[seq] [( identifier[self] . identifier[cur] % identifier[self] . identifier[num_image] )- literal[int] ]
keyword[else] :
identifier[idx] =( identifier[self] . identifier[cur] % identifier[self] . identifier[num_image] )- literal[int]
keyword[if] identifier[self] . identifier[imglist] keyword[is] keyword[not] keyword[None] :
identifier[_] , identifier[fname] = identifier[self] . identifier[imglist] [ identifier[idx] ]
identifier[msg] = literal[string] . identifier[format] ( identifier[fname] )
keyword[else] :
identifier[msg] = literal[string] . identifier[format] ( identifier[idx] )
keyword[return] literal[string] + identifier[msg]
keyword[try] :
identifier[img] = identifier[imdecode] ( identifier[s] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[locate] (), identifier[e] ))
keyword[return] identifier[img] | def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[self.cur % self.num_image - 1] # depends on [control=['if'], data=[]]
else:
idx = self.cur % self.num_image - 1
if self.imglist is not None:
(_, fname) = self.imglist[idx]
msg = 'filename: {}'.format(fname) # depends on [control=['if'], data=[]]
else:
msg = 'index: {}'.format(idx)
return 'Broken image ' + msg
try:
img = imdecode(s) # depends on [control=['try'], data=[]]
except Exception as e:
raise RuntimeError('{}, {}'.format(locate(), e)) # depends on [control=['except'], data=['e']]
return img |
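The locate() closure above delays building an error-context string until decoding actually fails, so the happy path pays nothing. The same pattern detached from the iterator internals (decode, payload, and filenames are caller-supplied placeholders):

    def decode_with_context(decode, payload, index, filenames=None):
        def locate():
            # Only evaluated on failure.
            if filenames is not None:
                return 'filename: {}'.format(filenames[index])
            return 'index: {}'.format(index)

        try:
            return decode(payload)
        except Exception as e:
            raise RuntimeError('Broken image {}, {}'.format(locate(), e))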
def get_month(self):
"""
Return the month from the database in the format expected by the URL.
"""
year = super(BuildableDayArchiveView, self).get_year()
month = super(BuildableDayArchiveView, self).get_month()
fmt = self.get_month_format()
dt = date(int(year), int(month), 1)
return dt.strftime(fmt) | def function[get_month, parameter[self]]:
constant[
Return the month from the database in the format expected by the URL.
]
variable[year] assign[=] call[call[name[super], parameter[name[BuildableDayArchiveView], name[self]]].get_year, parameter[]]
variable[month] assign[=] call[call[name[super], parameter[name[BuildableDayArchiveView], name[self]]].get_month, parameter[]]
variable[fmt] assign[=] call[name[self].get_month_format, parameter[]]
variable[dt] assign[=] call[name[date], parameter[call[name[int], parameter[name[year]]], call[name[int], parameter[name[month]]], constant[1]]]
return[call[name[dt].strftime, parameter[name[fmt]]]] | keyword[def] identifier[get_month] ( identifier[self] ):
literal[string]
identifier[year] = identifier[super] ( identifier[BuildableDayArchiveView] , identifier[self] ). identifier[get_year] ()
identifier[month] = identifier[super] ( identifier[BuildableDayArchiveView] , identifier[self] ). identifier[get_month] ()
identifier[fmt] = identifier[self] . identifier[get_month_format] ()
identifier[dt] = identifier[date] ( identifier[int] ( identifier[year] ), identifier[int] ( identifier[month] ), literal[int] )
keyword[return] identifier[dt] . identifier[strftime] ( identifier[fmt] ) | def get_month(self):
"""
Return the month from the database in the format expected by the URL.
"""
year = super(BuildableDayArchiveView, self).get_year()
month = super(BuildableDayArchiveView, self).get_month()
fmt = self.get_month_format()
dt = date(int(year), int(month), 1)
return dt.strftime(fmt) |
def construct_pipeline_block(env='',
generated=None,
previous_env='',
region='us-east-1',
settings=None,
pipeline_data=None,
region_subnets=None,
**kwargs):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Note:
ASG Health Check type is overridden to `EC2` when deploying to **dev** or
using :ref:`eureka_enabled`.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
kwargs (dict): Extra variables to pass to Pipeline Templates.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
pipeline_data (dict): Pipeline settings from configurations
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
LOG.debug('%s info:\n%s', env, pformat(settings))
pipeline_type = pipeline_data['type']
if pipeline_type in EC2_PIPELINE_TYPES:
data = ec2_pipeline_setup(
generated=generated,
settings=settings,
env=env,
region=region,
pipeline_type=pipeline_type,
project=generated.project,
region_subnets=region_subnets,
)
else:
data = copy.deepcopy(settings)
data['app'].update({
'appname': generated.app_name(),
'repo_name': generated.repo,
'group_name': generated.project,
'environment': env,
'region': region,
'previous_env': previous_env,
'promote_restrict': pipeline_data['promote_restrict'],
'owner_email': pipeline_data['owner_email'],
'pipeline': pipeline_data,
})
LOG.debug('Block data:\n%s', pformat(data))
template_name = get_template_name(env, pipeline_type)
pipeline_json = get_template(template_file=template_name, data=data, formats=generated, **kwargs)
return pipeline_json | def function[construct_pipeline_block, parameter[env, generated, previous_env, region, settings, pipeline_data, region_subnets]]:
constant[Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Note:
ASG Health Check type is overridden to `EC2` when deploying to **dev** or
using :ref:`eureka_enabled`.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
kwargs (dict): Extra variables to pass to Pipeline Templates.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
pipeline_data (dict): Pipeline settings from configurations
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Pipeline JSON template rendered with configurations.
]
call[name[LOG].info, parameter[constant[%s block for [%s].], name[env], name[region]]]
call[name[LOG].debug, parameter[constant[%s info:
%s], name[env], call[name[pformat], parameter[name[settings]]]]]
variable[pipeline_type] assign[=] call[name[pipeline_data]][constant[type]]
if compare[name[pipeline_type] in name[EC2_PIPELINE_TYPES]] begin[:]
variable[data] assign[=] call[name[ec2_pipeline_setup], parameter[]]
call[call[name[data]][constant[app]].update, parameter[dictionary[[<ast.Constant object at 0x7da20c6e55a0>, <ast.Constant object at 0x7da20c6e53f0>, <ast.Constant object at 0x7da20c6e7220>, <ast.Constant object at 0x7da20c6e4850>, <ast.Constant object at 0x7da20c6e7550>, <ast.Constant object at 0x7da20c6e7f70>, <ast.Constant object at 0x7da20c6e4fa0>, <ast.Constant object at 0x7da20c6e4a00>, <ast.Constant object at 0x7da20c6e5bd0>], [<ast.Call object at 0x7da20c6e74c0>, <ast.Attribute object at 0x7da20c6e7850>, <ast.Attribute object at 0x7da207f99090>, <ast.Name object at 0x7da207f989d0>, <ast.Name object at 0x7da207f99b70>, <ast.Name object at 0x7da207f9a9b0>, <ast.Subscript object at 0x7da207f98910>, <ast.Subscript object at 0x7da207f9b640>, <ast.Name object at 0x7da207f9bc10>]]]]
call[name[LOG].debug, parameter[constant[Block data:
%s], call[name[pformat], parameter[name[data]]]]]
variable[template_name] assign[=] call[name[get_template_name], parameter[name[env], name[pipeline_type]]]
variable[pipeline_json] assign[=] call[name[get_template], parameter[]]
return[name[pipeline_json]] | keyword[def] identifier[construct_pipeline_block] ( identifier[env] = literal[string] ,
identifier[generated] = keyword[None] ,
identifier[previous_env] = literal[string] ,
identifier[region] = literal[string] ,
identifier[settings] = keyword[None] ,
identifier[pipeline_data] = keyword[None] ,
identifier[region_subnets] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] , identifier[env] , identifier[region] )
identifier[LOG] . identifier[debug] ( literal[string] , identifier[env] , identifier[pformat] ( identifier[settings] ))
identifier[pipeline_type] = identifier[pipeline_data] [ literal[string] ]
keyword[if] identifier[pipeline_type] keyword[in] identifier[EC2_PIPELINE_TYPES] :
identifier[data] = identifier[ec2_pipeline_setup] (
identifier[generated] = identifier[generated] ,
identifier[settings] = identifier[settings] ,
identifier[env] = identifier[env] ,
identifier[region] = identifier[region] ,
identifier[pipeline_type] = identifier[pipeline_type] ,
identifier[project] = identifier[generated] . identifier[project] ,
identifier[region_subnets] = identifier[region_subnets] ,
)
keyword[else] :
identifier[data] = identifier[copy] . identifier[deepcopy] ( identifier[settings] )
identifier[data] [ literal[string] ]. identifier[update] ({
literal[string] : identifier[generated] . identifier[app_name] (),
literal[string] : identifier[generated] . identifier[repo] ,
literal[string] : identifier[generated] . identifier[project] ,
literal[string] : identifier[env] ,
literal[string] : identifier[region] ,
literal[string] : identifier[previous_env] ,
literal[string] : identifier[pipeline_data] [ literal[string] ],
literal[string] : identifier[pipeline_data] [ literal[string] ],
literal[string] : identifier[pipeline_data] ,
})
identifier[LOG] . identifier[debug] ( literal[string] , identifier[pformat] ( identifier[data] ))
identifier[template_name] = identifier[get_template_name] ( identifier[env] , identifier[pipeline_type] )
identifier[pipeline_json] = identifier[get_template] ( identifier[template_file] = identifier[template_name] , identifier[data] = identifier[data] , identifier[formats] = identifier[generated] ,** identifier[kwargs] )
keyword[return] identifier[pipeline_json] | def construct_pipeline_block(env='', generated=None, previous_env='', region='us-east-1', settings=None, pipeline_data=None, region_subnets=None, **kwargs):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Note:
ASG Health Check type is overridden to `EC2` when deploying to **dev** or
using :ref:`eureka_enabled`.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
kwargs (dict): Extra variables to pass to Pipeline Templates.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
pipeline_data (dict): Pipeline settings from configurations
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
LOG.debug('%s info:\n%s', env, pformat(settings))
pipeline_type = pipeline_data['type']
if pipeline_type in EC2_PIPELINE_TYPES:
data = ec2_pipeline_setup(generated=generated, settings=settings, env=env, region=region, pipeline_type=pipeline_type, project=generated.project, region_subnets=region_subnets) # depends on [control=['if'], data=['pipeline_type']]
else:
data = copy.deepcopy(settings)
data['app'].update({'appname': generated.app_name(), 'repo_name': generated.repo, 'group_name': generated.project, 'environment': env, 'region': region, 'previous_env': previous_env, 'promote_restrict': pipeline_data['promote_restrict'], 'owner_email': pipeline_data['owner_email'], 'pipeline': pipeline_data})
LOG.debug('Block data:\n%s', pformat(data))
template_name = get_template_name(env, pipeline_type)
pipeline_json = get_template(template_file=template_name, data=data, formats=generated, **kwargs)
return pipeline_json |
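A hedged invocation sketch for construct_pipeline_block(); every value below is a placeholder, and the generator and settings objects only loosely mirror what gogoutils and the configuration loader would provide.

    pipeline_json = construct_pipeline_block(
        env='stage',
        generated=generator,               # a gogoutils.Generator-like object
        previous_env='dev',                # previous environment used as trigger
        region='us-west-2',
        settings=env_settings,             # per-environment configuration dict
        pipeline_data={
            'type': 'ec2',                 # placeholder; EC2 types take the ec2_pipeline_setup branch
            'promote_restrict': 'none',
            'owner_email': 'team@example.com',
        },
        region_subnets={'us-west-2': ['us-west-2a', 'us-west-2b']},
    )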
def delete(self, alert_condition_nrql_id):
"""
This API endpoint allows you to delete an alert condition nrql
:type alert_condition_nrql_id: integer
:param alert_condition_nrql_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"nrql_condition": {
"type": "string",
"id": "integer",
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
return self._delete(
url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id),
headers=self.headers
) | def function[delete, parameter[self, alert_condition_nrql_id]]:
constant[
This API endpoint allows you to delete an alert condition nrql
:type alert_condition_nrql_id: integer
:param alert_condition_nrql_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"nrql_condition": {
"type": "string",
"id": "integer",
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
]
return[call[name[self]._delete, parameter[]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[alert_condition_nrql_id] ):
literal[string]
keyword[return] identifier[self] . identifier[_delete] (
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[URL] , identifier[alert_condition_nrql_id] ),
identifier[headers] = identifier[self] . identifier[headers]
) | def delete(self, alert_condition_nrql_id):
"""
This API endpoint allows you to delete an alert condition nrql
:type alert_condition_nrql_id: integer
:param alert_condition_nrql_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"nrql_condition": {
"type": "string",
"id": "integer",
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
return self._delete(url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id), headers=self.headers) |
def is_descendant_of_catalog(self, id_, catalog_id):
"""Tests if an ``Id`` is a descendant of a catalog.
arg: id (osid.id.Id): an ``Id``
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``catalog_id,`` ``false`` otherwise
raise: NotFound - ``catalog_id`` not found
raise: NullArgument - ``catalog_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=catalog_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=catalog_id) | def function[is_descendant_of_catalog, parameter[self, id_, catalog_id]]:
constant[Tests if an ``Id`` is a descendant of a catalog.
arg: id (osid.id.Id): an ``Id``
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``catalog_id,`` ``false`` otherwise
raise: NotFound - ``catalog_id`` not found
raise: NullArgument - ``catalog_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.is_descendant_of_catalog, parameter[]]]
return[call[name[self]._hierarchy_session.is_descendant, parameter[]]] | keyword[def] identifier[is_descendant_of_catalog] ( identifier[self] , identifier[id_] , identifier[catalog_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[is_descendant_of_catalog] ( identifier[id_] = identifier[id_] , identifier[catalog_id] = identifier[catalog_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[is_descendant] ( identifier[id_] = identifier[id_] , identifier[descendant_id] = identifier[catalog_id] ) | def is_descendant_of_catalog(self, id_, catalog_id):
"""Tests if an ``Id`` is a descendant of a catalog.
arg: id (osid.id.Id): an ``Id``
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``catalog_id,`` ``false`` otherwise
raise: NotFound - ``catalog_id`` not found
raise: NullArgument - ``catalog_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=catalog_id) # depends on [control=['if'], data=[]]
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=catalog_id) |
def download(self, temp_ver, store_metadata=True):
"""
Retrieve the given template version
Args:
temp_ver (TemplateVersion): template version to retrieve
store_metadata (bool): If set to ``False``, will not refresh the
local metadata with the retrieved one
Returns:
None
"""
dest = self._prefixed(temp_ver.name)
temp_dest = '%s.tmp' % dest
with utils.LockFile(dest + '.lock'):
# Image was downloaded while we were waiting
if os.path.exists(dest):
return
temp_ver.download(temp_dest)
if store_metadata:
with open('%s.metadata' % dest, 'w') as f:
utils.json_dump(temp_ver.get_metadata(), f)
sha1 = utils.get_hash(temp_dest)
if temp_ver.get_hash() != sha1:
raise RuntimeError(
'Image %s does not match the expected hash %s' % (
temp_ver.name,
sha1,
)
)
with open('%s.hash' % dest, 'w') as f:
f.write(sha1)
with log_utils.LogTask('Convert image', logger=LOGGER):
result = utils.run_command(
[
'qemu-img',
'convert',
'-O',
'raw',
temp_dest,
dest,
],
)
os.unlink(temp_dest)
if result:
raise RuntimeError(result.err) | def function[download, parameter[self, temp_ver, store_metadata]]:
constant[
Retrieve the given template version
Args:
temp_ver (TemplateVersion): template version to retrieve
store_metadata (bool): If set to ``False``, will not refresh the
local metadata with the retrieved one
Returns:
None
]
variable[dest] assign[=] call[name[self]._prefixed, parameter[name[temp_ver].name]]
variable[temp_dest] assign[=] binary_operation[constant[%s.tmp] <ast.Mod object at 0x7da2590d6920> name[dest]]
with call[name[utils].LockFile, parameter[binary_operation[name[dest] + constant[.lock]]]] begin[:]
if call[name[os].path.exists, parameter[name[dest]]] begin[:]
return[None]
call[name[temp_ver].download, parameter[name[temp_dest]]]
if name[store_metadata] begin[:]
with call[name[open], parameter[binary_operation[constant[%s.metadata] <ast.Mod object at 0x7da2590d6920> name[dest]], constant[w]]] begin[:]
call[name[utils].json_dump, parameter[call[name[temp_ver].get_metadata, parameter[]], name[f]]]
variable[sha1] assign[=] call[name[utils].get_hash, parameter[name[temp_dest]]]
if compare[call[name[temp_ver].get_hash, parameter[]] not_equal[!=] name[sha1]] begin[:]
<ast.Raise object at 0x7da18f00c430>
with call[name[open], parameter[binary_operation[constant[%s.hash] <ast.Mod object at 0x7da2590d6920> name[dest]], constant[w]]] begin[:]
call[name[f].write, parameter[name[sha1]]]
with call[name[log_utils].LogTask, parameter[constant[Convert image]]] begin[:]
variable[result] assign[=] call[name[utils].run_command, parameter[list[[<ast.Constant object at 0x7da18f00ef50>, <ast.Constant object at 0x7da18f00d720>, <ast.Constant object at 0x7da18f00f970>, <ast.Constant object at 0x7da18f00cca0>, <ast.Name object at 0x7da18f00dc60>, <ast.Name object at 0x7da18f00ec50>]]]]
call[name[os].unlink, parameter[name[temp_dest]]]
if name[result] begin[:]
<ast.Raise object at 0x7da18f00da50> | keyword[def] identifier[download] ( identifier[self] , identifier[temp_ver] , identifier[store_metadata] = keyword[True] ):
literal[string]
identifier[dest] = identifier[self] . identifier[_prefixed] ( identifier[temp_ver] . identifier[name] )
identifier[temp_dest] = literal[string] % identifier[dest]
keyword[with] identifier[utils] . identifier[LockFile] ( identifier[dest] + literal[string] ):
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[dest] ):
keyword[return]
identifier[temp_ver] . identifier[download] ( identifier[temp_dest] )
keyword[if] identifier[store_metadata] :
keyword[with] identifier[open] ( literal[string] % identifier[dest] , literal[string] ) keyword[as] identifier[f] :
identifier[utils] . identifier[json_dump] ( identifier[temp_ver] . identifier[get_metadata] (), identifier[f] )
identifier[sha1] = identifier[utils] . identifier[get_hash] ( identifier[temp_dest] )
keyword[if] identifier[temp_ver] . identifier[get_hash] ()!= identifier[sha1] :
keyword[raise] identifier[RuntimeError] (
literal[string] %(
identifier[temp_ver] . identifier[name] ,
identifier[sha1] ,
)
)
keyword[with] identifier[open] ( literal[string] % identifier[dest] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[sha1] )
keyword[with] identifier[log_utils] . identifier[LogTask] ( literal[string] , identifier[logger] = identifier[LOGGER] ):
identifier[result] = identifier[utils] . identifier[run_command] (
[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
identifier[temp_dest] ,
identifier[dest] ,
],
)
identifier[os] . identifier[unlink] ( identifier[temp_dest] )
keyword[if] identifier[result] :
keyword[raise] identifier[RuntimeError] ( identifier[result] . identifier[err] ) | def download(self, temp_ver, store_metadata=True):
"""
Retrieve the given template version
Args:
temp_ver (TemplateVersion): template version to retrieve
store_metadata (bool): If set to ``False``, will not refresh the
local metadata with the retrieved one
Returns:
None
"""
dest = self._prefixed(temp_ver.name)
temp_dest = '%s.tmp' % dest
with utils.LockFile(dest + '.lock'):
# Image was downloaded while we were waiting
if os.path.exists(dest):
return # depends on [control=['if'], data=[]]
temp_ver.download(temp_dest)
if store_metadata:
with open('%s.metadata' % dest, 'w') as f:
utils.json_dump(temp_ver.get_metadata(), f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
sha1 = utils.get_hash(temp_dest)
if temp_ver.get_hash() != sha1:
raise RuntimeError('Image %s does not match the expected hash %s' % (temp_ver.name, sha1)) # depends on [control=['if'], data=['sha1']]
with open('%s.hash' % dest, 'w') as f:
f.write(sha1) # depends on [control=['with'], data=['f']]
with log_utils.LogTask('Convert image', logger=LOGGER):
result = utils.run_command(['qemu-img', 'convert', '-O', 'raw', temp_dest, dest])
os.unlink(temp_dest)
if result:
raise RuntimeError(result.err) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] |
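download() combines a lock file, a double-check for an already-finished download, a temp-file fetch, and a SHA1 comparison before committing. A generic sketch of that flow (locking omitted; fetch is a caller-supplied downloader):

    import hashlib
    import os

    def fetch_verified(url, dest, expected_sha1, fetch):
        if os.path.exists(dest):  # another worker finished while we waited
            return
        tmp = dest + '.tmp'
        fetch(url, tmp)  # download to a temp path first
        sha1 = hashlib.sha1()
        with open(tmp, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                sha1.update(chunk)
        if sha1.hexdigest() != expected_sha1:
            os.unlink(tmp)
            raise RuntimeError('hash mismatch for %s' % url)
        os.rename(tmp, dest)  # commit; atomic on POSIX within one filesystem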
def simxCreateDummy(clientID, size, color, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = ct.c_int()
if color != None:
c_color = (ct.c_ubyte*12)(*color)
else:
c_color = None
return c_CreateDummy(clientID, size, c_color, ct.byref(handle), operationMode), handle.value | def function[simxCreateDummy, parameter[clientID, size, color, operationMode]]:
constant[
Please have a look at the function description/documentation in the V-REP user manual
]
variable[handle] assign[=] call[name[ct].c_int, parameter[]]
if compare[name[color] not_equal[!=] constant[None]] begin[:]
variable[c_color] assign[=] call[binary_operation[name[ct].c_ubyte * constant[12]], parameter[<ast.Starred object at 0x7da18f58e380>]]
return[tuple[[<ast.Call object at 0x7da18f58eb00>, <ast.Attribute object at 0x7da18f58c490>]]] | keyword[def] identifier[simxCreateDummy] ( identifier[clientID] , identifier[size] , identifier[color] , identifier[operationMode] ):
literal[string]
identifier[handle] = identifier[ct] . identifier[c_int] ()
keyword[if] identifier[color] != keyword[None] :
identifier[c_color] =( identifier[ct] . identifier[c_ubyte] * literal[int] )(* identifier[color] )
keyword[else] :
identifier[c_color] = keyword[None]
keyword[return] identifier[c_CreateDummy] ( identifier[clientID] , identifier[size] , identifier[c_color] , identifier[ct] . identifier[byref] ( identifier[handle] ), identifier[operationMode] ), identifier[handle] . identifier[value] | def simxCreateDummy(clientID, size, color, operationMode):
"""
Please have a look at the function description/documentation in the V-REP user manual
"""
handle = ct.c_int()
if color != None:
c_color = (ct.c_ubyte * 12)(*color) # depends on [control=['if'], data=['color']]
else:
c_color = None
return (c_CreateDummy(clientID, size, c_color, ct.byref(handle), operationMode), handle.value) |
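The (ct.c_ubyte*12)(*color) idiom above instantiates a fixed-size C array type from a Python sequence, and ct.byref(handle) hands the remote-API call a writable out-parameter. A standalone illustration (the 12-byte color layout is an assumption about V-REP's convention):

    import ctypes as ct

    color = [255, 0, 0] * 4                # placeholder 12-byte color buffer
    c_color = (ct.c_ubyte * 12)(*color)    # array type created, then instantiated
    handle = ct.c_int()
    # A C function taking 'int *' would receive ct.byref(handle) and write into it.
    print(list(c_color), handle.value)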
def _mod_run_check(cmd_kwargs, onlyif, unless):
'''
Execute the onlyif and unless logic.
Return a result dict if:
* onlyif failed (onlyif != 0)
* unless succeeded (unless == 0)
else return True
'''
if onlyif:
if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
return {'comment': 'onlyif condition is false',
'skip_watch': True,
'result': True}
if unless:
if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
return {'comment': 'unless condition is true',
'skip_watch': True,
'result': True}
# No reason to stop, return True
return True | def function[_mod_run_check, parameter[cmd_kwargs, onlyif, unless]]:
constant[
Execute the onlyif and unless logic.
Return a result dict if:
* onlyif failed (onlyif != 0)
* unless succeeded (unless == 0)
else return True
]
if name[onlyif] begin[:]
if compare[call[call[name[__salt__]][constant[cmd.retcode]], parameter[name[onlyif]]] not_equal[!=] constant[0]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b21a3910>, <ast.Constant object at 0x7da1b21a2890>, <ast.Constant object at 0x7da1b21a1ba0>], [<ast.Constant object at 0x7da1b21a3a00>, <ast.Constant object at 0x7da1b21a13f0>, <ast.Constant object at 0x7da1b21a3b20>]]]
if name[unless] begin[:]
if compare[call[call[name[__salt__]][constant[cmd.retcode]], parameter[name[unless]]] equal[==] constant[0]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b21a3550>, <ast.Constant object at 0x7da1b21a09d0>, <ast.Constant object at 0x7da1b21a00d0>], [<ast.Constant object at 0x7da1b21a0880>, <ast.Constant object at 0x7da1b21a21a0>, <ast.Constant object at 0x7da1b21a0f70>]]]
return[constant[True]] | keyword[def] identifier[_mod_run_check] ( identifier[cmd_kwargs] , identifier[onlyif] , identifier[unless] ):
literal[string]
keyword[if] identifier[onlyif] :
keyword[if] identifier[__salt__] [ literal[string] ]( identifier[onlyif] ,** identifier[cmd_kwargs] )!= literal[int] :
keyword[return] { literal[string] : literal[string] ,
literal[string] : keyword[True] ,
literal[string] : keyword[True] }
keyword[if] identifier[unless] :
keyword[if] identifier[__salt__] [ literal[string] ]( identifier[unless] ,** identifier[cmd_kwargs] )== literal[int] :
keyword[return] { literal[string] : literal[string] ,
literal[string] : keyword[True] ,
literal[string] : keyword[True] }
keyword[return] keyword[True] | def _mod_run_check(cmd_kwargs, onlyif, unless):
"""
Execute the onlyif and unless logic.
Return a result dict if:
* onlyif failed (onlyif != 0)
* unless succeeded (unless == 0)
else return True
"""
if onlyif:
if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if unless:
if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# No reason to stop, return True
return True |
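
Outside of Salt, __salt__['cmd.retcode'] is injected at runtime, so a standalone illustration of the same onlyif/unless semantics can use subprocess return codes instead; the 'true' command assumes a POSIX shell.

import subprocess

def run_check(onlyif=None, unless=None):
    """Stand-in for __salt__['cmd.retcode'] built on subprocess."""
    if onlyif and subprocess.call(onlyif, shell=True) != 0:
        return {'comment': 'onlyif condition is false',
                'skip_watch': True,
                'result': True}
    if unless and subprocess.call(unless, shell=True) == 0:
        return {'comment': 'unless condition is true',
                'skip_watch': True,
                'result': True}
    return True

print(run_check(onlyif='true'))   # True: onlyif exited 0, so the state runs
print(run_check(unless='true'))   # dict: unless exited 0, so the state is skipped
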
def close(self):
""" Prompt the objects to output pdf code, and save to file. """
self.document._set_page_numbers()
# Places header, pages, page content first.
self._put_header()
self._put_pages()
self._put_resources()
# Information object
self._put_information()
# Catalog object
self._put_catalog()
# Cross-reference object
#self._put_cross_reference()
# Trailer object
self._put_trailer()
if hasattr(self.destination, "write"):
output = self._output_to_io()
elif self.destination == 'string':
output = self._output_to_string()
else:
self._output_to_file()
output = None
return output | def function[close, parameter[self]]:
constant[ Prompt the objects to output pdf code, and save to file. ]
call[name[self].document._set_page_numbers, parameter[]]
call[name[self]._put_header, parameter[]]
call[name[self]._put_pages, parameter[]]
call[name[self]._put_resources, parameter[]]
call[name[self]._put_information, parameter[]]
call[name[self]._put_catalog, parameter[]]
call[name[self]._put_trailer, parameter[]]
if call[name[hasattr], parameter[name[self].destination, constant[write]]] begin[:]
variable[output] assign[=] call[name[self]._output_to_io, parameter[]]
return[name[output]] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[self] . identifier[document] . identifier[_set_page_numbers] ()
identifier[self] . identifier[_put_header] ()
identifier[self] . identifier[_put_pages] ()
identifier[self] . identifier[_put_resources] ()
identifier[self] . identifier[_put_information] ()
identifier[self] . identifier[_put_catalog] ()
identifier[self] . identifier[_put_trailer] ()
keyword[if] identifier[hasattr] ( identifier[self] . identifier[destination] , literal[string] ):
identifier[output] = identifier[self] . identifier[_output_to_io] ()
keyword[elif] identifier[self] . identifier[destination] == literal[string] :
identifier[output] = identifier[self] . identifier[_output_to_string] ()
keyword[else] :
identifier[self] . identifier[_output_to_file] ()
identifier[output] = keyword[None]
keyword[return] identifier[output] | def close(self):
""" Prompt the objects to output pdf code, and save to file. """
self.document._set_page_numbers() # Places header, pages, page content first.
self._put_header()
self._put_pages()
self._put_resources() # Information object
self._put_information() # Catalog object
self._put_catalog() # Cross-reference object
#self._put_cross_reference()
# Trailer object
self._put_trailer()
if hasattr(self.destination, 'write'):
output = self._output_to_io() # depends on [control=['if'], data=[]]
elif self.destination == 'string':
output = self._output_to_string() # depends on [control=['if'], data=[]]
else:
self._output_to_file()
output = None
return output |
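
The destination dispatch at the end of close() boils down to three cases, which this minimal standalone sketch reproduces with a hypothetical write_output helper; the PDF bytes are illustrative.

import io

def write_output(data, destination):
    """Minimal sketch of the dispatch at the end of close()."""
    if hasattr(destination, 'write'):    # file-like object: write into it
        destination.write(data)
        return destination
    if destination == 'string':          # caller wants the raw data back
        return data
    with open(destination, 'wb') as f:   # anything else is treated as a path
        f.write(data)
    return None

buf = write_output(b'%PDF-1.4 ...', io.BytesIO())
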
def build_vocab(self, *args, **kwargs):
"""Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab.
"""
counter = Counter()
sources = []
for arg in args:
if isinstance(arg, Dataset):
sources += [getattr(arg, name) for name, field in
arg.fields.items() if field is self]
else:
sources.append(arg)
for data in sources:
for x in data:
if not self.sequential:
x = [x]
try:
counter.update(x)
except TypeError:
counter.update(chain.from_iterable(x))
specials = list(OrderedDict.fromkeys(
tok for tok in [self.unk_token, self.pad_token, self.init_token,
self.eos_token] + kwargs.pop('specials', [])
if tok is not None))
self.vocab = self.vocab_cls(counter, specials=specials, **kwargs) | def function[build_vocab, parameter[self]]:
constant[Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab.
]
variable[counter] assign[=] call[name[Counter], parameter[]]
variable[sources] assign[=] list[[]]
for taget[name[arg]] in starred[name[args]] begin[:]
if call[name[isinstance], parameter[name[arg], name[Dataset]]] begin[:]
<ast.AugAssign object at 0x7da1b2178940>
for taget[name[data]] in starred[name[sources]] begin[:]
for taget[name[x]] in starred[name[data]] begin[:]
if <ast.UnaryOp object at 0x7da18dc9a3e0> begin[:]
variable[x] assign[=] list[[<ast.Name object at 0x7da18dc9bdc0>]]
<ast.Try object at 0x7da18dc994b0>
variable[specials] assign[=] call[name[list], parameter[call[name[OrderedDict].fromkeys, parameter[<ast.GeneratorExp object at 0x7da18dc9b7f0>]]]]
name[self].vocab assign[=] call[name[self].vocab_cls, parameter[name[counter]]] | keyword[def] identifier[build_vocab] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[counter] = identifier[Counter] ()
identifier[sources] =[]
keyword[for] identifier[arg] keyword[in] identifier[args] :
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[Dataset] ):
identifier[sources] +=[ identifier[getattr] ( identifier[arg] , identifier[name] ) keyword[for] identifier[name] , identifier[field] keyword[in]
identifier[arg] . identifier[fields] . identifier[items] () keyword[if] identifier[field] keyword[is] identifier[self] ]
keyword[else] :
identifier[sources] . identifier[append] ( identifier[arg] )
keyword[for] identifier[data] keyword[in] identifier[sources] :
keyword[for] identifier[x] keyword[in] identifier[data] :
keyword[if] keyword[not] identifier[self] . identifier[sequential] :
identifier[x] =[ identifier[x] ]
keyword[try] :
identifier[counter] . identifier[update] ( identifier[x] )
keyword[except] identifier[TypeError] :
identifier[counter] . identifier[update] ( identifier[chain] . identifier[from_iterable] ( identifier[x] ))
identifier[specials] = identifier[list] ( identifier[OrderedDict] . identifier[fromkeys] (
identifier[tok] keyword[for] identifier[tok] keyword[in] [ identifier[self] . identifier[unk_token] , identifier[self] . identifier[pad_token] , identifier[self] . identifier[init_token] ,
identifier[self] . identifier[eos_token] ]+ identifier[kwargs] . identifier[pop] ( literal[string] ,[])
keyword[if] identifier[tok] keyword[is] keyword[not] keyword[None] ))
identifier[self] . identifier[vocab] = identifier[self] . identifier[vocab_cls] ( identifier[counter] , identifier[specials] = identifier[specials] ,** identifier[kwargs] ) | def build_vocab(self, *args, **kwargs):
"""Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab.
"""
counter = Counter()
sources = []
for arg in args:
if isinstance(arg, Dataset):
sources += [getattr(arg, name) for (name, field) in arg.fields.items() if field is self] # depends on [control=['if'], data=[]]
else:
sources.append(arg) # depends on [control=['for'], data=['arg']]
for data in sources:
for x in data:
if not self.sequential:
x = [x] # depends on [control=['if'], data=[]]
try:
counter.update(x) # depends on [control=['try'], data=[]]
except TypeError:
counter.update(chain.from_iterable(x)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['data']]
specials = list(OrderedDict.fromkeys((tok for tok in [self.unk_token, self.pad_token, self.init_token, self.eos_token] + kwargs.pop('specials', []) if tok is not None)))
self.vocab = self.vocab_cls(counter, specials=specials, **kwargs) |
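
A standard-library sketch of the two core steps of build_vocab, token counting and ordered deduplication of the specials list, on a hand-rolled column of tokenized examples.

from collections import Counter, OrderedDict

data = [['the', 'cat'], ['the', 'dog']]   # one tokenized column of a dataset
counter = Counter()
for example in data:
    counter.update(example)

# Specials are deduplicated while keeping order, exactly as in build_vocab.
specials = list(OrderedDict.fromkeys(
    tok for tok in ['<unk>', '<pad>', None, None] + ['<bos>']
    if tok is not None))
print(counter)    # Counter({'the': 2, 'cat': 1, 'dog': 1})
print(specials)   # ['<unk>', '<pad>', '<bos>']
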
def get_one(self, schema, query=None, **kwargs):
"""
get one row from the db matching filters set in query
schema -- Schema()
query -- Query()
return -- dict -- the matching row
"""
ret = self._get_query(self._get_one, schema, query, **kwargs)
if not ret: ret = {}
return ret | def function[get_one, parameter[self, schema, query]]:
constant[
get one row from the db matching filters set in query
schema -- Schema()
query -- Query()
return -- dict -- the matching row
]
variable[ret] assign[=] call[name[self]._get_query, parameter[name[self]._get_one, name[schema], name[query]]]
if <ast.UnaryOp object at 0x7da18dc98370> begin[:]
variable[ret] assign[=] dictionary[[], []]
return[name[ret]] | keyword[def] identifier[get_one] ( identifier[self] , identifier[schema] , identifier[query] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ret] = identifier[self] . identifier[_get_query] ( identifier[self] . identifier[_get_one] , identifier[schema] , identifier[query] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[ret] : identifier[ret] ={}
keyword[return] identifier[ret] | def get_one(self, schema, query=None, **kwargs):
"""
get one row from the db matching filters set in query
schema -- Schema()
query -- Query()
return -- dict -- the matching row
"""
ret = self._get_query(self._get_one, schema, query, **kwargs)
if not ret:
ret = {} # depends on [control=['if'], data=[]]
return ret |
def read_config(config_path):
"""read config_path and return options as dictionary"""
result = {}
with open(config_path, 'r') as fd:
for line in fd.readlines():
if '=' in line:
key, value = line.split('=', 1)
try:
result[key] = json.loads(value)
except ValueError:
result[key] = value.rstrip('\n')
return result | def function[read_config, parameter[config_path]]:
constant[read config_path and return options as dictionary]
variable[result] assign[=] dictionary[[], []]
with call[name[open], parameter[name[config_path], constant[r]]] begin[:]
for taget[name[line]] in starred[call[name[fd].readlines, parameter[]]] begin[:]
if compare[constant[=] in name[line]] begin[:]
<ast.Tuple object at 0x7da18c4ce9b0> assign[=] call[name[line].split, parameter[constant[=], constant[1]]]
<ast.Try object at 0x7da18c4ccee0>
return[name[result]] | keyword[def] identifier[read_config] ( identifier[config_path] ):
literal[string]
identifier[result] ={}
keyword[with] identifier[open] ( identifier[config_path] , literal[string] ) keyword[as] identifier[fd] :
keyword[for] identifier[line] keyword[in] identifier[fd] . identifier[readlines] ():
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[key] , identifier[value] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
keyword[try] :
identifier[result] [ identifier[key] ]= identifier[json] . identifier[loads] ( identifier[value] )
keyword[except] identifier[ValueError] :
identifier[result] [ identifier[key] ]= identifier[value] . identifier[rstrip] ( literal[string] )
keyword[return] identifier[result] | def read_config(config_path):
"""read config_path and return options as dictionary"""
result = {}
with open(config_path, 'r') as fd:
for line in fd.readlines():
if '=' in line:
(key, value) = line.split('=', 1)
try:
result[key] = json.loads(value) # depends on [control=['try'], data=[]]
except ValueError:
result[key] = value.rstrip('\n') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fd']]
return result |
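
A hedged usage example for read_config; the keys are illustrative. Note that values which fail json.loads keep their string form with the trailing newline stripped.

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as tmp:
    tmp.write('retries=3\n')     # valid JSON, parsed to the int 3
    tmp.write('name=worker\n')   # not valid JSON, kept as the string 'worker'
    path = tmp.name

print(read_config(path))   # {'retries': 3, 'name': 'worker'}
os.unlink(path)
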
def DEFINE_multi( # pylint: disable=g-bad-name,redefined-builtin
parser, serializer, name, default, help, flag_values=FLAGS,
module_name=None, **args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
serializer: ArgumentSerializer that serializes the flag value.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
flag_values, module_name) | def function[DEFINE_multi, parameter[parser, serializer, name, default, help, flag_values, module_name]]:
constant[Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
serializer: ArgumentSerializer that serializes the flag value.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
]
call[name[DEFINE_flag], parameter[call[name[MultiFlag], parameter[name[parser], name[serializer], name[name], name[default], name[help]]], name[flag_values], name[module_name]]] | keyword[def] identifier[DEFINE_multi] (
identifier[parser] , identifier[serializer] , identifier[name] , identifier[default] , identifier[help] , identifier[flag_values] = identifier[FLAGS] ,
identifier[module_name] = keyword[None] ,** identifier[args] ):
literal[string]
identifier[DEFINE_flag] ( identifier[MultiFlag] ( identifier[parser] , identifier[serializer] , identifier[name] , identifier[default] , identifier[help] ,** identifier[args] ),
identifier[flag_values] , identifier[module_name] ) | def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS, module_name=None, **args): # pylint: disable=g-bad-name,redefined-builtin
"Registers a generic MultiFlag that parses its args with a given parser.\n\n Auxiliary function. Normal users should NOT use it directly.\n\n Developers who need to create their own 'Parser' classes for options\n which can appear multiple times can call this module function to\n register their flags.\n\n Args:\n parser: ArgumentParser that is used to parse the flag arguments.\n serializer: ArgumentSerializer that serializes the flag value.\n name: A string, the flag name.\n default: The default value of the flag.\n help: A help string.\n flag_values: FlagValues object with which the flag will be registered.\n module_name: A string, the name of the Python module declaring this flag.\n If not provided, it will be computed using the stack trace of this call.\n **args: Dictionary with extra keyword args that are passed to the\n Flag __init__.\n "
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args), flag_values, module_name) |
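
A hedged sketch of how a typed convenience wrapper can sit on top of DEFINE_multi, mirroring gflags' own DEFINE_multi_int; IntegerParser and ArgumentSerializer are assumed to be the module's stock parser and serializer classes.

def DEFINE_multi_int(name, default, help, flag_values=FLAGS, **args):  # pylint: disable=redefined-builtin
    parser = IntegerParser()           # validates each occurrence as an int
    serializer = ArgumentSerializer()  # turns stored values back into strings
    DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)

# Passing --port=80 --port=8080 would then accumulate FLAGS.port == [80, 8080].
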
def square_off(series, time_delta=None, transition_seconds=1):
"""Insert samples in regularly sampled data to produce stairsteps from ramps when plotted.
    New samples are placed 1 second (1e9 ns) before each existing sample, to facilitate plotting and sorting
>>> square_off(pd.Series(range(3), index=pd.date_range('2014-01-01', periods=3, freq='15m')),
... time_delta=5.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-31 00:00:00 0
2014-01-31 00:00:05.500000 0
2015-04-30 00:00:00 1
2015-04-30 00:00:05.500000 1
2016-07-31 00:00:00 2
2016-07-31 00:00:05.500000 2
dtype: int64
>>> square_off(pd.Series(range(2), index=pd.date_range('2014-01-01', periods=2, freq='15min')),
... transition_seconds=2.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-01 00:00:00 0
2014-01-01 00:14:57.500000 0
2014-01-01 00:15:00 1
2014-01-01 00:29:57.500000 1
dtype: int64
"""
if time_delta:
# int, float means delta is in seconds (not years!)
if isinstance(time_delta, (int, float)):
time_delta = datetime.timedelta(0, time_delta)
new_times = series.index + time_delta
else:
diff = np.diff(series.index)
time_delta = np.append(diff, [diff[-1]])
new_times = series.index + time_delta
new_times = pd.DatetimeIndex(new_times) - datetime.timedelta(0, transition_seconds)
return pd.concat([series, pd.Series(series.values, index=new_times)]).sort_index() | def function[square_off, parameter[series, time_delta, transition_seconds]]:
constant[Insert samples in regularly sampled data to produce stairsteps from ramps when plotted.
    New samples are placed 1 second (1e9 ns) before each existing sample, to facilitate plotting and sorting
>>> square_off(pd.Series(range(3), index=pd.date_range('2014-01-01', periods=3, freq='15m')),
... time_delta=5.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-31 00:00:00 0
2014-01-31 00:00:05.500000 0
2015-04-30 00:00:00 1
2015-04-30 00:00:05.500000 1
2016-07-31 00:00:00 2
2016-07-31 00:00:05.500000 2
dtype: int64
>>> square_off(pd.Series(range(2), index=pd.date_range('2014-01-01', periods=2, freq='15min')),
... transition_seconds=2.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-01 00:00:00 0
2014-01-01 00:14:57.500000 0
2014-01-01 00:15:00 1
2014-01-01 00:29:57.500000 1
dtype: int64
]
if name[time_delta] begin[:]
if call[name[isinstance], parameter[name[time_delta], tuple[[<ast.Name object at 0x7da1b168c820>, <ast.Name object at 0x7da1b168ff40>]]]] begin[:]
variable[time_delta] assign[=] call[name[datetime].timedelta, parameter[constant[0], name[time_delta]]]
variable[new_times] assign[=] binary_operation[name[series].index + name[time_delta]]
return[call[call[name[pd].concat, parameter[list[[<ast.Name object at 0x7da1b146e200>, <ast.Call object at 0x7da1b146e1d0>]]]].sort_index, parameter[]]] | keyword[def] identifier[square_off] ( identifier[series] , identifier[time_delta] = keyword[None] , identifier[transition_seconds] = literal[int] ):
literal[string]
keyword[if] identifier[time_delta] :
keyword[if] identifier[isinstance] ( identifier[time_delta] ,( identifier[int] , identifier[float] )):
identifier[time_delta] = identifier[datetime] . identifier[timedelta] ( literal[int] , identifier[time_delta] )
identifier[new_times] = identifier[series] . identifier[index] + identifier[time_delta]
keyword[else] :
identifier[diff] = identifier[np] . identifier[diff] ( identifier[series] . identifier[index] )
identifier[time_delta] = identifier[np] . identifier[append] ( identifier[diff] ,[ identifier[diff] [- literal[int] ]])
identifier[new_times] = identifier[series] . identifier[index] + identifier[time_delta]
identifier[new_times] = identifier[pd] . identifier[DatetimeIndex] ( identifier[new_times] )- identifier[datetime] . identifier[timedelta] ( literal[int] , identifier[transition_seconds] )
keyword[return] identifier[pd] . identifier[concat] ([ identifier[series] , identifier[pd] . identifier[Series] ( identifier[series] . identifier[values] , identifier[index] = identifier[new_times] )]). identifier[sort_index] () | def square_off(series, time_delta=None, transition_seconds=1):
"""Insert samples in regularly sampled data to produce stairsteps from ramps when plotted.
    New samples are placed 1 second (1e9 ns) before each existing sample, to facilitate plotting and sorting
>>> square_off(pd.Series(range(3), index=pd.date_range('2014-01-01', periods=3, freq='15m')),
... time_delta=5.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-31 00:00:00 0
2014-01-31 00:00:05.500000 0
2015-04-30 00:00:00 1
2015-04-30 00:00:05.500000 1
2016-07-31 00:00:00 2
2016-07-31 00:00:05.500000 2
dtype: int64
>>> square_off(pd.Series(range(2), index=pd.date_range('2014-01-01', periods=2, freq='15min')),
... transition_seconds=2.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-01 00:00:00 0
2014-01-01 00:14:57.500000 0
2014-01-01 00:15:00 1
2014-01-01 00:29:57.500000 1
dtype: int64
"""
if time_delta:
# int, float means delta is in seconds (not years!)
if isinstance(time_delta, (int, float)):
time_delta = datetime.timedelta(0, time_delta) # depends on [control=['if'], data=[]]
new_times = series.index + time_delta # depends on [control=['if'], data=[]]
else:
diff = np.diff(series.index)
time_delta = np.append(diff, [diff[-1]])
new_times = series.index + time_delta
new_times = pd.DatetimeIndex(new_times) - datetime.timedelta(0, transition_seconds)
return pd.concat([series, pd.Series(series.values, index=new_times)]).sort_index() |
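
The doctests only exercise the time_delta branch; here is a hedged example for the irregular-spacing branch, where the step after each sample comes from np.diff and the final gap is reused for the last sample (assumes the module-level numpy/pandas/datetime imports the function relies on).

import numpy as np
import pandas as pd

idx = pd.DatetimeIndex(['2014-01-01', '2014-01-02', '2014-01-05'])
s = pd.Series([0, 1, 2], index=idx)
# Each value is now held until 60 seconds before the next sample's timestamp;
# the trailing 3-day gap is reused to place the step after the final sample.
print(square_off(s, transition_seconds=60))
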
def run(self, *args):
"""Merge two identities.
When <from_uuid> or <to_uuid> are empty the command does not have
any effect. The same happens when both <from_uuid> and <to_uuid>
are the same unique identity.
"""
params = self.parser.parse_args(args)
from_uuid = params.from_uuid
to_uuid = params.to_uuid
code = self.merge(from_uuid, to_uuid)
return code | def function[run, parameter[self]]:
constant[Merge two identities.
When <from_uuid> or <to_uuid> are empty the command does not have
any effect. The same happens when both <from_uuid> and <to_uuid>
are the same unique identity.
]
variable[params] assign[=] call[name[self].parser.parse_args, parameter[name[args]]]
variable[from_uuid] assign[=] name[params].from_uuid
variable[to_uuid] assign[=] name[params].to_uuid
variable[code] assign[=] call[name[self].merge, parameter[name[from_uuid], name[to_uuid]]]
return[name[code]] | keyword[def] identifier[run] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[params] = identifier[self] . identifier[parser] . identifier[parse_args] ( identifier[args] )
identifier[from_uuid] = identifier[params] . identifier[from_uuid]
identifier[to_uuid] = identifier[params] . identifier[to_uuid]
identifier[code] = identifier[self] . identifier[merge] ( identifier[from_uuid] , identifier[to_uuid] )
keyword[return] identifier[code] | def run(self, *args):
"""Merge two identities.
When <from_uuid> or <to_uuid> are empty the command does not have
any effect. The same happens when both <from_uuid> and <to_uuid>
are the same unique identity.
"""
params = self.parser.parse_args(args)
from_uuid = params.from_uuid
to_uuid = params.to_uuid
code = self.merge(from_uuid, to_uuid)
return code |
def resize(widthWindow, heightWindow):
"""Setup 3D projection for window"""
glViewport(0, 0, widthWindow, heightWindow)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(70, 1.0*widthWindow/heightWindow, 0.001, 10000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity() | def function[resize, parameter[widthWindow, heightWindow]]:
constant[Setup 3D projection for window]
call[name[glViewport], parameter[constant[0], constant[0], name[widthWindow], name[heightWindow]]]
call[name[glMatrixMode], parameter[name[GL_PROJECTION]]]
call[name[glLoadIdentity], parameter[]]
call[name[gluPerspective], parameter[constant[70], binary_operation[binary_operation[constant[1.0] * name[widthWindow]] / name[heightWindow]], constant[0.001], constant[10000.0]]]
call[name[glMatrixMode], parameter[name[GL_MODELVIEW]]]
call[name[glLoadIdentity], parameter[]] | keyword[def] identifier[resize] ( identifier[widthWindow] , identifier[heightWindow] ):
literal[string]
identifier[glViewport] ( literal[int] , literal[int] , identifier[widthWindow] , identifier[heightWindow] )
identifier[glMatrixMode] ( identifier[GL_PROJECTION] )
identifier[glLoadIdentity] ()
identifier[gluPerspective] ( literal[int] , literal[int] * identifier[widthWindow] / identifier[heightWindow] , literal[int] , literal[int] )
identifier[glMatrixMode] ( identifier[GL_MODELVIEW] )
identifier[glLoadIdentity] () | def resize(widthWindow, heightWindow):
"""Setup 3D projection for window"""
glViewport(0, 0, widthWindow, heightWindow)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(70, 1.0 * widthWindow / heightWindow, 0.001, 10000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity() |
def __tokenize_segments(self):
"""
tokenizes every RS3 segment (i.e. an RST nucleus or satellite).
for each token, a node is added to the graph, as well as an edge from
the segment node to the token node. the token node IDs are also added
to ``self.tokens``.
"""
for seg_node_id in self.segments:
segment_toks = self.node[seg_node_id][self.ns+':text'].split()
for i, tok in enumerate(segment_toks):
tok_node_id = '{0}:{1}_{2}'.format(self.ns, seg_node_id, i)
self.add_node(tok_node_id, layers={self.ns, self.ns+':token'},
attr_dict={self.ns+':token': tok, 'label': tok})
self.tokens.append(tok_node_id)
self.add_edge(seg_node_id, tok_node_id,
layers={'rst', 'rst:token'},
edge_type=EdgeTypes.spanning_relation) | def function[__tokenize_segments, parameter[self]]:
constant[
tokenizes every RS3 segment (i.e. an RST nucleus or satellite).
for each token, a node is added to the graph, as well as an edge from
the segment node to the token node. the token node IDs are also added
to ``self.tokens``.
]
for taget[name[seg_node_id]] in starred[name[self].segments] begin[:]
variable[segment_toks] assign[=] call[call[call[name[self].node][name[seg_node_id]]][binary_operation[name[self].ns + constant[:text]]].split, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2043445b0>, <ast.Name object at 0x7da204345390>]]] in starred[call[name[enumerate], parameter[name[segment_toks]]]] begin[:]
variable[tok_node_id] assign[=] call[constant[{0}:{1}_{2}].format, parameter[name[self].ns, name[seg_node_id], name[i]]]
call[name[self].add_node, parameter[name[tok_node_id]]]
call[name[self].tokens.append, parameter[name[tok_node_id]]]
call[name[self].add_edge, parameter[name[seg_node_id], name[tok_node_id]]] | keyword[def] identifier[__tokenize_segments] ( identifier[self] ):
literal[string]
keyword[for] identifier[seg_node_id] keyword[in] identifier[self] . identifier[segments] :
identifier[segment_toks] = identifier[self] . identifier[node] [ identifier[seg_node_id] ][ identifier[self] . identifier[ns] + literal[string] ]. identifier[split] ()
keyword[for] identifier[i] , identifier[tok] keyword[in] identifier[enumerate] ( identifier[segment_toks] ):
identifier[tok_node_id] = literal[string] . identifier[format] ( identifier[self] . identifier[ns] , identifier[seg_node_id] , identifier[i] )
identifier[self] . identifier[add_node] ( identifier[tok_node_id] , identifier[layers] ={ identifier[self] . identifier[ns] , identifier[self] . identifier[ns] + literal[string] },
identifier[attr_dict] ={ identifier[self] . identifier[ns] + literal[string] : identifier[tok] , literal[string] : identifier[tok] })
identifier[self] . identifier[tokens] . identifier[append] ( identifier[tok_node_id] )
identifier[self] . identifier[add_edge] ( identifier[seg_node_id] , identifier[tok_node_id] ,
identifier[layers] ={ literal[string] , literal[string] },
identifier[edge_type] = identifier[EdgeTypes] . identifier[spanning_relation] ) | def __tokenize_segments(self):
"""
tokenizes every RS3 segment (i.e. an RST nucleus or satellite).
for each token, a node is added to the graph, as well as an edge from
the segment node to the token node. the token node IDs are also added
to ``self.tokens``.
"""
for seg_node_id in self.segments:
segment_toks = self.node[seg_node_id][self.ns + ':text'].split()
for (i, tok) in enumerate(segment_toks):
tok_node_id = '{0}:{1}_{2}'.format(self.ns, seg_node_id, i)
self.add_node(tok_node_id, layers={self.ns, self.ns + ':token'}, attr_dict={self.ns + ':token': tok, 'label': tok})
self.tokens.append(tok_node_id)
self.add_edge(seg_node_id, tok_node_id, layers={'rst', 'rst:token'}, edge_type=EdgeTypes.spanning_relation) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['seg_node_id']] |
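
A self-contained sketch of the same tokenization scheme on a bare networkx graph, with 'rst' standing in for self.ns and a plain string for EdgeTypes.spanning_relation.

import networkx as nx

g = nx.MultiDiGraph()
g.add_node('seg1')
g.nodes['seg1']['rst:text'] = 'Although it rained'    # the segment's text

for i, tok in enumerate(g.nodes['seg1']['rst:text'].split()):
    tok_id = 'rst:seg1_{}'.format(i)                  # e.g. 'rst:seg1_0'
    g.add_node(tok_id)
    g.nodes[tok_id].update({'rst:token': tok, 'label': tok})
    g.add_edge('seg1', tok_id, edge_type='spanning_relation')
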
def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and (model_attr is not None):
# Force coords to have desired name.
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = ("Values for '{0}' are nearly (but not exactly) "
"the same in the Run {1} and the Model {2}. "
"Therefore replacing Run's values with the "
"model's.".format(name_int, self.run,
self.model))
logging.info(msg)
ds[name_int].values = model_attr.values
else:
msg = ("Model coordinates for '{0}' do not match those"
" in Run: {1} vs. {2}"
"".format(name_int, ds[name_int], model_attr))
logging.info(msg)
else:
# Bring in coord from model object if it exists.
ds = ds.load()
if model_attr is not None:
ds[name_int] = model_attr
ds = ds.set_coords(name_int)
if (self.dtype_in_vert == 'pressure' and
internal_names.PLEVEL_STR in ds.coords):
self.pressure = ds.level
return ds | def function[_add_grid_attributes, parameter[self, ds]]:
constant[Add model grid attributes to a dataset]
for taget[tuple[[<ast.Name object at 0x7da1b04f5240>, <ast.Name object at 0x7da1b04f5b10>]]] in starred[call[name[self]._grid_attrs.items, parameter[]]] begin[:]
variable[ds_coord_name] assign[=] call[call[name[set], parameter[name[names_ext]]].intersection, parameter[binary_operation[call[name[set], parameter[name[ds].coords]] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[name[ds].data_vars]]]]]
variable[model_attr] assign[=] call[name[getattr], parameter[name[self].model, name[name_int], constant[None]]]
if <ast.BoolOp object at 0x7da1b04f4be0> begin[:]
variable[ds] assign[=] call[name[ds].rename, parameter[dictionary[[<ast.Subscript object at 0x7da1b04f4fd0>], [<ast.Name object at 0x7da1b04f5690>]]]]
variable[ds] assign[=] call[name[ds].set_coords, parameter[name[name_int]]]
if <ast.UnaryOp object at 0x7da1b04f5c00> begin[:]
if call[name[np].allclose, parameter[call[name[ds]][name[name_int]], name[model_attr]]] begin[:]
variable[msg] assign[=] call[constant[Values for '{0}' are nearly (but not exactly) the same in the Run {1} and the Model {2}. Therefore replacing Run's values with the model's.].format, parameter[name[name_int], name[self].run, name[self].model]]
call[name[logging].info, parameter[name[msg]]]
call[name[ds]][name[name_int]].values assign[=] name[model_attr].values
if <ast.BoolOp object at 0x7da1b04f5ab0> begin[:]
name[self].pressure assign[=] name[ds].level
return[name[ds]] | keyword[def] identifier[_add_grid_attributes] ( identifier[self] , identifier[ds] ):
literal[string]
keyword[for] identifier[name_int] , identifier[names_ext] keyword[in] identifier[self] . identifier[_grid_attrs] . identifier[items] ():
identifier[ds_coord_name] = identifier[set] ( identifier[names_ext] ). identifier[intersection] ( identifier[set] ( identifier[ds] . identifier[coords] )|
identifier[set] ( identifier[ds] . identifier[data_vars] ))
identifier[model_attr] = identifier[getattr] ( identifier[self] . identifier[model] , identifier[name_int] , keyword[None] )
keyword[if] identifier[ds_coord_name] keyword[and] ( identifier[model_attr] keyword[is] keyword[not] keyword[None] ):
identifier[ds] = identifier[ds] . identifier[rename] ({ identifier[list] ( identifier[ds_coord_name] )[ literal[int] ]: identifier[name_int] })
identifier[ds] = identifier[ds] . identifier[set_coords] ( identifier[name_int] )
keyword[if] keyword[not] identifier[np] . identifier[array_equal] ( identifier[ds] [ identifier[name_int] ], identifier[model_attr] ):
keyword[if] identifier[np] . identifier[allclose] ( identifier[ds] [ identifier[name_int] ], identifier[model_attr] ):
identifier[msg] =( literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[name_int] , identifier[self] . identifier[run] ,
identifier[self] . identifier[model] ))
identifier[logging] . identifier[info] ( identifier[msg] )
identifier[ds] [ identifier[name_int] ]. identifier[values] = identifier[model_attr] . identifier[values]
keyword[else] :
identifier[msg] =( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[name_int] , identifier[ds] [ identifier[name_int] ], identifier[model_attr] ))
identifier[logging] . identifier[info] ( identifier[msg] )
keyword[else] :
identifier[ds] = identifier[ds] . identifier[load] ()
keyword[if] identifier[model_attr] keyword[is] keyword[not] keyword[None] :
identifier[ds] [ identifier[name_int] ]= identifier[model_attr]
identifier[ds] = identifier[ds] . identifier[set_coords] ( identifier[name_int] )
keyword[if] ( identifier[self] . identifier[dtype_in_vert] == literal[string] keyword[and]
identifier[internal_names] . identifier[PLEVEL_STR] keyword[in] identifier[ds] . identifier[coords] ):
identifier[self] . identifier[pressure] = identifier[ds] . identifier[level]
keyword[return] identifier[ds] | def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for (name_int, names_ext) in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) | set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and model_attr is not None:
# Force coords to have desired name.
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = "Values for '{0}' are nearly (but not exactly) the same in the Run {1} and the Model {2}. Therefore replacing Run's values with the model's.".format(name_int, self.run, self.model)
logging.info(msg)
ds[name_int].values = model_attr.values # depends on [control=['if'], data=[]]
else:
msg = "Model coordinates for '{0}' do not match those in Run: {1} vs. {2}".format(name_int, ds[name_int], model_attr)
logging.info(msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Bring in coord from model object if it exists.
ds = ds.load()
if model_attr is not None:
ds[name_int] = model_attr
ds = ds.set_coords(name_int) # depends on [control=['if'], data=['model_attr']]
if self.dtype_in_vert == 'pressure' and internal_names.PLEVEL_STR in ds.coords:
self.pressure = ds.level # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return ds |
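
A minimal xarray sketch of the rename-then-promote step applied to one grid attribute, with illustrative names: a data variable matching an external grid name is renamed to the internal name and promoted to a coordinate.

import numpy as np
import xarray as xr

ds = xr.Dataset({'latitude': ('y', np.array([0.0, 45.0, 90.0]))})
ds = ds.rename({'latitude': 'lat'}).set_coords('lat')   # external name -> internal
print(ds.coords['lat'].values)   # [ 0. 45. 90.]
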
def get_cluster_info(host, port, ignore_cluster_errors=False):
"""
return dict with info about nodes in cluster and current version
{
'nodes': [
'IP:port',
'IP:port',
],
'version': '1.4.4'
}
"""
client = Telnet(host, int(port))
client.write(b'version\n')
res = client.read_until(b'\r\n').strip()
version_list = res.split(b' ')
if len(version_list) not in [2, 3] or version_list[0] != b'VERSION':
raise WrongProtocolData('version', res)
version = version_list[1]
if StrictVersion(smart_text(version)) >= StrictVersion('1.4.14'):
cmd = b'config get cluster\n'
else:
cmd = b'get AmazonElastiCache:cluster\n'
client.write(cmd)
regex_index, match_object, res = client.expect([
re.compile(b'\n\r\nEND\r\n'),
re.compile(b'ERROR\r\n')
])
client.close()
if res == b'ERROR\r\n' and ignore_cluster_errors:
return {
'version': version,
'nodes': [
'{0}:{1}'.format(smart_text(host),
smart_text(port))
]
}
ls = list(filter(None, re.compile(br'\r?\n').split(res)))
if len(ls) != 4:
raise WrongProtocolData(cmd, res)
try:
version = int(ls[1])
except ValueError:
raise WrongProtocolData(cmd, res)
nodes = []
try:
for node in ls[2].split(b' '):
host, ip, port = node.split(b'|')
nodes.append('{0}:{1}'.format(smart_text(ip or host),
smart_text(port)))
except ValueError:
raise WrongProtocolData(cmd, res)
return {
'version': version,
'nodes': nodes
} | def function[get_cluster_info, parameter[host, port, ignore_cluster_errors]]:
constant[
return dict with info about nodes in cluster and current version
{
'nodes': [
'IP:port',
'IP:port',
],
'version': '1.4.4'
}
]
variable[client] assign[=] call[name[Telnet], parameter[name[host], call[name[int], parameter[name[port]]]]]
call[name[client].write, parameter[constant[b'version\n']]]
variable[res] assign[=] call[call[name[client].read_until, parameter[constant[b'\r\n']]].strip, parameter[]]
variable[version_list] assign[=] call[name[res].split, parameter[constant[b' ']]]
if <ast.BoolOp object at 0x7da1b0549300> begin[:]
<ast.Raise object at 0x7da1b054bcd0>
variable[version] assign[=] call[name[version_list]][constant[1]]
if compare[call[name[StrictVersion], parameter[call[name[smart_text], parameter[name[version]]]]] greater_or_equal[>=] call[name[StrictVersion], parameter[constant[1.4.14]]]] begin[:]
variable[cmd] assign[=] constant[b'config get cluster\n']
call[name[client].write, parameter[name[cmd]]]
<ast.Tuple object at 0x7da1b0549570> assign[=] call[name[client].expect, parameter[list[[<ast.Call object at 0x7da1b054af80>, <ast.Call object at 0x7da1b054a3b0>]]]]
call[name[client].close, parameter[]]
if <ast.BoolOp object at 0x7da1b04d8460> begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b04d8be0>, <ast.Constant object at 0x7da1b04daf50>], [<ast.Name object at 0x7da1b04d8880>, <ast.List object at 0x7da1b04db4c0>]]]
variable[ls] assign[=] call[name[list], parameter[call[name[filter], parameter[constant[None], call[call[name[re].compile, parameter[constant[b'\\r?\\n']]].split, parameter[name[res]]]]]]]
if compare[call[name[len], parameter[name[ls]]] not_equal[!=] constant[4]] begin[:]
<ast.Raise object at 0x7da1b04d8f40>
<ast.Try object at 0x7da1b04d9750>
variable[nodes] assign[=] list[[]]
<ast.Try object at 0x7da1b04d9330>
return[dictionary[[<ast.Constant object at 0x7da1b04d9690>, <ast.Constant object at 0x7da1b04d9390>], [<ast.Name object at 0x7da1b04d8160>, <ast.Name object at 0x7da1b04d9780>]]] | keyword[def] identifier[get_cluster_info] ( identifier[host] , identifier[port] , identifier[ignore_cluster_errors] = keyword[False] ):
literal[string]
identifier[client] = identifier[Telnet] ( identifier[host] , identifier[int] ( identifier[port] ))
identifier[client] . identifier[write] ( literal[string] )
identifier[res] = identifier[client] . identifier[read_until] ( literal[string] ). identifier[strip] ()
identifier[version_list] = identifier[res] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[version_list] ) keyword[not] keyword[in] [ literal[int] , literal[int] ] keyword[or] identifier[version_list] [ literal[int] ]!= literal[string] :
keyword[raise] identifier[WrongProtocolData] ( literal[string] , identifier[res] )
identifier[version] = identifier[version_list] [ literal[int] ]
keyword[if] identifier[StrictVersion] ( identifier[smart_text] ( identifier[version] ))>= identifier[StrictVersion] ( literal[string] ):
identifier[cmd] = literal[string]
keyword[else] :
identifier[cmd] = literal[string]
identifier[client] . identifier[write] ( identifier[cmd] )
identifier[regex_index] , identifier[match_object] , identifier[res] = identifier[client] . identifier[expect] ([
identifier[re] . identifier[compile] ( literal[string] ),
identifier[re] . identifier[compile] ( literal[string] )
])
identifier[client] . identifier[close] ()
keyword[if] identifier[res] == literal[string] keyword[and] identifier[ignore_cluster_errors] :
keyword[return] {
literal[string] : identifier[version] ,
literal[string] :[
literal[string] . identifier[format] ( identifier[smart_text] ( identifier[host] ),
identifier[smart_text] ( identifier[port] ))
]
}
identifier[ls] = identifier[list] ( identifier[filter] ( keyword[None] , identifier[re] . identifier[compile] ( literal[string] ). identifier[split] ( identifier[res] )))
keyword[if] identifier[len] ( identifier[ls] )!= literal[int] :
keyword[raise] identifier[WrongProtocolData] ( identifier[cmd] , identifier[res] )
keyword[try] :
identifier[version] = identifier[int] ( identifier[ls] [ literal[int] ])
keyword[except] identifier[ValueError] :
keyword[raise] identifier[WrongProtocolData] ( identifier[cmd] , identifier[res] )
identifier[nodes] =[]
keyword[try] :
keyword[for] identifier[node] keyword[in] identifier[ls] [ literal[int] ]. identifier[split] ( literal[string] ):
identifier[host] , identifier[ip] , identifier[port] = identifier[node] . identifier[split] ( literal[string] )
identifier[nodes] . identifier[append] ( literal[string] . identifier[format] ( identifier[smart_text] ( identifier[ip] keyword[or] identifier[host] ),
identifier[smart_text] ( identifier[port] )))
keyword[except] identifier[ValueError] :
keyword[raise] identifier[WrongProtocolData] ( identifier[cmd] , identifier[res] )
keyword[return] {
literal[string] : identifier[version] ,
literal[string] : identifier[nodes]
} | def get_cluster_info(host, port, ignore_cluster_errors=False):
"""
return dict with info about nodes in cluster and current version
{
'nodes': [
'IP:port',
'IP:port',
],
'version': '1.4.4'
}
"""
client = Telnet(host, int(port))
client.write(b'version\n')
res = client.read_until(b'\r\n').strip()
version_list = res.split(b' ')
if len(version_list) not in [2, 3] or version_list[0] != b'VERSION':
raise WrongProtocolData('version', res) # depends on [control=['if'], data=[]]
version = version_list[1]
if StrictVersion(smart_text(version)) >= StrictVersion('1.4.14'):
cmd = b'config get cluster\n' # depends on [control=['if'], data=[]]
else:
cmd = b'get AmazonElastiCache:cluster\n'
client.write(cmd)
(regex_index, match_object, res) = client.expect([re.compile(b'\n\r\nEND\r\n'), re.compile(b'ERROR\r\n')])
client.close()
if res == b'ERROR\r\n' and ignore_cluster_errors:
return {'version': version, 'nodes': ['{0}:{1}'.format(smart_text(host), smart_text(port))]} # depends on [control=['if'], data=[]]
ls = list(filter(None, re.compile(b'\\r?\\n').split(res)))
if len(ls) != 4:
raise WrongProtocolData(cmd, res) # depends on [control=['if'], data=[]]
try:
version = int(ls[1]) # depends on [control=['try'], data=[]]
except ValueError:
raise WrongProtocolData(cmd, res) # depends on [control=['except'], data=[]]
nodes = []
try:
for node in ls[2].split(b' '):
(host, ip, port) = node.split(b'|')
nodes.append('{0}:{1}'.format(smart_text(ip or host), smart_text(port))) # depends on [control=['for'], data=['node']] # depends on [control=['try'], data=[]]
except ValueError:
raise WrongProtocolData(cmd, res) # depends on [control=['except'], data=[]]
return {'version': version, 'nodes': nodes} |
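
A hedged sketch of the payload shape the function expects from 'config get cluster' and how the four-part split recovers version and nodes; the response bytes are illustrative.

import re

res = (b'CONFIG cluster 0 64\r\n'
       b'12\n'
       b'host1|10.0.0.1|11211 host2|10.0.0.2|11211\n'
       b'\r\nEND\r\n')
ls = list(filter(None, re.compile(br'\r?\n').split(res)))   # 4 parts, as checked above
version = int(ls[1])
nodes = ['{0}:{1}'.format((ip or host).decode(), port.decode())
         for host, ip, port in (n.split(b'|') for n in ls[2].split(b' '))]
print(version, nodes)   # 12 ['10.0.0.1:11211', '10.0.0.2:11211']
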
def set_game_score(
self,
user_id: Union[int, str],
score: int,
force: bool = None,
disable_edit_message: bool = None,
chat_id: Union[int, str] = None,
message_id: int = None
):
# inline_message_id: str = None): TODO Add inline_message_id
"""Use this method to set the score of the specified user in a game.
Args:
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
score (``int``):
New score, must be non-negative.
force (``bool``, *optional*):
Pass True, if the high score is allowed to decrease.
This can be useful when fixing mistakes or banning cheaters.
disable_edit_message (``bool``, *optional*):
Pass True, if the game message should not be automatically edited to include the current scoreboard.
chat_id (``int`` | ``str``, *optional*):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
Required if inline_message_id is not specified.
message_id (``int``, *optional*):
Identifier of the sent message.
Required if inline_message_id is not specified.
Returns:
On success, if the message was sent by the bot, returns the edited :obj:`Message <pyrogram.Message>`,
otherwise returns True.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
:class:`BotScoreNotModified` if the new score is not greater than the user's current score in the chat and force is False.
"""
r = self.send(
functions.messages.SetGameScore(
peer=self.resolve_peer(chat_id),
score=score,
id=message_id,
user_id=self.resolve_peer(user_id),
force=force or None,
edit_message=not disable_edit_message or None
)
)
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return pyrogram.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
)
return True | def function[set_game_score, parameter[self, user_id, score, force, disable_edit_message, chat_id, message_id]]:
constant[Use this method to set the score of the specified user in a game.
Args:
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
score (``int``):
New score, must be non-negative.
force (``bool``, *optional*):
Pass True, if the high score is allowed to decrease.
This can be useful when fixing mistakes or banning cheaters.
disable_edit_message (``bool``, *optional*):
Pass True, if the game message should not be automatically edited to include the current scoreboard.
chat_id (``int`` | ``str``, *optional*):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
Required if inline_message_id is not specified.
message_id (``int``, *optional*):
Identifier of the sent message.
Required if inline_message_id is not specified.
Returns:
On success, if the message was sent by the bot, returns the edited :obj:`Message <pyrogram.Message>`,
otherwise returns True.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
:class:`BotScoreNotModified` if the new score is not greater than the user's current score in the chat and force is False.
]
variable[r] assign[=] call[name[self].send, parameter[call[name[functions].messages.SetGameScore, parameter[]]]]
for taget[name[i]] in starred[name[r].updates] begin[:]
if call[name[isinstance], parameter[name[i], tuple[[<ast.Attribute object at 0x7da1b21da710>, <ast.Attribute object at 0x7da1b21da950>]]]] begin[:]
return[call[name[pyrogram].Message._parse, parameter[name[self], name[i].message, <ast.DictComp object at 0x7da1b21da860>, <ast.DictComp object at 0x7da1b21dbee0>]]]
return[constant[True]] | keyword[def] identifier[set_game_score] (
identifier[self] ,
identifier[user_id] : identifier[Union] [ identifier[int] , identifier[str] ],
identifier[score] : identifier[int] ,
identifier[force] : identifier[bool] = keyword[None] ,
identifier[disable_edit_message] : identifier[bool] = keyword[None] ,
identifier[chat_id] : identifier[Union] [ identifier[int] , identifier[str] ]= keyword[None] ,
identifier[message_id] : identifier[int] = keyword[None]
):
literal[string]
identifier[r] = identifier[self] . identifier[send] (
identifier[functions] . identifier[messages] . identifier[SetGameScore] (
identifier[peer] = identifier[self] . identifier[resolve_peer] ( identifier[chat_id] ),
identifier[score] = identifier[score] ,
identifier[id] = identifier[message_id] ,
identifier[user_id] = identifier[self] . identifier[resolve_peer] ( identifier[user_id] ),
identifier[force] = identifier[force] keyword[or] keyword[None] ,
identifier[edit_message] = keyword[not] identifier[disable_edit_message] keyword[or] keyword[None]
)
)
keyword[for] identifier[i] keyword[in] identifier[r] . identifier[updates] :
keyword[if] identifier[isinstance] ( identifier[i] ,( identifier[types] . identifier[UpdateEditMessage] , identifier[types] . identifier[UpdateEditChannelMessage] )):
keyword[return] identifier[pyrogram] . identifier[Message] . identifier[_parse] (
identifier[self] , identifier[i] . identifier[message] ,
{ identifier[i] . identifier[id] : identifier[i] keyword[for] identifier[i] keyword[in] identifier[r] . identifier[users] },
{ identifier[i] . identifier[id] : identifier[i] keyword[for] identifier[i] keyword[in] identifier[r] . identifier[chats] }
)
keyword[return] keyword[True] | def set_game_score(self, user_id: Union[int, str], score: int, force: bool=None, disable_edit_message: bool=None, chat_id: Union[int, str]=None, message_id: int=None):
# inline_message_id: str = None): TODO Add inline_message_id
'Use this method to set the score of the specified user in a game.\n\n Args:\n user_id (``int`` | ``str``):\n Unique identifier (int) or username (str) of the target chat.\n For your personal cloud (Saved Messages) you can simply use "me" or "self".\n For a contact that exists in your Telegram address book you can use his phone number (str).\n\n score (``int``):\n New score, must be non-negative.\n\n force (``bool``, *optional*):\n Pass True, if the high score is allowed to decrease.\n This can be useful when fixing mistakes or banning cheaters.\n\n disable_edit_message (``bool``, *optional*):\n Pass True, if the game message should not be automatically edited to include the current scoreboard.\n\n chat_id (``int`` | ``str``, *optional*):\n Unique identifier (int) or username (str) of the target chat.\n For your personal cloud (Saved Messages) you can simply use "me" or "self".\n For a contact that exists in your Telegram address book you can use his phone number (str).\n Required if inline_message_id is not specified.\n\n message_id (``int``, *optional*):\n Identifier of the sent message.\n Required if inline_message_id is not specified.\n\n Returns:\n On success, if the message was sent by the bot, returns the edited :obj:`Message <pyrogram.Message>`,\n otherwise returns True.\n\n Raises:\n :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.\n :class:`BotScoreNotModified` if the new score is not greater than the user\'s current score in the chat and force is False.\n '
r = self.send(functions.messages.SetGameScore(peer=self.resolve_peer(chat_id), score=score, id=message_id, user_id=self.resolve_peer(user_id), force=force or None, edit_message=not disable_edit_message or None))
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return pyrogram.Message._parse(self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return True |
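
A hedged usage sketch for set_game_score; 'my_account' is a placeholder session name and the identifiers are illustrative.

from pyrogram import Client

app = Client('my_account')   # placeholder session name

with app:
    app.set_game_score(
        user_id=123456789,   # the player whose score is being set
        score=1500,
        chat_id='me',        # chat containing the game message
        message_id=42,
        force=True)          # allow lowering an existing high score
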
def labelForAction(self, action):
"""
        Returns the label that contains the given action.
:return <XDockActionLabel> || None
"""
for label in self.actionLabels():
if label.action() == action:
return label
return None | def function[labelForAction, parameter[self, action]]:
constant[
    Returns the label that contains the given action.
:return <XDockActionLabel> || None
]
for taget[name[label]] in starred[call[name[self].actionLabels, parameter[]]] begin[:]
if compare[call[name[label].action, parameter[]] equal[==] name[action]] begin[:]
return[name[label]]
return[constant[None]] | keyword[def] identifier[labelForAction] ( identifier[self] , identifier[action] ):
literal[string]
keyword[for] identifier[label] keyword[in] identifier[self] . identifier[actionLabels] ():
keyword[if] identifier[label] . identifier[action] ()== identifier[action] :
keyword[return] identifier[label]
keyword[return] keyword[None] | def labelForAction(self, action):
"""
        Returns the label that contains the given action.
:return <XDockActionLabel> || None
"""
for label in self.actionLabels():
if label.action() == action:
return label # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['label']]
return None |
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses | def function[send, parameter[self, sender]]:
constant[
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
]
variable[responses] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20e955e10> begin[:]
return[name[responses]]
for taget[name[receiver]] in starred[call[name[self]._live_receivers, parameter[name[sender]]]] begin[:]
variable[response] assign[=] call[name[receiver], parameter[]]
call[name[responses].append, parameter[tuple[[<ast.Name object at 0x7da1b060b430>, <ast.Name object at 0x7da1b060a230>]]]]
return[name[responses]] | keyword[def] identifier[send] ( identifier[self] , identifier[sender] ,** identifier[named] ):
literal[string]
identifier[responses] =[]
keyword[if] keyword[not] identifier[self] . identifier[receivers] keyword[or] identifier[self] . identifier[sender_receivers_cache] . identifier[get] ( identifier[sender] ) keyword[is] identifier[NO_RECEIVERS] :
keyword[return] identifier[responses]
keyword[for] identifier[receiver] keyword[in] identifier[self] . identifier[_live_receivers] ( identifier[sender] ):
identifier[response] = identifier[receiver] ( identifier[signal] = identifier[self] , identifier[sender] = identifier[sender] ,** identifier[named] )
identifier[responses] . identifier[append] (( identifier[receiver] , identifier[response] ))
keyword[return] identifier[responses] | def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses # depends on [control=['if'], data=[]]
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response)) # depends on [control=['for'], data=['receiver']]
return responses |
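# A minimal usage sketch for the send() dispatcher above. The DemoSignal
# class, the NO_RECEIVERS sentinel, and the receiver wiring are assumptions
# made so the snippet runs standalone; the original (Django-style) Signal
# also manages weak references and a real per-sender receiver cache.
NO_RECEIVERS = object()  # sentinel meaning "cached: nobody listens to this sender"

class DemoSignal:
    def __init__(self):
        self.receivers = []
        self.sender_receivers_cache = {}

    def _live_receivers(self, sender):
        # The real implementation filters out dead weak references here.
        return self.receivers

    def send(self, sender, **named):
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses
        for receiver in self._live_receivers(sender):
            responses.append((receiver, receiver(signal=self, sender=sender, **named)))
        return responses

def on_event(signal, sender, **named):
    return "got %s" % named.get("payload")

sig = DemoSignal()
sig.receivers.append(on_event)
print(sig.send(sender=None, payload="hello"))  # [(<function on_event ...>, 'got hello')]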
def search(self, **kwargs):
"""Search.
:return:
An :class:`~.AmazonSearch` iterable.
"""
region = kwargs.get('region', self.region)
kwargs.update({'region': region})
return AmazonSearch(self.api, self.aws_associate_tag, **kwargs) | def function[search, parameter[self]]:
constant[Search.
:return:
An :class:`~.AmazonSearch` iterable.
]
variable[region] assign[=] call[name[kwargs].get, parameter[constant[region], name[self].region]]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da20c6a9240>], [<ast.Name object at 0x7da20c6a8dc0>]]]]
return[call[name[AmazonSearch], parameter[name[self].api, name[self].aws_associate_tag]]] | keyword[def] identifier[search] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[region] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[region] )
identifier[kwargs] . identifier[update] ({ literal[string] : identifier[region] })
keyword[return] identifier[AmazonSearch] ( identifier[self] . identifier[api] , identifier[self] . identifier[aws_associate_tag] ,** identifier[kwargs] ) | def search(self, **kwargs):
"""Search.
:return:
An :class:`~.AmazonSearch` iterable.
"""
region = kwargs.get('region', self.region)
kwargs.update({'region': region})
return AmazonSearch(self.api, self.aws_associate_tag, **kwargs) |
def createLearningRateScheduler(self, optimizer):
"""
Creates the learning rate scheduler and attaches it to the optimizer
"""
return torch.optim.lr_scheduler.StepLR(optimizer,
step_size=1,
gamma=self.lr_scheduler_gamma) | def function[createLearningRateScheduler, parameter[self, optimizer]]:
constant[
Creates the learning rate scheduler and attaches it to the optimizer
]
return[call[name[torch].optim.lr_scheduler.StepLR, parameter[name[optimizer]]]] | keyword[def] identifier[createLearningRateScheduler] ( identifier[self] , identifier[optimizer] ):
literal[string]
keyword[return] identifier[torch] . identifier[optim] . identifier[lr_scheduler] . identifier[StepLR] ( identifier[optimizer] ,
identifier[step_size] = literal[int] ,
identifier[gamma] = identifier[self] . identifier[lr_scheduler_gamma] ) | def createLearningRateScheduler(self, optimizer):
"""
Creates the learning rate scheduler and attaches it to the optimizer
"""
return torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=self.lr_scheduler_gamma) |
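# Illustration of the schedule created above: with step_size=1, StepLR
# multiplies the learning rate by gamma after every epoch, i.e.
# lr_t = lr_0 * gamma ** t. The model, base lr, and gamma are placeholders.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)

for epoch in range(3):
    # ... one epoch of optimizer.step() calls would go here ...
    scheduler.step()
    print(epoch, optimizer.param_groups[0]["lr"])  # ~0.09, ~0.081, ~0.0729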
def ac_factory(path=""):
"""Attribute Converter factory
:param path: The path to a directory where the attribute maps are expected
to reside.
:return: A list of AttributeConverter instances
"""
acs = []
if path:
if path not in sys.path:
sys.path.insert(0, path)
for fil in os.listdir(path):
if fil.endswith(".py"):
mod = import_module(fil[:-3])
for key, item in mod.__dict__.items():
if key.startswith("__"):
continue
if isinstance(item,
dict) and "to" in item and "fro" in item:
atco = AttributeConverter(item["identifier"])
atco.from_dict(item)
acs.append(atco)
else:
from saml2 import attributemaps
for typ in attributemaps.__all__:
mod = import_module(".%s" % typ, "saml2.attributemaps")
for key, item in mod.__dict__.items():
if key.startswith("__"):
continue
if isinstance(item, dict) and "to" in item and "fro" in item:
atco = AttributeConverter(item["identifier"])
atco.from_dict(item)
acs.append(atco)
return acs | def function[ac_factory, parameter[path]]:
constant[Attribute Converter factory
:param path: The path to a directory where the attribute maps are expected
to reside.
:return: A list of AttributeConverter instances
]
variable[acs] assign[=] list[[]]
if name[path] begin[:]
if compare[name[path] <ast.NotIn object at 0x7da2590d7190> name[sys].path] begin[:]
call[name[sys].path.insert, parameter[constant[0], name[path]]]
for taget[name[fil]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:]
if call[name[fil].endswith, parameter[constant[.py]]] begin[:]
variable[mod] assign[=] call[name[import_module], parameter[call[name[fil]][<ast.Slice object at 0x7da1b206a6e0>]]]
for taget[tuple[[<ast.Name object at 0x7da1b206a230>, <ast.Name object at 0x7da1b206b310>]]] in starred[call[name[mod].__dict__.items, parameter[]]] begin[:]
if call[name[key].startswith, parameter[constant[__]]] begin[:]
continue
if <ast.BoolOp object at 0x7da1b206ad40> begin[:]
variable[atco] assign[=] call[name[AttributeConverter], parameter[call[name[item]][constant[identifier]]]]
call[name[atco].from_dict, parameter[name[item]]]
call[name[acs].append, parameter[name[atco]]]
return[name[acs]] | keyword[def] identifier[ac_factory] ( identifier[path] = literal[string] ):
literal[string]
identifier[acs] =[]
keyword[if] identifier[path] :
keyword[if] identifier[path] keyword[not] keyword[in] identifier[sys] . identifier[path] :
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[path] )
keyword[for] identifier[fil] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ):
keyword[if] identifier[fil] . identifier[endswith] ( literal[string] ):
identifier[mod] = identifier[import_module] ( identifier[fil] [:- literal[int] ])
keyword[for] identifier[key] , identifier[item] keyword[in] identifier[mod] . identifier[__dict__] . identifier[items] ():
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[item] ,
identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[item] keyword[and] literal[string] keyword[in] identifier[item] :
identifier[atco] = identifier[AttributeConverter] ( identifier[item] [ literal[string] ])
identifier[atco] . identifier[from_dict] ( identifier[item] )
identifier[acs] . identifier[append] ( identifier[atco] )
keyword[else] :
keyword[from] identifier[saml2] keyword[import] identifier[attributemaps]
keyword[for] identifier[typ] keyword[in] identifier[attributemaps] . identifier[__all__] :
identifier[mod] = identifier[import_module] ( literal[string] % identifier[typ] , literal[string] )
keyword[for] identifier[key] , identifier[item] keyword[in] identifier[mod] . identifier[__dict__] . identifier[items] ():
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[item] keyword[and] literal[string] keyword[in] identifier[item] :
identifier[atco] = identifier[AttributeConverter] ( identifier[item] [ literal[string] ])
identifier[atco] . identifier[from_dict] ( identifier[item] )
identifier[acs] . identifier[append] ( identifier[atco] )
keyword[return] identifier[acs] | def ac_factory(path=''):
"""Attribute Converter factory
:param path: The path to a directory where the attribute maps are expected
to reside.
:return: A list of AttributeConverter instances
"""
acs = []
if path:
if path not in sys.path:
sys.path.insert(0, path) # depends on [control=['if'], data=['path']]
for fil in os.listdir(path):
if fil.endswith('.py'):
mod = import_module(fil[:-3])
for (key, item) in mod.__dict__.items():
if key.startswith('__'):
continue # depends on [control=['if'], data=[]]
if isinstance(item, dict) and 'to' in item and ('fro' in item):
atco = AttributeConverter(item['identifier'])
atco.from_dict(item)
acs.append(atco) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fil']] # depends on [control=['if'], data=[]]
else:
from saml2 import attributemaps
for typ in attributemaps.__all__:
mod = import_module('.%s' % typ, 'saml2.attributemaps')
for (key, item) in mod.__dict__.items():
if key.startswith('__'):
continue # depends on [control=['if'], data=[]]
if isinstance(item, dict) and 'to' in item and ('fro' in item):
atco = AttributeConverter(item['identifier'])
atco.from_dict(item)
acs.append(atco) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['typ']]
return acs |
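# Sketch of the module-level dict shape ac_factory() scans for: any dict
# exposing "to" and "fro" mappings (plus an "identifier") in an attribute-map
# module is turned into an AttributeConverter. The URN and OIDs below are
# illustrative placeholders, not a real saml2 attribute map.
MAP = {
    "identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
    "fro": {
        "urn:oid:2.5.4.4": "sn",            # wire name -> friendly name
        "urn:oid:2.5.4.42": "givenName",
    },
    "to": {
        "sn": "urn:oid:2.5.4.4",            # friendly name -> wire name
        "givenName": "urn:oid:2.5.4.42",
    },
}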
def init_state(self, x):
"""
Initialize t, m, and u
"""
optim_state = {}
optim_state["t"] = 0.
optim_state["m"] = [tf.zeros_like(v) for v in x]
optim_state["u"] = [tf.zeros_like(v) for v in x]
return optim_state | def function[init_state, parameter[self, x]]:
constant[
Initialize t, m, and u
]
variable[optim_state] assign[=] dictionary[[], []]
call[name[optim_state]][constant[t]] assign[=] constant[0.0]
call[name[optim_state]][constant[m]] assign[=] <ast.ListComp object at 0x7da20c7946d0>
call[name[optim_state]][constant[u]] assign[=] <ast.ListComp object at 0x7da18dc98eb0>
return[name[optim_state]] | keyword[def] identifier[init_state] ( identifier[self] , identifier[x] ):
literal[string]
identifier[optim_state] ={}
identifier[optim_state] [ literal[string] ]= literal[int]
identifier[optim_state] [ literal[string] ]=[ identifier[tf] . identifier[zeros_like] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[x] ]
identifier[optim_state] [ literal[string] ]=[ identifier[tf] . identifier[zeros_like] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[x] ]
keyword[return] identifier[optim_state] | def init_state(self, x):
"""
Initialize t, m, and u
"""
optim_state = {}
optim_state['t'] = 0.0
optim_state['m'] = [tf.zeros_like(v) for v in x]
optim_state['u'] = [tf.zeros_like(v) for v in x]
return optim_state |
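# Minimal sketch of what init_state() above produces: a zeroed step counter
# plus per-variable slots shaped like each input (the t/m/u naming suggests
# an Adam-family optimizer, e.g. m for a first-moment estimate). The stub
# class is an assumption so the snippet runs standalone.
import tensorflow as tf

class StubOptimizer:
    def init_state(self, x):
        return {"t": 0.,
                "m": [tf.zeros_like(v) for v in x],
                "u": [tf.zeros_like(v) for v in x]}

state = StubOptimizer().init_state([tf.zeros([3]), tf.zeros([2, 2])])
print(state["t"], [v.shape for v in state["m"]])  # 0.0, shapes (3,) and (2, 2)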
def which_lease_to_steal(self, stealable_leases, have_lease_count):
"""
Determines and returns which lease to steal
If the number of leases is a multiple of the number of hosts, then the desired
configuration is that all hosts own the same number of leases, and the
difference between the "biggest" owner and any other is 0.
If the number of leases is not a multiple of the number of hosts, then the most
even configuration possible is for some hosts to have (leases/hosts) leases
and others to have ((leases/hosts) + 1). For example, for 16 partitions
distributed over five hosts, the distribution would be 4, 3, 3, 3, 3, or any of the
possible reorderings.
In either case, if the difference between this host and the biggest owner is 2 or more,
then the system is not in the most evenly-distributed configuration, so steal one lease
from the biggest. If there is a tie for biggest, we pick whichever appears first in the
list because it doesn't really matter which "biggest" is trimmed down.
Stealing one at a time prevents flapping because it reduces the difference between the
biggest and this host by two at a time. If the starting difference is two or greater,
then the difference cannot end up below 0. This host may become tied for biggest, but it
cannot become larger than the host that it is stealing from.
:param stealable_leases: List of leases to determine which can be stolen.
:type stealable_leases: list[~azure.eventprocessorhost.lease.Lease]
:param have_lease_count: Lease count.
:type have_lease_count: int
:rtype: ~azure.eventprocessorhost.lease.Lease
"""
counts_by_owner = self.count_leases_by_owner(stealable_leases)
biggest_owner = (sorted(counts_by_owner.items(), key=lambda kv: kv[1])).pop()
steal_this_lease = None
if (biggest_owner[1] - have_lease_count) >= 2:
steal_this_lease = [l for l in stealable_leases if l.owner == biggest_owner[0]][0]
return steal_this_lease | def function[which_lease_to_steal, parameter[self, stealable_leases, have_lease_count]]:
constant[
Determines and returns which lease to steal
If the number of leases is a multiple of the number of hosts, then the desired
configuration is that all hosts own the same number of leases, and the
difference between the "biggest" owner and any other is 0.
If the number of leases is not a multiple of the number of hosts, then the most
even configuration possible is for some hosts to have (leases/hosts) leases
and others to have ((leases/hosts) + 1). For example, for 16 partitions
distributed over five hosts, the distribution would be 4, 3, 3, 3, 3, or any of the
possible reorderings.
In either case, if the difference between this host and the biggest owner is 2 or more,
then the system is not in the most evenly-distributed configuration, so steal one lease
from the biggest. If there is a tie for biggest, we pick whichever appears first in the
list because it doesn't really matter which "biggest" is trimmed down.
Stealing one at a time prevents flapping because it reduces the difference between the
biggest and this host by two at a time. If the starting difference is two or greater,
then the difference cannot end up below 0. This host may become tied for biggest, but it
cannot become larger than the host that it is stealing from.
:param stealable_leases: List of leases to determine which can be stolen.
:type stealable_leases: list[~azure.eventprocessorhost.lease.Lease]
:param have_lease_count: Lease count.
:type have_lease_count: int
:rtype: ~azure.eventprocessorhost.lease.Lease
]
variable[counts_by_owner] assign[=] call[name[self].count_leases_by_owner, parameter[name[stealable_leases]]]
variable[biggest_owner] assign[=] call[call[name[sorted], parameter[call[name[counts_by_owner].items, parameter[]]]].pop, parameter[]]
variable[steal_this_lease] assign[=] constant[None]
if compare[binary_operation[call[name[biggest_owner]][constant[1]] - name[have_lease_count]] greater_or_equal[>=] constant[2]] begin[:]
variable[steal_this_lease] assign[=] call[<ast.ListComp object at 0x7da20c9926b0>][constant[0]]
return[name[steal_this_lease]] | keyword[def] identifier[which_lease_to_steal] ( identifier[self] , identifier[stealable_leases] , identifier[have_lease_count] ):
literal[string]
identifier[counts_by_owner] = identifier[self] . identifier[count_leases_by_owner] ( identifier[stealable_leases] )
identifier[biggest_owner] =( identifier[sorted] ( identifier[counts_by_owner] . identifier[items] (), identifier[key] = keyword[lambda] identifier[kv] : identifier[kv] [ literal[int] ])). identifier[pop] ()
identifier[steal_this_lease] = keyword[None]
keyword[if] ( identifier[biggest_owner] [ literal[int] ]- identifier[have_lease_count] )>= literal[int] :
identifier[steal_this_lease] =[ identifier[l] keyword[for] identifier[l] keyword[in] identifier[stealable_leases] keyword[if] identifier[l] . identifier[owner] == identifier[biggest_owner] [ literal[int] ]][ literal[int] ]
keyword[return] identifier[steal_this_lease] | def which_lease_to_steal(self, stealable_leases, have_lease_count):
"""
Determines and returns which lease to steal
If the number of leases is a multiple of the number of hosts, then the desired
configuration is that all hosts own the same number of leases, and the
difference between the "biggest" owner and any other is 0.
If the number of leases is not a multiple of the number of hosts, then the most
even configuration possible is for some hosts to have (leases/hosts) leases
and others to have ((leases/hosts) + 1). For example, for 16 partitions
distributed over five hosts, the distribution would be 4, 3, 3, 3, 3, or any of the
possible reorderings.
In either case, if the difference between this host and the biggest owner is 2 or more,
then the system is not in the most evenly-distributed configuration, so steal one lease
from the biggest. If there is a tie for biggest, we pick whichever appears first in the
list because it doesn't really matter which "biggest" is trimmed down.
Stealing one at a time prevents flapping because it reduces the difference between the
biggest and this host by two at a time. If the starting difference is two or greater,
then the difference cannot end up below 0. This host may become tied for biggest, but it
cannot become larger than the host that it is stealing from.
:param stealable_leases: List of leases to determine which can be stolen.
:type stealable_leases: list[~azure.eventprocessorhost.lease.Lease]
:param have_lease_count: Lease count.
:type have_lease_count: int
:rtype: ~azure.eventprocessorhost.lease.Lease
"""
counts_by_owner = self.count_leases_by_owner(stealable_leases)
biggest_owner = sorted(counts_by_owner.items(), key=lambda kv: kv[1]).pop()
steal_this_lease = None
if biggest_owner[1] - have_lease_count >= 2:
steal_this_lease = [l for l in stealable_leases if l.owner == biggest_owner[0]][0] # depends on [control=['if'], data=[]]
return steal_this_lease |
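# Worked sketch of the stealing rule above with a stand-in Lease type. With
# 16 leases over 5 hosts the even split is 4/3/3/3/3: a host owning 3 sees a
# difference of 1 from the biggest owner and steals nothing, while a host
# owning only 1 sees a difference >= 2 and steals one lease from the biggest.
from collections import Counter, namedtuple

Lease = namedtuple("Lease", ["owner", "partition"])

def pick_lease_to_steal(stealable, have_lease_count):
    counts = Counter(lease.owner for lease in stealable)
    biggest_owner, biggest_count = sorted(counts.items(), key=lambda kv: kv[1])[-1]
    if biggest_count - have_lease_count >= 2:
        return next(lease for lease in stealable if lease.owner == biggest_owner)
    return None

leases = ([Lease("hostA", p) for p in range(4)] +
          [Lease("hostB", p) for p in range(4, 7)])
print(pick_lease_to_steal(leases, have_lease_count=3))  # None (difference is 1)
print(pick_lease_to_steal(leases, have_lease_count=1))  # Lease(owner='hostA', partition=0)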
def forwards(self, orm):
"Write your forwards methods here."
for qde_xtf in orm['xtf.QualifiedDublinCoreElement'].objects.all().order_by('id'):
qde = orm.QualifiedDublinCoreElement()
qde.content = qde_xtf.content
qde.term = qde_xtf.term
qde.qualifier = qde_xtf.qualifier
#import pdb;pdb.set_trace()
c= orm['contenttypes.ContentType'].objects.get(pk=qde_xtf.content_type.pk)
#c.name = qde_xtf.content_type.name
#c.app_label = qde_xtf.content_type.app_label
#c.model = qde_xtf.content_type.model
qde.content_type = c
#qde.content_type = qde_xtf.content_type
qde.object_id = qde_xtf.object_id
qde.save()
for qdeh_xtf in orm['xtf.QualifiedDublinCoreElementHistory'].objects.all().order_by('id'):
qdeh = orm.QualifiedDublinCoreElementHistory()
qdeh.content = qdeh_xtf.content
qdeh.term = qdeh_xtf.term
qdeh.qualifier = qdeh_xtf.qualifier
c= orm['contenttypes.ContentType'].objects.get(pk=qdeh_xtf.content_type.pk)
#c.name = qdeh_xtf.content_type.name
#c.app_label = qdeh_xtf.content_type.app_label
#c.model = qdeh_xtf.content_type.model
qdeh.content_type = c
#qdeh.content_type = qdeh_xtf.content_type
qdeh.object_id = qdeh_xtf.object_id
qdeh.qdce = orm['dublincore.QualifiedDublinCoreElement'].objects.get(pk=qdeh_xtf.qdce.pk)
qdeh.qdce_id_stored = qdeh_xtf.qdce_id_stored
qdeh.save() | def function[forwards, parameter[self, orm]]:
constant[Write your forwards methods here.]
for taget[name[qde_xtf]] in starred[call[call[call[name[orm]][constant[xtf.QualifiedDublinCoreElement]].objects.all, parameter[]].order_by, parameter[constant[id]]]] begin[:]
variable[qde] assign[=] call[name[orm].QualifiedDublinCoreElement, parameter[]]
name[qde].content assign[=] name[qde_xtf].content
name[qde].term assign[=] name[qde_xtf].term
name[qde].qualifier assign[=] name[qde_xtf].qualifier
variable[c] assign[=] call[call[name[orm]][constant[contenttypes.ContentType]].objects.get, parameter[]]
name[qde].content_type assign[=] name[c]
name[qde].object_id assign[=] name[qde_xtf].object_id
call[name[qde].save, parameter[]]
for taget[name[qdeh_xtf]] in starred[call[call[call[name[orm]][constant[xtf.QualifiedDublinCoreElementHistory]].objects.all, parameter[]].order_by, parameter[constant[id]]]] begin[:]
variable[qdeh] assign[=] call[name[orm].QualifiedDublinCoreElementHistory, parameter[]]
name[qdeh].content assign[=] name[qdeh_xtf].content
name[qdeh].term assign[=] name[qdeh_xtf].term
name[qdeh].qualifier assign[=] name[qdeh_xtf].qualifier
variable[c] assign[=] call[call[name[orm]][constant[contenttypes.ContentType]].objects.get, parameter[]]
name[qdeh].content_type assign[=] name[c]
name[qdeh].object_id assign[=] name[qdeh_xtf].object_id
name[qdeh].qdce assign[=] call[call[name[orm]][constant[dublincore.QualifiedDublinCoreElement]].objects.get, parameter[]]
name[qdeh].qdce_id_stored assign[=] name[qdeh_xtf].qdce_id_stored
call[name[qdeh].save, parameter[]] | keyword[def] identifier[forwards] ( identifier[self] , identifier[orm] ):
literal[string]
keyword[for] identifier[qde_xtf] keyword[in] identifier[orm] [ literal[string] ]. identifier[objects] . identifier[all] (). identifier[order_by] ( literal[string] ):
identifier[qde] = identifier[orm] . identifier[QualifiedDublinCoreElement] ()
identifier[qde] . identifier[content] = identifier[qde_xtf] . identifier[content]
identifier[qde] . identifier[term] = identifier[qde_xtf] . identifier[term]
identifier[qde] . identifier[qualifier] = identifier[qde_xtf] . identifier[qualifier]
identifier[c] = identifier[orm] [ literal[string] ]. identifier[objects] . identifier[get] ( identifier[pk] = identifier[qde_xtf] . identifier[content_type] . identifier[pk] )
identifier[qde] . identifier[content_type] = identifier[c]
identifier[qde] . identifier[object_id] = identifier[qde_xtf] . identifier[object_id]
identifier[qde] . identifier[save] ()
keyword[for] identifier[qdeh_xtf] keyword[in] identifier[orm] [ literal[string] ]. identifier[objects] . identifier[all] (). identifier[order_by] ( literal[string] ):
identifier[qdeh] = identifier[orm] . identifier[QualifiedDublinCoreElementHistory] ()
identifier[qdeh] . identifier[content] = identifier[qdeh_xtf] . identifier[content]
identifier[qdeh] . identifier[term] = identifier[qdeh_xtf] . identifier[term]
identifier[qdeh] . identifier[qualifier] = identifier[qdeh_xtf] . identifier[qualifier]
identifier[c] = identifier[orm] [ literal[string] ]. identifier[objects] . identifier[get] ( identifier[pk] = identifier[qdeh_xtf] . identifier[content_type] . identifier[pk] )
identifier[qdeh] . identifier[content_type] = identifier[c]
identifier[qdeh] . identifier[object_id] = identifier[qdeh_xtf] . identifier[object_id]
identifier[qdeh] . identifier[qdce] = identifier[orm] [ literal[string] ]. identifier[objects] . identifier[get] ( identifier[pk] = identifier[qdeh_xtf] . identifier[qdce] . identifier[pk] )
identifier[qdeh] . identifier[qdce_id_stored] = identifier[qdeh_xtf] . identifier[qdce_id_stored]
identifier[qdeh] . identifier[save] () | def forwards(self, orm):
"""Write your forwards methods here."""
for qde_xtf in orm['xtf.QualifiedDublinCoreElement'].objects.all().order_by('id'):
qde = orm.QualifiedDublinCoreElement()
qde.content = qde_xtf.content
qde.term = qde_xtf.term
qde.qualifier = qde_xtf.qualifier
#import pdb;pdb.set_trace()
c = orm['contenttypes.ContentType'].objects.get(pk=qde_xtf.content_type.pk)
#c.name = qde_xtf.content_type.name
#c.app_label = qde_xtf.content_type.app_label
#c.model = qde_xtf.content_type.model
qde.content_type = c
#qde.content_type = qde_xtf.content_type
qde.object_id = qde_xtf.object_id
qde.save() # depends on [control=['for'], data=['qde_xtf']]
for qdeh_xtf in orm['xtf.QualifiedDublinCoreElementHistory'].objects.all().order_by('id'):
qdeh = orm.QualifiedDublinCoreElementHistory()
qdeh.content = qdeh_xtf.content
qdeh.term = qdeh_xtf.term
qdeh.qualifier = qdeh_xtf.qualifier
c = orm['contenttypes.ContentType'].objects.get(pk=qdeh_xtf.content_type.pk)
#c.name = qdeh_xtf.content_type.name
#c.app_label = qdeh_xtf.content_type.app_label
#c.model = qdeh_xtf.content_type.model
qdeh.content_type = c
#qdeh.content_type = qdeh_xtf.content_type
qdeh.object_id = qdeh_xtf.object_id
qdeh.qdce = orm['dublincore.QualifiedDublinCoreElement'].objects.get(pk=qdeh_xtf.qdce.pk)
qdeh.qdce_id_stored = qdeh_xtf.qdce_id_stored
qdeh.save() # depends on [control=['for'], data=['qdeh_xtf']] |
def bind(self, func: Callable[[Any], IO]) -> 'Put':
"""IO a -> (a -> IO b) -> IO b"""
text, a = self._value
return Put(text, a.bind(func)) | def function[bind, parameter[self, func]]:
constant[IO a -> (a -> IO b) -> IO b]
<ast.Tuple object at 0x7da1b0bdafe0> assign[=] name[self]._value
return[call[name[Put], parameter[name[text], call[name[a].bind, parameter[name[func]]]]]] | keyword[def] identifier[bind] ( identifier[self] , identifier[func] : identifier[Callable] [[ identifier[Any] ], identifier[IO] ])-> literal[string] :
literal[string]
identifier[text] , identifier[a] = identifier[self] . identifier[_value]
keyword[return] identifier[Put] ( identifier[text] , identifier[a] . identifier[bind] ( identifier[func] )) | def bind(self, func: Callable[[Any], IO]) -> 'Put':
"""IO a -> (a -> IO b) -> IO b"""
(text, a) = self._value
return Put(text, a.bind(func)) |
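# A standalone sketch of the Put/IO bind pattern above: Put pairs a string to
# output with a continuation IO action, and bind threads a Kleisli function
# through that wrapped action. The Return type and run() method here are
# assumptions for illustration; the real IO type has more cases (e.g. Get).
class Return:
    def __init__(self, value):
        self._value = value
    def bind(self, func):
        return func(self._value)
    def run(self):
        return self._value

class PutSketch:
    def __init__(self, text, action):
        self._value = (text, action)
    def bind(self, func):
        text, a = self._value
        return PutSketch(text, a.bind(func))
    def run(self):
        text, a = self._value
        print(text)
        return a.run()

prog = PutSketch("hello", Return(1)).bind(lambda x: Return(x + 1))
print(prog.run())  # prints "hello", then 2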
def ends(self, s):
length = len(s)
""" True iff 0...k ends with string s """
res = (self.b[self.k-length+1:self.k+1] == s)
if res:
self.j = self.k - length
return res | def function[ends, parameter[self, s]]:
variable[length] assign[=] call[name[len], parameter[name[s]]]
constant[ True iff 0...k ends with string s ]
variable[res] assign[=] compare[call[name[self].b][<ast.Slice object at 0x7da1b1da31c0>] equal[==] name[s]]
if name[res] begin[:]
name[self].j assign[=] binary_operation[name[self].k - name[length]]
return[name[res]] | keyword[def] identifier[ends] ( identifier[self] , identifier[s] ):
identifier[length] = identifier[len] ( identifier[s] )
literal[string]
identifier[res] =( identifier[self] . identifier[b] [ identifier[self] . identifier[k] - identifier[length] + literal[int] : identifier[self] . identifier[k] + literal[int] ]== identifier[s] )
keyword[if] identifier[res] :
identifier[self] . identifier[j] = identifier[self] . identifier[k] - identifier[length]
keyword[return] identifier[res] | def ends(self, s):
length = len(s)
' True iff 0...k ends with string s '
res = self.b[self.k - length + 1:self.k + 1] == s
if res:
self.j = self.k - length # depends on [control=['if'], data=[]]
return res |
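# Quick sketch of how ends() above is used in a Porter-style stemmer: b holds
# the word buffer, k the index of its last character, and a successful suffix
# match records in j where the stem ends. The tiny wrapper class is a
# stand-in for the full stemmer.
class StemSketch:
    def __init__(self, word):
        self.b = word
        self.k = len(word) - 1
        self.j = 0
    def ends(self, s):
        length = len(s)
        res = (self.b[self.k - length + 1:self.k + 1] == s)
        if res:
            self.j = self.k - length
        return res

st = StemSketch("happiness")
print(st.ends("ness"), st.j)  # True 4 -> stem b[0:j+1] == "happi"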
def sanitize_unicode(item):
"""Safely pass string values to the CASA tools.
item
A value to be passed to a CASA tool.
In Python 2, the bindings to CASA tasks expect to receive all string values
as binary data (:class:`str`) and not Unicode. But :mod:`pwkit` often uses
the ``from __future__ import unicode_literals`` statement to prepare for
Python 3 compatibility, and other Python modules are getting better about
using Unicode consistently, so more and more module code ends up using
Unicode strings in cases where they might get exposed to CASA. Doing so
will lead to errors.
This helper function converts Unicode into UTF-8 encoded bytes for
arguments that you might pass to a CASA tool. It will leave non-strings
unchanged and recursively transform collections, so you can safely use it
just about anywhere.
I usually import this as just ``b`` and write ``tool.method(b(arg))``, in
analogy with the ``b''`` byte string syntax. This leads to code such as::
from pwkit.environments.casa.util import tools, sanitize_unicode as b
tb = tools.table()
path = u'data.ms'
tb.open(path) # => raises exception
tb.open(b(path)) # => works
"""
if isinstance(item, text_type):
return item.encode('utf8')
if isinstance(item, dict):
return dict((sanitize_unicode(k), sanitize_unicode(v)) for k, v in six.iteritems(item))
if isinstance(item,(list, tuple)):
return item.__class__(sanitize_unicode(x) for x in item)
from ...io import Path
if isinstance(item, Path):
return str(item)
return item | def function[sanitize_unicode, parameter[item]]:
constant[Safely pass string values to the CASA tools.
item
A value to be passed to a CASA tool.
In Python 2, the bindings to CASA tasks expect to receive all string values
as binary data (:class:`str`) and not Unicode. But :mod:`pwkit` often uses
the ``from __future__ import unicode_literals`` statement to prepare for
Python 3 compatibility, and other Python modules are getting better about
using Unicode consistently, so more and more module code ends up using
Unicode strings in cases where they might get exposed to CASA. Doing so
will lead to errors.
This helper function converts Unicode into UTF-8 encoded bytes for
arguments that you might pass to a CASA tool. It will leave non-strings
unchanged and recursively transform collections, so you can safely use it
just about anywhere.
I usually import this as just ``b`` and write ``tool.method(b(arg))``, in
analogy with the ``b''`` byte string syntax. This leads to code such as::
from pwkit.environments.casa.util import tools, sanitize_unicode as b
tb = tools.table()
path = u'data.ms'
tb.open(path) # => raises exception
tb.open(b(path)) # => works
]
if call[name[isinstance], parameter[name[item], name[text_type]]] begin[:]
return[call[name[item].encode, parameter[constant[utf8]]]]
if call[name[isinstance], parameter[name[item], name[dict]]] begin[:]
return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b2778ac0>]]]
if call[name[isinstance], parameter[name[item], tuple[[<ast.Name object at 0x7da1b2778dc0>, <ast.Name object at 0x7da1b2778cd0>]]]] begin[:]
return[call[name[item].__class__, parameter[<ast.GeneratorExp object at 0x7da1b2639540>]]]
from relative_module[io] import module[Path]
if call[name[isinstance], parameter[name[item], name[Path]]] begin[:]
return[call[name[str], parameter[name[item]]]]
return[name[item]] | keyword[def] identifier[sanitize_unicode] ( identifier[item] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[text_type] ):
keyword[return] identifier[item] . identifier[encode] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[item] , identifier[dict] ):
keyword[return] identifier[dict] (( identifier[sanitize_unicode] ( identifier[k] ), identifier[sanitize_unicode] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[item] ))
keyword[if] identifier[isinstance] ( identifier[item] ,( identifier[list] , identifier[tuple] )):
keyword[return] identifier[item] . identifier[__class__] ( identifier[sanitize_unicode] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[item] )
keyword[from] ... identifier[io] keyword[import] identifier[Path]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[Path] ):
keyword[return] identifier[str] ( identifier[item] )
keyword[return] identifier[item] | def sanitize_unicode(item):
"""Safely pass string values to the CASA tools.
item
A value to be passed to a CASA tool.
In Python 2, the bindings to CASA tasks expect to receive all string values
as binary data (:class:`str`) and not Unicode. But :mod:`pwkit` often uses
the ``from __future__ import unicode_literals`` statement to prepare for
Python 3 compatibility, and other Python modules are getting better about
using Unicode consistently, so more and more module code ends up using
Unicode strings in cases where they might get exposed to CASA. Doing so
will lead to errors.
This helper function converts Unicode into UTF-8 encoded bytes for
arguments that you might pass to a CASA tool. It will leave non-strings
unchanged and recursively transform collections, so you can safely use it
just about anywhere.
I usually import this as just ``b`` and write ``tool.method(b(arg))``, in
analogy with the ``b''`` byte string syntax. This leads to code such as::
from pwkit.environments.casa.util import tools, sanitize_unicode as b
tb = tools.table()
path = u'data.ms'
tb.open(path) # => raises exception
tb.open(b(path)) # => works
"""
if isinstance(item, text_type):
return item.encode('utf8') # depends on [control=['if'], data=[]]
if isinstance(item, dict):
return dict(((sanitize_unicode(k), sanitize_unicode(v)) for (k, v) in six.iteritems(item))) # depends on [control=['if'], data=[]]
if isinstance(item, (list, tuple)):
return item.__class__((sanitize_unicode(x) for x in item)) # depends on [control=['if'], data=[]]
from ...io import Path
if isinstance(item, Path):
return str(item) # depends on [control=['if'], data=[]]
return item |
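# Recursion sketch for sanitize_unicode() above: containers are rebuilt with
# every nested text string encoded to UTF-8 bytes, which is what the Python 2
# CASA bindings expect. Assumes sanitize_unicode (often imported as `b`, per
# the docstring) is in scope.
params = {u"spw": u"0", u"files": [u"data.ms", u"cal.ms"]}
print(sanitize_unicode(params))
# Python 2 output: {'spw': '0', 'files': ['data.ms', 'cal.ms']}  (all byte strings)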
def packages(ctx, opts, owner_repo, page, page_size, query):
"""
List packages for a repository.
OWNER/REPO: Specify the OWNER namespace (i.e. user or org), and the
REPO name to list packages for that namespace and repository. All separated
by a slash.
You can use the search query (-q|--query) to filter packages:
- By name: 'my-package' (implicit) or 'name:my-package'
- By filename: 'pkg.ext' (implicit) or 'filename:pkg.ext' (explicit)
- By version: '1.0.0' (implicit) or 'version:1.0.0' (explicit)
- By arch: 'x86_64' (implicit) or 'architecture:x86_64' (explicit)
- By distro: 'el' (implicit) or 'distribution:el' (explicit)
You can also modify the search terms:
- '^foo' to anchor to start of term
- 'foo$' to anchor to end of term
- 'foo*bar' for fuzzy matching
- '~foo' for negation of the term (explicit only, e.g. name:~foo)
Multiple search terms are conjunctive (AND).
For example, to find packages named exactly foo, with a zip filename, that are
NOT the x86 architecture, use something like this:
--query 'name:^foo$ filename:.zip$ architecture:~x86'
"""
owner, repo = owner_repo
# Use stderr for messages if the output is something else (e.g. # JSON)
use_stderr = opts.output != "pretty"
click.echo("Getting list of packages ... ", nl=False, err=use_stderr)
context_msg = "Failed to get list of packages!"
with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
with maybe_spinner(opts):
packages_, page_info = list_packages(
owner=owner, repo=repo, page=page, page_size=page_size, query=query
)
click.secho("OK", fg="green", err=use_stderr)
if utils.maybe_print_as_json(opts, packages_, page_info):
return
headers = ["Name", "Version", "Status", "Owner / Repository (Identifier)"]
rows = []
for package in sorted(packages_, key=itemgetter("namespace", "slug")):
rows.append(
[
click.style(_get_package_name(package), fg="cyan"),
click.style(_get_package_version(package), fg="yellow"),
click.style(_get_package_status(package), fg="blue"),
"%(owner_slug)s/%(repo_slug)s/%(slug)s"
% {
"owner_slug": click.style(package["namespace"], fg="magenta"),
"repo_slug": click.style(package["repository"], fg="magenta"),
"slug": click.style(package["slug"], fg="green"),
},
]
)
if packages_:
click.echo()
utils.pretty_print_table(headers, rows)
click.echo()
num_results = len(packages_)
list_suffix = "package%s visible" % ("s" if num_results != 1 else "")
utils.pretty_print_list_info(
num_results=num_results, page_info=page_info, suffix=list_suffix
) | def function[packages, parameter[ctx, opts, owner_repo, page, page_size, query]]:
constant[
List packages for a repository.
OWNER/REPO: Specify the OWNER namespace (i.e. user or org), and the
REPO name to list packages for that namespace and repository. All separated
by a slash.
You can use the search query (-q|--query) to filter packages:
- By name: 'my-package' (implicit) or 'name:my-package'
- By filename: 'pkg.ext' (implicit) or 'filename:pkg.ext' (explicit)
- By version: '1.0.0' (implicit) or 'version:1.0.0' (explicit)
- By arch: 'x86_64' (implicit) or 'architecture:x86_64' (explicit)
- By distro: 'el' (implicit) or 'distribution:el' (explicit)
You can also modify the search terms:
- '^foo' to anchor to start of term
- 'foo$' to anchor to end of term
- 'foo*bar' for fuzzy matching
- '~foo' for negation of the term (explicit only, e.g. name:~foo)
Multiple search terms are conjunctive (AND).
For example, to find packages named exactly foo, with a zip filename, that are
NOT the x86 architecture, use something like this:
--query 'name:^foo$ filename:.zip$ architecture:~x86'
]
<ast.Tuple object at 0x7da1b1a1f430> assign[=] name[owner_repo]
variable[use_stderr] assign[=] compare[name[opts].output not_equal[!=] constant[pretty]]
call[name[click].echo, parameter[constant[Getting list of packages ... ]]]
variable[context_msg] assign[=] constant[Failed to get list of packages!]
with call[name[handle_api_exceptions], parameter[name[ctx]]] begin[:]
with call[name[maybe_spinner], parameter[name[opts]]] begin[:]
<ast.Tuple object at 0x7da1b1951630> assign[=] call[name[list_packages], parameter[]]
call[name[click].secho, parameter[constant[OK]]]
if call[name[utils].maybe_print_as_json, parameter[name[opts], name[packages_], name[page_info]]] begin[:]
return[None]
variable[headers] assign[=] list[[<ast.Constant object at 0x7da1b1951090>, <ast.Constant object at 0x7da1b1952770>, <ast.Constant object at 0x7da1b19a8fa0>, <ast.Constant object at 0x7da1b19a96c0>]]
variable[rows] assign[=] list[[]]
for taget[name[package]] in starred[call[name[sorted], parameter[name[packages_]]]] begin[:]
call[name[rows].append, parameter[list[[<ast.Call object at 0x7da1b19d8790>, <ast.Call object at 0x7da1b19d8490>, <ast.Call object at 0x7da1b19d9060>, <ast.BinOp object at 0x7da1b1b0d1e0>]]]]
if name[packages_] begin[:]
call[name[click].echo, parameter[]]
call[name[utils].pretty_print_table, parameter[name[headers], name[rows]]]
call[name[click].echo, parameter[]]
variable[num_results] assign[=] call[name[len], parameter[name[packages_]]]
variable[list_suffix] assign[=] binary_operation[constant[package%s visible] <ast.Mod object at 0x7da2590d6920> <ast.IfExp object at 0x7da1b1b0d330>]
call[name[utils].pretty_print_list_info, parameter[]] | keyword[def] identifier[packages] ( identifier[ctx] , identifier[opts] , identifier[owner_repo] , identifier[page] , identifier[page_size] , identifier[query] ):
literal[string]
identifier[owner] , identifier[repo] = identifier[owner_repo]
identifier[use_stderr] = identifier[opts] . identifier[output] != literal[string]
identifier[click] . identifier[echo] ( literal[string] , identifier[nl] = keyword[False] , identifier[err] = identifier[use_stderr] )
identifier[context_msg] = literal[string]
keyword[with] identifier[handle_api_exceptions] ( identifier[ctx] , identifier[opts] = identifier[opts] , identifier[context_msg] = identifier[context_msg] ):
keyword[with] identifier[maybe_spinner] ( identifier[opts] ):
identifier[packages_] , identifier[page_info] = identifier[list_packages] (
identifier[owner] = identifier[owner] , identifier[repo] = identifier[repo] , identifier[page] = identifier[page] , identifier[page_size] = identifier[page_size] , identifier[query] = identifier[query]
)
identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] , identifier[err] = identifier[use_stderr] )
keyword[if] identifier[utils] . identifier[maybe_print_as_json] ( identifier[opts] , identifier[packages_] , identifier[page_info] ):
keyword[return]
identifier[headers] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[rows] =[]
keyword[for] identifier[package] keyword[in] identifier[sorted] ( identifier[packages_] , identifier[key] = identifier[itemgetter] ( literal[string] , literal[string] )):
identifier[rows] . identifier[append] (
[
identifier[click] . identifier[style] ( identifier[_get_package_name] ( identifier[package] ), identifier[fg] = literal[string] ),
identifier[click] . identifier[style] ( identifier[_get_package_version] ( identifier[package] ), identifier[fg] = literal[string] ),
identifier[click] . identifier[style] ( identifier[_get_package_status] ( identifier[package] ), identifier[fg] = literal[string] ),
literal[string]
%{
literal[string] : identifier[click] . identifier[style] ( identifier[package] [ literal[string] ], identifier[fg] = literal[string] ),
literal[string] : identifier[click] . identifier[style] ( identifier[package] [ literal[string] ], identifier[fg] = literal[string] ),
literal[string] : identifier[click] . identifier[style] ( identifier[package] [ literal[string] ], identifier[fg] = literal[string] ),
},
]
)
keyword[if] identifier[packages_] :
identifier[click] . identifier[echo] ()
identifier[utils] . identifier[pretty_print_table] ( identifier[headers] , identifier[rows] )
identifier[click] . identifier[echo] ()
identifier[num_results] = identifier[len] ( identifier[packages_] )
identifier[list_suffix] = literal[string] %( literal[string] keyword[if] identifier[num_results] != literal[int] keyword[else] literal[string] )
identifier[utils] . identifier[pretty_print_list_info] (
identifier[num_results] = identifier[num_results] , identifier[page_info] = identifier[page_info] , identifier[suffix] = identifier[list_suffix]
) | def packages(ctx, opts, owner_repo, page, page_size, query):
"""
List packages for a repository.
OWNER/REPO: Specify the OWNER namespace (i.e. user or org), and the
REPO name to list packages for that namespace and repository. All separated
by a slash.
You can use the search query (-q|--query) to filter packages:
- By name: 'my-package' (implicit) or 'name:my-package'
- By filename: 'pkg.ext' (implicit) or 'filename:pkg.ext' (explicit)
- By version: '1.0.0' (implicit) or 'version:1.0.0' (explicit)
- By arch: 'x86_64' (implicit) or 'architecture:x86_64' (explicit)
- By distro: 'el' (implicit) or 'distribution:el' (explicit)
You can also modify the search terms:
- '^foo' to anchor to start of term
- 'foo$' to anchor to end of term
- 'foo*bar' for fuzzy matching
- '~foo' for negation of the term (explicit only, e.g. name:~foo)
Multiple search terms are conjunctive (AND).
For example, to find packages named exactly foo, with a zip filename, that are
NOT the x86 architecture, use something like this:
--query 'name:^foo$ filename:.zip$ architecture:~x86'
"""
(owner, repo) = owner_repo
# Use stderr for messages if the output is something else (e.g. # JSON)
use_stderr = opts.output != 'pretty'
click.echo('Getting list of packages ... ', nl=False, err=use_stderr)
context_msg = 'Failed to get list of packages!'
with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
with maybe_spinner(opts):
(packages_, page_info) = list_packages(owner=owner, repo=repo, page=page, page_size=page_size, query=query) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]]
click.secho('OK', fg='green', err=use_stderr)
if utils.maybe_print_as_json(opts, packages_, page_info):
return # depends on [control=['if'], data=[]]
headers = ['Name', 'Version', 'Status', 'Owner / Repository (Identifier)']
rows = []
for package in sorted(packages_, key=itemgetter('namespace', 'slug')):
rows.append([click.style(_get_package_name(package), fg='cyan'), click.style(_get_package_version(package), fg='yellow'), click.style(_get_package_status(package), fg='blue'), '%(owner_slug)s/%(repo_slug)s/%(slug)s' % {'owner_slug': click.style(package['namespace'], fg='magenta'), 'repo_slug': click.style(package['repository'], fg='magenta'), 'slug': click.style(package['slug'], fg='green')}]) # depends on [control=['for'], data=['package']]
if packages_:
click.echo()
utils.pretty_print_table(headers, rows) # depends on [control=['if'], data=[]]
click.echo()
num_results = len(packages_)
list_suffix = 'package%s visible' % ('s' if num_results != 1 else '')
utils.pretty_print_list_info(num_results=num_results, page_info=page_info, suffix=list_suffix) |
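# Example invocations for the click command above, assuming it is wired into
# the CLI as `list packages` (the exact entry-point name is an assumption):
#
#   cloudsmith list packages acme/prod-repo
#   cloudsmith list packages acme/prod-repo -q 'name:^foo$ filename:.zip$ architecture:~x86'
#
# The second form combines the anchored-name, filename, and negated-arch
# query terms described in the docstring.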
def set_notify_dispatch_request(self, notify_dispatch_request, *args):
"""Set function to call just before requests are dispatched
Args:
notify_dispatch_request (callable): function will be called
with request as single arg just before request is dispatched
"""
self._notify_dispatch_request = notify_dispatch_request
self._notify_args = args | def function[set_notify_dispatch_request, parameter[self, notify_dispatch_request]]:
constant[Set function to call just before requests are dispatched
Args:
notify_dispatch_request (callable): function will be called
with request as single arg just before request is dispatched
]
name[self]._notify_dispatch_request assign[=] name[notify_dispatch_request]
name[self]._notify_args assign[=] name[args] | keyword[def] identifier[set_notify_dispatch_request] ( identifier[self] , identifier[notify_dispatch_request] ,* identifier[args] ):
literal[string]
identifier[self] . identifier[_notify_dispatch_request] = identifier[notify_dispatch_request]
identifier[self] . identifier[_notify_args] = identifier[args] | def set_notify_dispatch_request(self, notify_dispatch_request, *args):
"""Set function to call just before requests are dispatched
Args:
notify_dispatch_request (callable): function will be called
with request as single arg just before request is dispatched
"""
self._notify_dispatch_request = notify_dispatch_request
self._notify_args = args |
def __retrieve_updates(self, timeout=20):
"""
Retrieves any updates from the Telegram API.
Registered listeners and applicable message handlers will be notified when a new message arrives.
:raises ApiException when a call has failed.
"""
if self.skip_pending:
logger.debug('Skipped {0} pending messages'.format(self.__skip_updates()))
self.skip_pending = False
updates = self.get_updates(offset=(self.last_update_id + 1), timeout=timeout)
self.process_new_updates(updates) | def function[__retrieve_updates, parameter[self, timeout]]:
constant[
Retrieves any updates from the Telegram API.
Registered listeners and applicable message handlers will be notified when a new message arrives.
:raises ApiException when a call has failed.
]
if name[self].skip_pending begin[:]
call[name[logger].debug, parameter[call[constant[Skipped {0} pending messages].format, parameter[call[name[self].__skip_updates, parameter[]]]]]]
name[self].skip_pending assign[=] constant[False]
variable[updates] assign[=] call[name[self].get_updates, parameter[]]
call[name[self].process_new_updates, parameter[name[updates]]] | keyword[def] identifier[__retrieve_updates] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[skip_pending] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[__skip_updates] ()))
identifier[self] . identifier[skip_pending] = keyword[False]
identifier[updates] = identifier[self] . identifier[get_updates] ( identifier[offset] =( identifier[self] . identifier[last_update_id] + literal[int] ), identifier[timeout] = identifier[timeout] )
identifier[self] . identifier[process_new_updates] ( identifier[updates] ) | def __retrieve_updates(self, timeout=20):
"""
Retrieves any updates from the Telegram API.
Registered listeners and applicable message handlers will be notified when a new message arrives.
:raises ApiException when a call has failed.
"""
if self.skip_pending:
logger.debug('Skipped {0} pending messages'.format(self.__skip_updates()))
self.skip_pending = False # depends on [control=['if'], data=[]]
updates = self.get_updates(offset=self.last_update_id + 1, timeout=timeout)
self.process_new_updates(updates) |
def copy(self):
"""Return a copy of this `Fact`."""
content = [(k, v) for k, v in self.items()]
intidx = [(k, v) for k, v in content if isinstance(k, int)]
args = [v for k, v in sorted(intidx)]
kwargs = {k: v
for k, v in content
if not isinstance(k, int) and not self.is_special(k)}
return self.__class__(*args, **kwargs) | def function[copy, parameter[self]]:
constant[Return a copy of this `Fact`.]
variable[content] assign[=] <ast.ListComp object at 0x7da1b1e64550>
variable[intidx] assign[=] <ast.ListComp object at 0x7da1b1e666e0>
variable[args] assign[=] <ast.ListComp object at 0x7da1b1e67910>
variable[kwargs] assign[=] <ast.DictComp object at 0x7da1b1e664a0>
return[call[name[self].__class__, parameter[<ast.Starred object at 0x7da1b1e66920>]]] | keyword[def] identifier[copy] ( identifier[self] ):
literal[string]
identifier[content] =[( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[items] ()]
identifier[intidx] =[( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[content] keyword[if] identifier[isinstance] ( identifier[k] , identifier[int] )]
identifier[args] =[ identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[intidx] )]
identifier[kwargs] ={ identifier[k] : identifier[v]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[content]
keyword[if] keyword[not] identifier[isinstance] ( identifier[k] , identifier[int] ) keyword[and] keyword[not] identifier[self] . identifier[is_special] ( identifier[k] )}
keyword[return] identifier[self] . identifier[__class__] (* identifier[args] ,** identifier[kwargs] ) | def copy(self):
"""Return a copy of this `Fact`."""
content = [(k, v) for (k, v) in self.items()]
intidx = [(k, v) for (k, v) in content if isinstance(k, int)]
args = [v for (k, v) in sorted(intidx)]
kwargs = {k: v for (k, v) in content if not isinstance(k, int) and (not self.is_special(k))}
return self.__class__(*args, **kwargs) |
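# Sketch of how copy() above splits a Fact-like mapping back into constructor
# arguments: integer keys become positional args (in index order), other keys
# become kwargs, and "special" keys are dropped. The stand-in class below is
# an assumption for illustration.
class FactSketch:
    def __init__(self, *args, **kwargs):
        self._data = dict(enumerate(args), **kwargs)
    def items(self):
        return self._data.items()
    def is_special(self, k):
        return isinstance(k, str) and k.startswith("__")
    def copy(self):
        content = [(k, v) for k, v in self.items()]
        intidx = [(k, v) for k, v in content if isinstance(k, int)]
        args = [v for k, v in sorted(intidx)]
        kwargs = {k: v for k, v in content
                  if not isinstance(k, int) and not self.is_special(k)}
        return self.__class__(*args, **kwargs)

f = FactSketch("red", "round", size=3)
f._data["__factid__"] = 42      # a special key that copy() drops
print(f.copy()._data)           # {0: 'red', 1: 'round', 'size': 3}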
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = tf.gfile.GFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
bottlenecks.append(bottleneck_values)
ground_truths.append(label_index)
return bottlenecks, ground_truths | def function[get_random_distorted_bottlenecks, parameter[sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor]]:
constant[Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
]
variable[class_count] assign[=] call[name[len], parameter[call[name[image_lists].keys, parameter[]]]]
variable[bottlenecks] assign[=] list[[]]
variable[ground_truths] assign[=] list[[]]
for taget[name[unused_i]] in starred[call[name[range], parameter[name[how_many]]]] begin[:]
variable[label_index] assign[=] call[name[random].randrange, parameter[name[class_count]]]
variable[label_name] assign[=] call[call[name[list], parameter[call[name[image_lists].keys, parameter[]]]]][name[label_index]]
variable[image_index] assign[=] call[name[random].randrange, parameter[binary_operation[name[MAX_NUM_IMAGES_PER_CLASS] + constant[1]]]]
variable[image_path] assign[=] call[name[get_image_path], parameter[name[image_lists], name[label_name], name[image_index], name[image_dir], name[category]]]
if <ast.UnaryOp object at 0x7da1b1f19d20> begin[:]
call[name[tf].logging.fatal, parameter[constant[File does not exist %s], name[image_path]]]
variable[jpeg_data] assign[=] call[call[name[tf].gfile.GFile, parameter[name[image_path], constant[rb]]].read, parameter[]]
variable[distorted_image_data] assign[=] call[name[sess].run, parameter[name[distorted_image], dictionary[[<ast.Name object at 0x7da1b1f19930>], [<ast.Name object at 0x7da1b1f1ace0>]]]]
variable[bottleneck_values] assign[=] call[name[sess].run, parameter[name[bottleneck_tensor], dictionary[[<ast.Name object at 0x7da1b1f1a020>], [<ast.Name object at 0x7da1b1f1a7a0>]]]]
variable[bottleneck_values] assign[=] call[name[np].squeeze, parameter[name[bottleneck_values]]]
call[name[bottlenecks].append, parameter[name[bottleneck_values]]]
call[name[ground_truths].append, parameter[name[label_index]]]
return[tuple[[<ast.Name object at 0x7da1b1f1a740>, <ast.Name object at 0x7da1b1f1a3b0>]]] | keyword[def] identifier[get_random_distorted_bottlenecks] (
identifier[sess] , identifier[image_lists] , identifier[how_many] , identifier[category] , identifier[image_dir] , identifier[input_jpeg_tensor] ,
identifier[distorted_image] , identifier[resized_input_tensor] , identifier[bottleneck_tensor] ):
literal[string]
identifier[class_count] = identifier[len] ( identifier[image_lists] . identifier[keys] ())
identifier[bottlenecks] =[]
identifier[ground_truths] =[]
keyword[for] identifier[unused_i] keyword[in] identifier[range] ( identifier[how_many] ):
identifier[label_index] = identifier[random] . identifier[randrange] ( identifier[class_count] )
identifier[label_name] = identifier[list] ( identifier[image_lists] . identifier[keys] ())[ identifier[label_index] ]
identifier[image_index] = identifier[random] . identifier[randrange] ( identifier[MAX_NUM_IMAGES_PER_CLASS] + literal[int] )
identifier[image_path] = identifier[get_image_path] ( identifier[image_lists] , identifier[label_name] , identifier[image_index] , identifier[image_dir] ,
identifier[category] )
keyword[if] keyword[not] identifier[tf] . identifier[gfile] . identifier[Exists] ( identifier[image_path] ):
identifier[tf] . identifier[logging] . identifier[fatal] ( literal[string] , identifier[image_path] )
identifier[jpeg_data] = identifier[tf] . identifier[gfile] . identifier[GFile] ( identifier[image_path] , literal[string] ). identifier[read] ()
identifier[distorted_image_data] = identifier[sess] . identifier[run] ( identifier[distorted_image] ,
{ identifier[input_jpeg_tensor] : identifier[jpeg_data] })
identifier[bottleneck_values] = identifier[sess] . identifier[run] ( identifier[bottleneck_tensor] ,
{ identifier[resized_input_tensor] : identifier[distorted_image_data] })
identifier[bottleneck_values] = identifier[np] . identifier[squeeze] ( identifier[bottleneck_values] )
identifier[bottlenecks] . identifier[append] ( identifier[bottleneck_values] )
identifier[ground_truths] . identifier[append] ( identifier[label_index] )
keyword[return] identifier[bottlenecks] , identifier[ground_truths] | def get_random_distorted_bottlenecks(sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir, category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path) # depends on [control=['if'], data=[]]
jpeg_data = tf.gfile.GFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image, {input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
bottlenecks.append(bottleneck_values)
ground_truths.append(label_index) # depends on [control=['for'], data=[]]
return (bottlenecks, ground_truths) |
def get_string(self):
"""A string representation of the junction
:return: string representation
:rtype: string
"""
return self.left.chr+':'+str(self.left.end)+'-'+self.right.chr+':'+str(self.right.start) | def function[get_string, parameter[self]]:
constant[A string representation of the junction
:return: string representation
:rtype: string
]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[self].left.chr + constant[:]] + call[name[str], parameter[name[self].left.end]]] + constant[-]] + name[self].right.chr] + constant[:]] + call[name[str], parameter[name[self].right.start]]]] | keyword[def] identifier[get_string] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[left] . identifier[chr] + literal[string] + identifier[str] ( identifier[self] . identifier[left] . identifier[end] )+ literal[string] + identifier[self] . identifier[right] . identifier[chr] + literal[string] + identifier[str] ( identifier[self] . identifier[right] . identifier[start] ) | def get_string(self):
"""A string representation of the junction
:return: string representation
:rtype: string
"""
return self.left.chr + ':' + str(self.left.end) + '-' + self.right.chr + ':' + str(self.right.start) |
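For the junction record above, a self-contained sketch of the string it builds; the namedtuple loci are assumptions standing in for the real left/right objects, and only the chr:end-chr:start format comes from the code.
from collections import namedtuple

Left = namedtuple('Left', ['chr', 'end'])      # assumed shape of self.left
Right = namedtuple('Right', ['chr', 'start'])  # assumed shape of self.right

left, right = Left('chr1', 1000), Right('chr1', 2000)
print(left.chr + ':' + str(left.end) + '-' + right.chr + ':' + str(right.start))
# -> chr1:1000-chr1:2000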
def prep_stream_data(data):
"""Take an input and prepare it for use as a stream.
:param data: Input data
:returns: Prepared stream
:rtype: InsistentReaderBytesIO
"""
if isinstance(data, (six.string_types, six.binary_type)):
stream = io.BytesIO(to_bytes(data))
else:
stream = data
return InsistentReaderBytesIO(stream) | def function[prep_stream_data, parameter[data]]:
constant[Take an input and prepare it for use as a stream.
:param data: Input data
:returns: Prepared stream
:rtype: InsistentReaderBytesIO
]
if call[name[isinstance], parameter[name[data], tuple[[<ast.Attribute object at 0x7da18fe93010>, <ast.Attribute object at 0x7da18fe90280>]]]] begin[:]
variable[stream] assign[=] call[name[io].BytesIO, parameter[call[name[to_bytes], parameter[name[data]]]]]
return[call[name[InsistentReaderBytesIO], parameter[name[stream]]]] | keyword[def] identifier[prep_stream_data] ( identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] ,( identifier[six] . identifier[string_types] , identifier[six] . identifier[binary_type] )):
identifier[stream] = identifier[io] . identifier[BytesIO] ( identifier[to_bytes] ( identifier[data] ))
keyword[else] :
identifier[stream] = identifier[data]
keyword[return] identifier[InsistentReaderBytesIO] ( identifier[stream] ) | def prep_stream_data(data):
"""Take an input and prepare it for use as a stream.
:param data: Input data
:returns: Prepared stream
:rtype: InsistentReaderBytesIO
"""
if isinstance(data, (six.string_types, six.binary_type)):
stream = io.BytesIO(to_bytes(data)) # depends on [control=['if'], data=[]]
else:
stream = data
return InsistentReaderBytesIO(stream) |
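Run together with the record above, a small check that strings and bytes get wrapped while file-like objects pass straight through; to_bytes and InsistentReaderBytesIO are stand-ins here, since the record only references them.
import io
import six

def to_bytes(data):  # assumed helper: encode text, pass bytes through
    return data.encode('utf-8') if isinstance(data, six.text_type) else data

class InsistentReaderBytesIO(object):  # minimal stand-in for the real wrapper
    def __init__(self, stream):
        self.stream = stream

inner = io.BytesIO(b'existing stream')
assert prep_stream_data('text').stream is not inner  # wrapped into a new BytesIO
assert prep_stream_data(inner).stream is inner       # passed through untouched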
def _transform(self, W):
for t in range(16, 80):
W.append(_rotateLeft(
W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) & 0xffffffff)
A = self.H0
B = self.H1
C = self.H2
D = self.H3
E = self.H4
"""
This loop was unrolled to gain about 10% in speed
for t in range(0, 80):
TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20]
E = D
D = C
C = _rotateLeft(B, 30) & 0xffffffff
B = A
A = TEMP & 0xffffffff
"""
for t in range(0, 20):
TEMP = _rotateLeft(A, 5) + ((B & C) | ((~ B) & D)) + E + W[t] + K[0]
E = D
D = C
C = _rotateLeft(B, 30) & 0xffffffff
B = A
A = TEMP & 0xffffffff
for t in range(20, 40):
TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1]
E = D
D = C
C = _rotateLeft(B, 30) & 0xffffffff
B = A
A = TEMP & 0xffffffff
for t in range(40, 60):
TEMP = _rotateLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2]
E = D
D = C
C = _rotateLeft(B, 30) & 0xffffffff
B = A
A = TEMP & 0xffffffff
for t in range(60, 80):
TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3]
E = D
D = C
C = _rotateLeft(B, 30) & 0xffffffff
B = A
A = TEMP & 0xffffffff
self.H0 = (self.H0 + A) & 0xffffffff
self.H1 = (self.H1 + B) & 0xffffffff
self.H2 = (self.H2 + C) & 0xffffffff
self.H3 = (self.H3 + D) & 0xffffffff
self.H4 = (self.H4 + E) & 0xffffffff | def function[_transform, parameter[self, W]]:
for taget[name[t]] in starred[call[name[range], parameter[constant[16], constant[80]]]] begin[:]
call[name[W].append, parameter[binary_operation[call[name[_rotateLeft], parameter[binary_operation[binary_operation[binary_operation[call[name[W]][binary_operation[name[t] - constant[3]]] <ast.BitXor object at 0x7da2590d6b00> call[name[W]][binary_operation[name[t] - constant[8]]]] <ast.BitXor object at 0x7da2590d6b00> call[name[W]][binary_operation[name[t] - constant[14]]]] <ast.BitXor object at 0x7da2590d6b00> call[name[W]][binary_operation[name[t] - constant[16]]]], constant[1]]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]]]
variable[A] assign[=] name[self].H0
variable[B] assign[=] name[self].H1
variable[C] assign[=] name[self].H2
variable[D] assign[=] name[self].H3
variable[E] assign[=] name[self].H4
constant[
This loop was unrolled to gain about 10% in speed
for t in range(0, 80):
TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20]
E = D
D = C
C = _rotateLeft(B, 30) & 0xffffffff
B = A
A = TEMP & 0xffffffff
]
for taget[name[t]] in starred[call[name[range], parameter[constant[0], constant[20]]]] begin[:]
variable[TEMP] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[_rotateLeft], parameter[name[A], constant[5]]] + binary_operation[binary_operation[name[B] <ast.BitAnd object at 0x7da2590d6b60> name[C]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[<ast.UnaryOp object at 0x7da1b23458a0> <ast.BitAnd object at 0x7da2590d6b60> name[D]]]] + name[E]] + call[name[W]][name[t]]] + call[name[K]][constant[0]]]
variable[E] assign[=] name[D]
variable[D] assign[=] name[C]
variable[C] assign[=] binary_operation[call[name[_rotateLeft], parameter[name[B], constant[30]]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
variable[B] assign[=] name[A]
variable[A] assign[=] binary_operation[name[TEMP] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
for taget[name[t]] in starred[call[name[range], parameter[constant[20], constant[40]]]] begin[:]
variable[TEMP] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[_rotateLeft], parameter[name[A], constant[5]]] + binary_operation[binary_operation[name[B] <ast.BitXor object at 0x7da2590d6b00> name[C]] <ast.BitXor object at 0x7da2590d6b00> name[D]]] + name[E]] + call[name[W]][name[t]]] + call[name[K]][constant[1]]]
variable[E] assign[=] name[D]
variable[D] assign[=] name[C]
variable[C] assign[=] binary_operation[call[name[_rotateLeft], parameter[name[B], constant[30]]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
variable[B] assign[=] name[A]
variable[A] assign[=] binary_operation[name[TEMP] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
for taget[name[t]] in starred[call[name[range], parameter[constant[40], constant[60]]]] begin[:]
variable[TEMP] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[_rotateLeft], parameter[name[A], constant[5]]] + binary_operation[binary_operation[binary_operation[name[B] <ast.BitAnd object at 0x7da2590d6b60> name[C]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[B] <ast.BitAnd object at 0x7da2590d6b60> name[D]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[C] <ast.BitAnd object at 0x7da2590d6b60> name[D]]]] + name[E]] + call[name[W]][name[t]]] + call[name[K]][constant[2]]]
variable[E] assign[=] name[D]
variable[D] assign[=] name[C]
variable[C] assign[=] binary_operation[call[name[_rotateLeft], parameter[name[B], constant[30]]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
variable[B] assign[=] name[A]
variable[A] assign[=] binary_operation[name[TEMP] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
for taget[name[t]] in starred[call[name[range], parameter[constant[60], constant[80]]]] begin[:]
variable[TEMP] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[_rotateLeft], parameter[name[A], constant[5]]] + binary_operation[binary_operation[name[B] <ast.BitXor object at 0x7da2590d6b00> name[C]] <ast.BitXor object at 0x7da2590d6b00> name[D]]] + name[E]] + call[name[W]][name[t]]] + call[name[K]][constant[3]]]
variable[E] assign[=] name[D]
variable[D] assign[=] name[C]
variable[C] assign[=] binary_operation[call[name[_rotateLeft], parameter[name[B], constant[30]]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
variable[B] assign[=] name[A]
variable[A] assign[=] binary_operation[name[TEMP] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
name[self].H0 assign[=] binary_operation[binary_operation[name[self].H0 + name[A]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
name[self].H1 assign[=] binary_operation[binary_operation[name[self].H1 + name[B]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
name[self].H2 assign[=] binary_operation[binary_operation[name[self].H2 + name[C]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
name[self].H3 assign[=] binary_operation[binary_operation[name[self].H3 + name[D]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
name[self].H4 assign[=] binary_operation[binary_operation[name[self].H4 + name[E]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]] | keyword[def] identifier[_transform] ( identifier[self] , identifier[W] ):
keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[W] . identifier[append] ( identifier[_rotateLeft] (
identifier[W] [ identifier[t] - literal[int] ]^ identifier[W] [ identifier[t] - literal[int] ]^ identifier[W] [ identifier[t] - literal[int] ]^ identifier[W] [ identifier[t] - literal[int] ], literal[int] )& literal[int] )
identifier[A] = identifier[self] . identifier[H0]
identifier[B] = identifier[self] . identifier[H1]
identifier[C] = identifier[self] . identifier[H2]
identifier[D] = identifier[self] . identifier[H3]
identifier[E] = identifier[self] . identifier[H4]
literal[string]
keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[TEMP] = identifier[_rotateLeft] ( identifier[A] , literal[int] )+(( identifier[B] & identifier[C] )|((~ identifier[B] )& identifier[D] ))+ identifier[E] + identifier[W] [ identifier[t] ]+ identifier[K] [ literal[int] ]
identifier[E] = identifier[D]
identifier[D] = identifier[C]
identifier[C] = identifier[_rotateLeft] ( identifier[B] , literal[int] )& literal[int]
identifier[B] = identifier[A]
identifier[A] = identifier[TEMP] & literal[int]
keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[TEMP] = identifier[_rotateLeft] ( identifier[A] , literal[int] )+( identifier[B] ^ identifier[C] ^ identifier[D] )+ identifier[E] + identifier[W] [ identifier[t] ]+ identifier[K] [ literal[int] ]
identifier[E] = identifier[D]
identifier[D] = identifier[C]
identifier[C] = identifier[_rotateLeft] ( identifier[B] , literal[int] )& literal[int]
identifier[B] = identifier[A]
identifier[A] = identifier[TEMP] & literal[int]
keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[TEMP] = identifier[_rotateLeft] ( identifier[A] , literal[int] )+(( identifier[B] & identifier[C] )|( identifier[B] & identifier[D] )|( identifier[C] & identifier[D] ))+ identifier[E] + identifier[W] [ identifier[t] ]+ identifier[K] [ literal[int] ]
identifier[E] = identifier[D]
identifier[D] = identifier[C]
identifier[C] = identifier[_rotateLeft] ( identifier[B] , literal[int] )& literal[int]
identifier[B] = identifier[A]
identifier[A] = identifier[TEMP] & literal[int]
keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[TEMP] = identifier[_rotateLeft] ( identifier[A] , literal[int] )+( identifier[B] ^ identifier[C] ^ identifier[D] )+ identifier[E] + identifier[W] [ identifier[t] ]+ identifier[K] [ literal[int] ]
identifier[E] = identifier[D]
identifier[D] = identifier[C]
identifier[C] = identifier[_rotateLeft] ( identifier[B] , literal[int] )& literal[int]
identifier[B] = identifier[A]
identifier[A] = identifier[TEMP] & literal[int]
identifier[self] . identifier[H0] =( identifier[self] . identifier[H0] + identifier[A] )& literal[int]
identifier[self] . identifier[H1] =( identifier[self] . identifier[H1] + identifier[B] )& literal[int]
identifier[self] . identifier[H2] =( identifier[self] . identifier[H2] + identifier[C] )& literal[int]
identifier[self] . identifier[H3] =( identifier[self] . identifier[H3] + identifier[D] )& literal[int]
identifier[self] . identifier[H4] =( identifier[self] . identifier[H4] + identifier[E] )& literal[int] | def _transform(self, W):
for t in range(16, 80):
W.append(_rotateLeft(W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16], 1) & 4294967295) # depends on [control=['for'], data=['t']]
A = self.H0
B = self.H1
C = self.H2
D = self.H3
E = self.H4
'\n This loop was unrolled to gain about 10% in speed\n for t in range(0, 80):\n TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20]\n E = D\n D = C\n C = _rotateLeft(B, 30) & 0xffffffff\n B = A\n A = TEMP & 0xffffffff\n '
for t in range(0, 20):
TEMP = _rotateLeft(A, 5) + (B & C | ~B & D) + E + W[t] + K[0]
E = D
D = C
C = _rotateLeft(B, 30) & 4294967295
B = A
A = TEMP & 4294967295 # depends on [control=['for'], data=['t']]
for t in range(20, 40):
TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1]
E = D
D = C
C = _rotateLeft(B, 30) & 4294967295
B = A
A = TEMP & 4294967295 # depends on [control=['for'], data=['t']]
for t in range(40, 60):
TEMP = _rotateLeft(A, 5) + (B & C | B & D | C & D) + E + W[t] + K[2]
E = D
D = C
C = _rotateLeft(B, 30) & 4294967295
B = A
A = TEMP & 4294967295 # depends on [control=['for'], data=['t']]
for t in range(60, 80):
TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3]
E = D
D = C
C = _rotateLeft(B, 30) & 4294967295
B = A
A = TEMP & 4294967295 # depends on [control=['for'], data=['t']]
self.H0 = self.H0 + A & 4294967295
self.H1 = self.H1 + B & 4294967295
self.H2 = self.H2 + C & 4294967295
self.H3 = self.H3 + D & 4294967295
self.H4 = self.H4 + E & 4294967295 |
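The SHA-1 rounds above lean on a 32-bit left rotation; _rotateLeft itself is not shown in this record, so here is a minimal sketch consistent with how it is called.
def _rotateLeft(x, n):
    # Rotate a 32-bit word left by n bits; the masks keep Python ints at 32 bits.
    x = x & 0xffffffff
    return ((x << n) | (x >> (32 - n))) & 0xffffffff

assert _rotateLeft(0x80000000, 1) == 0x00000001  # the high bit wraps around
assert _rotateLeft(0x12345678, 8) == 0x34567812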
def init_defaults(self):
"""
Sets a query instance variable to the table value
"""
super(QueryTable, self).init_defaults()
self.query = self.table
self.query.is_inner = True | def function[init_defaults, parameter[self]]:
constant[
Sets a query instance variable to the table value
]
call[call[name[super], parameter[name[QueryTable], name[self]]].init_defaults, parameter[]]
name[self].query assign[=] name[self].table
name[self].query.is_inner assign[=] constant[True] | keyword[def] identifier[init_defaults] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[QueryTable] , identifier[self] ). identifier[init_defaults] ()
identifier[self] . identifier[query] = identifier[self] . identifier[table]
identifier[self] . identifier[query] . identifier[is_inner] = keyword[True] | def init_defaults(self):
"""
Sets a query instance variable to the table value
"""
super(QueryTable, self).init_defaults()
self.query = self.table
self.query.is_inner = True |
def wait_for_completion(report, interval=10):
"""Wait for asynchronous jobs stil running in the given campaign.
:param report: memory representation of a campaign report
:type campaign: ReportNode
:param interval: wait interval
:type interval: int or float
:return: list of asynchronous job identifiers
"""
for jobid in report.collect('jobid'):
try:
if not Job.finished(jobid):
logging.info('waiting for SLURM job %s', jobid)
time.sleep(interval)
while not Job.finished(jobid):
time.sleep(interval)
yield Job.fromid(jobid)._asdict()
except OSError as e:
if e.errno == errno.ENOENT:
yield dict(id=str(jobid))
else:
raise e | def function[wait_for_completion, parameter[report, interval]]:
constant[Wait for asynchronous jobs still running in the given campaign.
:param report: memory representation of a campaign report
:type report: ReportNode
:param interval: wait interval
:type interval: int or float
:return: generator of finished job descriptions, one dict per job
]
for taget[name[jobid]] in starred[call[name[report].collect, parameter[constant[jobid]]]] begin[:]
<ast.Try object at 0x7da18f09c9a0> | keyword[def] identifier[wait_for_completion] ( identifier[report] , identifier[interval] = literal[int] ):
literal[string]
keyword[for] identifier[jobid] keyword[in] identifier[report] . identifier[collect] ( literal[string] ):
keyword[try] :
keyword[if] keyword[not] identifier[Job] . identifier[finished] ( identifier[jobid] ):
identifier[logging] . identifier[info] ( literal[string] , identifier[jobid] )
identifier[time] . identifier[sleep] ( identifier[interval] )
keyword[while] keyword[not] identifier[Job] . identifier[finished] ( identifier[jobid] ):
identifier[time] . identifier[sleep] ( identifier[interval] )
keyword[yield] identifier[Job] . identifier[fromid] ( identifier[jobid] ). identifier[_asdict] ()
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
keyword[yield] identifier[dict] ( identifier[id] = identifier[str] ( identifier[jobid] ))
keyword[else] :
keyword[raise] identifier[e] | def wait_for_completion(report, interval=10):
"""Wait for asynchronous jobs stil running in the given campaign.
:param report: memory representation of a campaign report
:type campaign: ReportNode
:param interval: wait interval
:type interval: int or float
:return: list of asynchronous job identifiers
"""
for jobid in report.collect('jobid'):
try:
if not Job.finished(jobid):
logging.info('waiting for SLURM job %s', jobid)
time.sleep(interval)
while not Job.finished(jobid):
time.sleep(interval) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
yield Job.fromid(jobid)._asdict() # depends on [control=['try'], data=[]]
except OSError as e:
if e.errno == errno.ENOENT:
yield dict(id=str(jobid)) # depends on [control=['if'], data=[]]
else:
raise e # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['jobid']] |
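Because the record above is a generator, callers drain it to block until every job is done; a self-contained run with stand-in Job and report objects (both assumed shapes) that exercises the ENOENT fallback branch:
import errno

class Job(object):  # stand-in scheduler API, not the real SLURM backend
    @staticmethod
    def finished(jobid):
        return True  # pretend every job already completed
    @staticmethod
    def fromid(jobid):
        raise OSError(errno.ENOENT, 'no accounting record')  # force the fallback

class FakeReport(object):  # stand-in ReportNode exposing collect('jobid')
    def collect(self, key):
        return [101, 102]

print(list(wait_for_completion(FakeReport(), interval=0)))
# -> [{'id': '101'}, {'id': '102'}]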
def chunks(f):
"""Split read PNG image data into chunks"""
while 1:
try:
length = struct.unpack(b"!I", f.read(4))[0]
tag = f.read(4)
data = f.read(length)
crc = struct.unpack(b"!I", f.read(4))[0]
except struct.error:
return
if zlib.crc32(tag + data) & 0xFFFFFFFF != crc:
raise IOError('Checksum fail')
yield tag, data | def function[chunks, parameter[f]]:
constant[Split read PNG image data into chunks]
while constant[1] begin[:]
<ast.Try object at 0x7da1b094a4a0>
if compare[binary_operation[call[name[zlib].crc32, parameter[binary_operation[name[tag] + name[data]]]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]] not_equal[!=] name[crc]] begin[:]
<ast.Raise object at 0x7da1b0948610>
<ast.Yield object at 0x7da1b094a530> | keyword[def] identifier[chunks] ( identifier[f] ):
literal[string]
keyword[while] literal[int] :
keyword[try] :
identifier[length] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[f] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[tag] = identifier[f] . identifier[read] ( literal[int] )
identifier[data] = identifier[f] . identifier[read] ( identifier[length] )
identifier[crc] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[f] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[except] identifier[struct] . identifier[error] :
keyword[return]
keyword[if] identifier[zlib] . identifier[crc32] ( identifier[tag] + identifier[data] )& literal[int] != identifier[crc] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[yield] identifier[tag] , identifier[data] | def chunks(f):
"""Split read PNG image data into chunks"""
while 1:
try:
length = struct.unpack(b'!I', f.read(4))[0]
tag = f.read(4)
data = f.read(length)
crc = struct.unpack(b'!I', f.read(4))[0] # depends on [control=['try'], data=[]]
except struct.error:
return # depends on [control=['except'], data=[]]
if zlib.crc32(tag + data) & 4294967295 != crc:
raise IOError('Checksum fail') # depends on [control=['if'], data=[]]
yield (tag, data) # depends on [control=['while'], data=[]] |
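Run against the record above, a self-contained round trip: pack one valid chunk (length, tag, data, CRC over tag+data) and let the parser verify and yield it.
import io
import struct
import zlib

def make_chunk(tag, data):
    # Length + tag + data + CRC over (tag + data), per the PNG chunk layout.
    crc = zlib.crc32(tag + data) & 0xFFFFFFFF
    return struct.pack(b'!I', len(data)) + tag + data + struct.pack(b'!I', crc)

buf = io.BytesIO(make_chunk(b'IEND', b''))
print(list(chunks(buf)))  # -> [(b'IEND', b'')]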
def make_mesh_file(
self,
labels=None,
):
""" Make one mesh (vtk or stl) file
:param labels: labels from the previous use of set_labels are used if None
:return: filename of output file
The function returns three-dimensional data similar to data['segmentation'];
data['slab'] describes what each value means
"""
if labels is None:
labels=self.labels
strlabel = imma.get_nlabels(slab=self.slab, labels=labels, return_mode="str")
if strlabel is list:
# if one file with {} in pattern is created
strlabel = "-".join(strlabel)
logger.debug(strlabel)
mesh_filename = self.output_file_pattern.format(strlabel)
logger.debug(mesh_filename)
self._resize_if_required()
# sed3.show_slices(self.resized_segmentation)
self.select_labels(labels)
# import sed3
# sed3.show_slices(self.binar_segmentation)
# _stats(self.segmentation)
# _stats(self.binar_segmentation)
_stats(self.resized_segmentation)
# import pdb; pdb.set_trace()
logger.debug("gen_mesh_from_voxels_mc() started")
mesh_data = gen_mesh_from_voxels_mc(self.resized_binar_segmentation, self.resized_voxelsize_mm)
if self.smoothing:
mesh_data.coors = smooth_mesh(mesh_data)
# mesh_data.coors = seg2fem.smooth_mesh(mesh_data)
else:
pass
# mesh_data = gen_mesh_from_voxels_mc(segmentation, voxelsize_mm * 1.0e-2)
# mesh_data.coors +=
logger.debug("gen_mesh_from_voxels_mc() finished")
pth, ext = op.splitext(mesh_filename)
if ext == ".stl":
vtk_filename = mesh_filename + ".vtk"
else:
vtk_filename = mesh_filename
mesh_data.write(vtk_filename)
if ext == ".stl":
vtk2stl.vtk2stl(vtk_filename, mesh_filename)
return mesh_filename | def function[make_mesh_file, parameter[self, labels]]:
constant[ Make one mesh (vtk or stl) file
:param labels: labels from the previous use of set_labels are used if None
:return: filename of output file
The function returns three-dimensional data similar to data['segmentation'];
data['slab'] describes what each value means
]
if compare[name[labels] is constant[None]] begin[:]
variable[labels] assign[=] name[self].labels
variable[strlabel] assign[=] call[name[imma].get_nlabels, parameter[]]
if compare[name[strlabel] is name[list]] begin[:]
variable[strlabel] assign[=] call[constant[-].join, parameter[name[strlabel]]]
call[name[logger].debug, parameter[name[strlabel]]]
variable[mesh_filename] assign[=] call[name[self].output_file_pattern.format, parameter[name[strlabel]]]
call[name[logger].debug, parameter[name[mesh_filename]]]
call[name[self]._resize_if_required, parameter[]]
call[name[self].select_labels, parameter[name[labels]]]
call[name[_stats], parameter[name[self].resized_segmentation]]
call[name[logger].debug, parameter[constant[gen_mesh_from_voxels_mc() started]]]
variable[mesh_data] assign[=] call[name[gen_mesh_from_voxels_mc], parameter[name[self].resized_binar_segmentation, name[self].resized_voxelsize_mm]]
if name[self].smoothing begin[:]
name[mesh_data].coors assign[=] call[name[smooth_mesh], parameter[name[mesh_data]]]
call[name[logger].debug, parameter[constant[gen_mesh_from_voxels_mc() finished]]]
<ast.Tuple object at 0x7da1b2585570> assign[=] call[name[op].splitext, parameter[name[mesh_filename]]]
if compare[name[ext] equal[==] constant[.stl]] begin[:]
variable[vtk_filename] assign[=] binary_operation[name[mesh_filename] + constant[.vtk]]
call[name[mesh_data].write, parameter[name[vtk_filename]]]
if compare[name[ext] equal[==] constant[.stl]] begin[:]
call[name[vtk2stl].vtk2stl, parameter[name[vtk_filename], name[mesh_filename]]]
return[name[mesh_filename]] | keyword[def] identifier[make_mesh_file] (
identifier[self] ,
identifier[labels] = keyword[None] ,
):
literal[string]
keyword[if] identifier[labels] keyword[is] keyword[None] :
identifier[labels] = identifier[self] . identifier[labels]
identifier[strlabel] = identifier[imma] . identifier[get_nlabels] ( identifier[slab] = identifier[self] . identifier[slab] , identifier[labels] = identifier[labels] , identifier[return_mode] = literal[string] )
keyword[if] identifier[strlabel] keyword[is] identifier[list] :
identifier[strlabel] = literal[string] . identifier[join] ( identifier[strlabel] )
identifier[logger] . identifier[debug] ( identifier[strlabel] )
identifier[mesh_filename] = identifier[self] . identifier[output_file_pattern] . identifier[format] ( identifier[strlabel] )
identifier[logger] . identifier[debug] ( identifier[mesh_filename] )
identifier[self] . identifier[_resize_if_required] ()
identifier[self] . identifier[select_labels] ( identifier[labels] )
identifier[_stats] ( identifier[self] . identifier[resized_segmentation] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[mesh_data] = identifier[gen_mesh_from_voxels_mc] ( identifier[self] . identifier[resized_binar_segmentation] , identifier[self] . identifier[resized_voxelsize_mm] )
keyword[if] identifier[self] . identifier[smoothing] :
identifier[mesh_data] . identifier[coors] = identifier[smooth_mesh] ( identifier[mesh_data] )
keyword[else] :
keyword[pass]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[pth] , identifier[ext] = identifier[op] . identifier[splitext] ( identifier[mesh_filename] )
keyword[if] identifier[ext] == literal[string] :
identifier[vtk_filename] = identifier[mesh_filename] + literal[string]
keyword[else] :
identifier[vtk_filename] = identifier[mesh_filename]
identifier[mesh_data] . identifier[write] ( identifier[vtk_filename] )
keyword[if] identifier[ext] == literal[string] :
identifier[vtk2stl] . identifier[vtk2stl] ( identifier[vtk_filename] , identifier[mesh_filename] )
keyword[return] identifier[mesh_filename] | def make_mesh_file(self, labels=None):
""" Make one mesh (vtk or stl) file
:param labels: labels from the previous use of set_labels are used if None
:return: filename of output file
The function returns three-dimensional data similar to data['segmentation'];
data['slab'] describes what each value means
"""
if labels is None:
labels = self.labels # depends on [control=['if'], data=['labels']]
strlabel = imma.get_nlabels(slab=self.slab, labels=labels, return_mode='str')
if strlabel is list:
# if one file with {} in pattern is created
strlabel = '-'.join(strlabel) # depends on [control=['if'], data=['strlabel']]
logger.debug(strlabel)
mesh_filename = self.output_file_pattern.format(strlabel)
logger.debug(mesh_filename)
self._resize_if_required()
# sed3.show_slices(self.resized_segmentation)
self.select_labels(labels)
# import sed3
# sed3.show_slices(self.binar_segmentation)
# _stats(self.segmentation)
# _stats(self.binar_segmentation)
_stats(self.resized_segmentation)
# import pdb; pdb.set_trace()
logger.debug('gen_mesh_from_voxels_mc() started')
mesh_data = gen_mesh_from_voxels_mc(self.resized_binar_segmentation, self.resized_voxelsize_mm)
if self.smoothing:
mesh_data.coors = smooth_mesh(mesh_data) # depends on [control=['if'], data=[]]
else:
# mesh_data.coors = seg2fem.smooth_mesh(mesh_data)
pass
# mesh_data = gen_mesh_from_voxels_mc(segmentation, voxelsize_mm * 1.0e-2)
# mesh_data.coors +=
logger.debug('gen_mesh_from_voxels_mc() finished')
(pth, ext) = op.splitext(mesh_filename)
if ext == '.stl':
vtk_filename = mesh_filename + '.vtk' # depends on [control=['if'], data=[]]
else:
vtk_filename = mesh_filename
mesh_data.write(vtk_filename)
if ext == '.stl':
vtk2stl.vtk2stl(vtk_filename, mesh_filename) # depends on [control=['if'], data=[]]
return mesh_filename |
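One detail worth isolating from the record above is the output naming: an .stl target is first written as <name>.stl.vtk and then converted, while a .vtk target is written directly. A tiny self-contained sketch of just that branch:
import os.path as op

def mesh_filenames(mesh_filename):
    # Mirrors the splitext branch above.
    pth, ext = op.splitext(mesh_filename)
    vtk_filename = mesh_filename + '.vtk' if ext == '.stl' else mesh_filename
    return vtk_filename, mesh_filename

print(mesh_filenames('liver-1.stl'))  # -> ('liver-1.stl.vtk', 'liver-1.stl')
print(mesh_filenames('liver-1.vtk'))  # -> ('liver-1.vtk', 'liver-1.vtk')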
async def _set_wallets(an_data: dict) -> dict:
"""
Set wallets as configured for setnym operation.
:param an_data: dict mapping profiles to anchor data
:return: dict mapping profiles to wallet objects
"""
w_mgr = WalletManager()
rv = {}
for profile in an_data:
w_cfg = {'id': an_data[profile].name}
if an_data[profile].wallet_type:
w_cfg['storage_type'] = an_data[profile].wallet_type
if an_data[profile].seed:
w_cfg['seed'] = an_data[profile].seed
if an_data[profile].did:
w_cfg['did'] = an_data[profile].did
if an_data[profile].wallet_create:
try:
await w_mgr.create(w_cfg, access=an_data[profile].wallet_access)
except ExtantWallet:
pass
rv[profile] = w_mgr.get(w_cfg, access=an_data[profile].wallet_access)
return rv | <ast.AsyncFunctionDef object at 0x7da18c4cef50> | keyword[async] keyword[def] identifier[_set_wallets] ( identifier[an_data] : identifier[dict] )-> identifier[dict] :
literal[string]
identifier[w_mgr] = identifier[WalletManager] ()
identifier[rv] ={}
keyword[for] identifier[profile] keyword[in] identifier[an_data] :
identifier[w_cfg] ={ literal[string] : identifier[an_data] [ identifier[profile] ]. identifier[name] }
keyword[if] identifier[an_data] [ identifier[profile] ]. identifier[wallet_type] :
identifier[w_cfg] [ literal[string] ]= identifier[an_data] [ identifier[profile] ]. identifier[wallet_type]
keyword[if] identifier[an_data] [ identifier[profile] ]. identifier[seed] :
identifier[w_cfg] [ literal[string] ]= identifier[an_data] [ identifier[profile] ]. identifier[seed]
keyword[if] identifier[an_data] [ identifier[profile] ]. identifier[did] :
identifier[w_cfg] [ literal[string] ]= identifier[an_data] [ identifier[profile] ]. identifier[did]
keyword[if] identifier[an_data] [ identifier[profile] ]. identifier[wallet_create] :
keyword[try] :
keyword[await] identifier[w_mgr] . identifier[create] ( identifier[w_cfg] , identifier[access] = identifier[an_data] [ identifier[profile] ]. identifier[wallet_access] )
keyword[except] identifier[ExtantWallet] :
keyword[pass]
identifier[rv] [ identifier[profile] ]= identifier[w_mgr] . identifier[get] ( identifier[w_cfg] , identifier[access] = identifier[an_data] [ identifier[profile] ]. identifier[wallet_access] )
keyword[return] identifier[rv] | async def _set_wallets(an_data: dict) -> dict:
"""
Set wallets as configured for setnym operation.
:param an_data: dict mapping profiles to anchor data
:return: dict mapping profiles to wallet objects
"""
w_mgr = WalletManager()
rv = {}
for profile in an_data:
w_cfg = {'id': an_data[profile].name}
if an_data[profile].wallet_type:
w_cfg['storage_type'] = an_data[profile].wallet_type # depends on [control=['if'], data=[]]
if an_data[profile].seed:
w_cfg['seed'] = an_data[profile].seed # depends on [control=['if'], data=[]]
if an_data[profile].did:
w_cfg['did'] = an_data[profile].did # depends on [control=['if'], data=[]]
if an_data[profile].wallet_create:
try:
await w_mgr.create(w_cfg, access=an_data[profile].wallet_access) # depends on [control=['try'], data=[]]
except ExtantWallet:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
rv[profile] = w_mgr.get(w_cfg, access=an_data[profile].wallet_access) # depends on [control=['for'], data=['profile']]
return rv |
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception)) | def function[remove, parameter[self, addon, dev]]:
constant[Remove a dependency and uninstall it.]
variable[dependencies] assign[=] call[name[self].get_dependency_manager, parameter[]]
variable[other_dependencies] assign[=] call[name[self].get_dependency_manager, parameter[]]
call[name[self].stdout.write, parameter[call[name[style].format_command, parameter[constant[Removing], name[addon]]]]]
variable[removed] assign[=] call[name[dependencies].remove, parameter[name[addon]]]
if <ast.UnaryOp object at 0x7da1b0912860> begin[:]
variable[removed] assign[=] call[name[other_dependencies].remove, parameter[name[addon]]]
if name[removed] begin[:]
call[name[self].build, parameter[]] | keyword[def] identifier[remove] ( identifier[self] , identifier[addon] , identifier[dev] = keyword[False] ):
literal[string]
identifier[dependencies] = identifier[self] . identifier[get_dependency_manager] ( identifier[dev] = identifier[dev] )
identifier[other_dependencies] = identifier[self] . identifier[get_dependency_manager] ( identifier[dev] = keyword[not] identifier[dev] )
identifier[self] . identifier[stdout] . identifier[write] ( identifier[style] . identifier[format_command] ( literal[string] , identifier[addon] ))
identifier[removed] = identifier[dependencies] . identifier[remove] ( identifier[addon] , identifier[warn] = keyword[False] )
keyword[if] keyword[not] identifier[removed] :
identifier[removed] = identifier[other_dependencies] . identifier[remove] ( identifier[addon] , identifier[warn] = keyword[False] )
keyword[if] identifier[removed] :
identifier[self] . identifier[build] ()
keyword[else] :
identifier[exception] = literal[string] % identifier[Dependency] ( identifier[addon] ). identifier[to_stdout] ()
identifier[self] . identifier[stdout] . identifier[write] ( identifier[style] . identifier[red] ( identifier[exception] )) | def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False) # depends on [control=['if'], data=[]]
if removed:
self.build() # depends on [control=['if'], data=[]]
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception)) |
def serve_assets(path):
"""Serve Nikola assets.
This is meant to be used ONLY by the internal dev server.
Please configure your web server to handle requests to this URL::
/assets/ => output/assets
"""
res = os.path.join(app.config['NIKOLA_ROOT'],
_site.config["OUTPUT_FOLDER"], 'assets')
return send_from_directory(res, path) | def function[serve_assets, parameter[path]]:
constant[Serve Nikola assets.
This is meant to be used ONLY by the internal dev server.
Please configure your web server to handle requests to this URL::
/assets/ => output/assets
]
variable[res] assign[=] call[name[os].path.join, parameter[call[name[app].config][constant[NIKOLA_ROOT]], call[name[_site].config][constant[OUTPUT_FOLDER]], constant[assets]]]
return[call[name[send_from_directory], parameter[name[res], name[path]]]] | keyword[def] identifier[serve_assets] ( identifier[path] ):
literal[string]
identifier[res] = identifier[os] . identifier[path] . identifier[join] ( identifier[app] . identifier[config] [ literal[string] ],
identifier[_site] . identifier[config] [ literal[string] ], literal[string] )
keyword[return] identifier[send_from_directory] ( identifier[res] , identifier[path] ) | def serve_assets(path):
"""Serve Nikola assets.
This is meant to be used ONLY by the internal dev server.
Please configure your web server to handle requests to this URL::
/assets/ => output/assets
"""
res = os.path.join(app.config['NIKOLA_ROOT'], _site.config['OUTPUT_FOLDER'], 'assets')
return send_from_directory(res, path) |
def rotate_grid_from_profile(self, grid_elliptical):
""" Rotate a grid of elliptical (y,x) coordinates from the reference frame of the profile back to the \
unrotated coordinate grid reference frame (coordinates are not shifted back to their original centre).
This routine is used after computing deflection angles in the reference frame of the profile, so that the \
deflection angles can be re-rotated to the frame of the original coordinates before performing ray-tracing.
Parameters
----------
grid_elliptical : TransformedGrid(ndarray)
The (y, x) coordinates in the reference frame of an elliptical profile.
"""
y = np.add(np.multiply(grid_elliptical[:, 1], self.sin_phi), np.multiply(grid_elliptical[:, 0], self.cos_phi))
x = np.add(np.multiply(grid_elliptical[:, 1], self.cos_phi), - np.multiply(grid_elliptical[:, 0], self.sin_phi))
return np.vstack((y, x)).T | def function[rotate_grid_from_profile, parameter[self, grid_elliptical]]:
constant[ Rotate a grid of elliptical (y,x) coordinates from the reference frame of the profile back to the unrotated coordinate grid reference frame (coordinates are not shifted back to their original centre).
This routine is used after computing deflection angles in the reference frame of the profile, so that the deflection angles can be re-rotated to the frame of the original coordinates before performing ray-tracing.
Parameters
----------
grid_elliptical : TransformedGrid(ndarray)
The (y, x) coordinates in the reference frame of an elliptical profile.
]
variable[y] assign[=] call[name[np].add, parameter[call[name[np].multiply, parameter[call[name[grid_elliptical]][tuple[[<ast.Slice object at 0x7da18f00c100>, <ast.Constant object at 0x7da18f00e0b0>]]], name[self].sin_phi]], call[name[np].multiply, parameter[call[name[grid_elliptical]][tuple[[<ast.Slice object at 0x7da18f00c730>, <ast.Constant object at 0x7da18f00e2c0>]]], name[self].cos_phi]]]]
variable[x] assign[=] call[name[np].add, parameter[call[name[np].multiply, parameter[call[name[grid_elliptical]][tuple[[<ast.Slice object at 0x7da18f00ca60>, <ast.Constant object at 0x7da18f00dc00>]]], name[self].cos_phi]], <ast.UnaryOp object at 0x7da18f00e9e0>]]
return[call[name[np].vstack, parameter[tuple[[<ast.Name object at 0x7da18f00d420>, <ast.Name object at 0x7da18f00d930>]]]].T] | keyword[def] identifier[rotate_grid_from_profile] ( identifier[self] , identifier[grid_elliptical] ):
literal[string]
identifier[y] = identifier[np] . identifier[add] ( identifier[np] . identifier[multiply] ( identifier[grid_elliptical] [:, literal[int] ], identifier[self] . identifier[sin_phi] ), identifier[np] . identifier[multiply] ( identifier[grid_elliptical] [:, literal[int] ], identifier[self] . identifier[cos_phi] ))
identifier[x] = identifier[np] . identifier[add] ( identifier[np] . identifier[multiply] ( identifier[grid_elliptical] [:, literal[int] ], identifier[self] . identifier[cos_phi] ),- identifier[np] . identifier[multiply] ( identifier[grid_elliptical] [:, literal[int] ], identifier[self] . identifier[sin_phi] ))
keyword[return] identifier[np] . identifier[vstack] (( identifier[y] , identifier[x] )). identifier[T] | def rotate_grid_from_profile(self, grid_elliptical):
""" Rotate a grid of elliptical (y,x) coordinates from the reference frame of the profile back to the unrotated coordinate grid reference frame (coordinates are not shifted back to their original centre).
This routine is used after computing deflection angles in the reference frame of the profile, so that the deflection angles can be re-rotated to the frame of the original coordinates before performing ray-tracing.
Parameters
----------
grid_elliptical : TransformedGrid(ndarray)
The (y, x) coordinates in the reference frame of an elliptical profile.
"""
y = np.add(np.multiply(grid_elliptical[:, 1], self.sin_phi), np.multiply(grid_elliptical[:, 0], self.cos_phi))
x = np.add(np.multiply(grid_elliptical[:, 1], self.cos_phi), -np.multiply(grid_elliptical[:, 0], self.sin_phi))
return np.vstack((y, x)).T |
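The record above is a plain 2-D rotation written out componentwise; a self-contained numpy check (angle and grid values are illustrative) that applying the same formulas at -phi undoes it:
import numpy as np

phi = np.deg2rad(30.0)
cos_phi, sin_phi = np.cos(phi), np.sin(phi)

grid = np.array([[1.0, 0.0], [0.0, 2.0]])  # rows are (y, x), as in the record
y = grid[:, 1] * sin_phi + grid[:, 0] * cos_phi
x = grid[:, 1] * cos_phi - grid[:, 0] * sin_phi
rotated = np.vstack((y, x)).T

# Re-apply with the opposite angle; this should recover the original grid.
y_back = rotated[:, 1] * -sin_phi + rotated[:, 0] * cos_phi
x_back = rotated[:, 1] * cos_phi - rotated[:, 0] * -sin_phi
assert np.allclose(np.vstack((y_back, x_back)).T, grid)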
def _set_node_state(self, v, load=False):
"""
Setter method for node_state, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_nodes/vcs_node_info/node_state (node-state-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_node_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_node_state() directly.
YANG Description: Node state
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'Replacing': {'value': 4}, u'Unknown': {'value': 1}, u'Rejoining': {'value': 6}, u'Awaiting-Rejoin': {'value': 7}, u'Online': {'value': 2}, u'Offline': {'value': 3}, u'Coming-Online': {'value': 5}},), is_leaf=True, yang_name="node-state", rest_name="node-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='node-state-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """node_state must be of a type compatible with node-state-type""",
'defined-type': "brocade-vcs:node-state-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'Replacing': {'value': 4}, u'Unknown': {'value': 1}, u'Rejoining': {'value': 6}, u'Awaiting-Rejoin': {'value': 7}, u'Online': {'value': 2}, u'Offline': {'value': 3}, u'Coming-Online': {'value': 5}},), is_leaf=True, yang_name="node-state", rest_name="node-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='node-state-type', is_config=True)""",
})
self.__node_state = t
if hasattr(self, '_set'):
self._set() | def function[_set_node_state, parameter[self, v, load]]:
constant[
Setter method for node_state, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_nodes/vcs_node_info/node_state (node-state-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_node_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_node_state() directly.
YANG Description: Node state
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da207f02290>
name[self].__node_state assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_node_state] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }},), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[False] , identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__node_state] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_node_state(self, v, load=False):
"""
Setter method for node_state, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_nodes/vcs_node_info/node_state (node-state-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_node_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_node_state() directly.
YANG Description: Node state
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'Replacing': {'value': 4}, u'Unknown': {'value': 1}, u'Rejoining': {'value': 6}, u'Awaiting-Rejoin': {'value': 7}, u'Online': {'value': 2}, u'Offline': {'value': 3}, u'Coming-Online': {'value': 5}}), is_leaf=True, yang_name='node-state', rest_name='node-state', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='node-state-type', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'node_state must be of a type compatible with node-state-type', 'defined-type': 'brocade-vcs:node-state-type', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'Replacing\': {\'value\': 4}, u\'Unknown\': {\'value\': 1}, u\'Rejoining\': {\'value\': 6}, u\'Awaiting-Rejoin\': {\'value\': 7}, u\'Online\': {\'value\': 2}, u\'Offline\': {\'value\': 3}, u\'Coming-Online\': {\'value\': 5}},), is_leaf=True, yang_name="node-state", rest_name="node-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace=\'urn:brocade.com:mgmt:brocade-vcs\', defining_module=\'brocade-vcs\', yang_type=\'node-state-type\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__node_state = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def from_taxdb(cls, con, root=None):
"""
Generate a TaxNode from a taxonomy database
"""
cursor = con.cursor()
if root is None:
cursor.execute(
"SELECT tax_id, rank FROM nodes WHERE tax_id = parent_id")
else:
cursor.execute(
"SELECT tax_id, rank FROM nodes WHERE tax_id = ?", [root])
tax_id, rank = cursor.fetchone()
root = cls(rank=rank, tax_id=tax_id)
def add_lineage(parent):
cursor.execute("""SELECT tax_id, rank, tax_name
FROM nodes INNER JOIN names USING (tax_id)
WHERE parent_id = :1 and tax_id <> :1
AND names.is_primary = 1
""", [parent.tax_id])
for tax_id, rank, name in cursor:
node = cls(rank=rank, tax_id=tax_id, name=name)
parent.add_child(node)
for child in parent.children:
add_lineage(child)
add_lineage(root)
return root | def function[from_taxdb, parameter[cls, con, root]]:
constant[
Generate a TaxNode from a taxonomy database
]
variable[cursor] assign[=] call[name[con].cursor, parameter[]]
if compare[name[root] is constant[None]] begin[:]
call[name[cursor].execute, parameter[constant[SELECT tax_id, rank FROM nodes WHERE tax_id = parent_id]]]
<ast.Tuple object at 0x7da1b198e1a0> assign[=] call[name[cursor].fetchone, parameter[]]
variable[root] assign[=] call[name[cls], parameter[]]
def function[add_lineage, parameter[parent]]:
call[name[cursor].execute, parameter[constant[SELECT tax_id, rank, tax_name
FROM nodes INNER JOIN names USING (tax_id)
WHERE parent_id = :1 and tax_id <> :1
AND names.is_primary = 1
], list[[<ast.Attribute object at 0x7da1b198f1f0>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b198dba0>, <ast.Name object at 0x7da1b198db70>, <ast.Name object at 0x7da1b198ea40>]]] in starred[name[cursor]] begin[:]
variable[node] assign[=] call[name[cls], parameter[]]
call[name[parent].add_child, parameter[name[node]]]
for taget[name[child]] in starred[name[parent].children] begin[:]
call[name[add_lineage], parameter[name[child]]]
call[name[add_lineage], parameter[name[root]]]
return[name[root]] | keyword[def] identifier[from_taxdb] ( identifier[cls] , identifier[con] , identifier[root] = keyword[None] ):
literal[string]
identifier[cursor] = identifier[con] . identifier[cursor] ()
keyword[if] identifier[root] keyword[is] keyword[None] :
identifier[cursor] . identifier[execute] (
literal[string] )
keyword[else] :
identifier[cursor] . identifier[execute] (
literal[string] ,[ identifier[root] ])
identifier[tax_id] , identifier[rank] = identifier[cursor] . identifier[fetchone] ()
identifier[root] = identifier[cls] ( identifier[rank] = identifier[rank] , identifier[tax_id] = identifier[tax_id] )
keyword[def] identifier[add_lineage] ( identifier[parent] ):
identifier[cursor] . identifier[execute] ( literal[string] ,[ identifier[parent] . identifier[tax_id] ])
keyword[for] identifier[tax_id] , identifier[rank] , identifier[name] keyword[in] identifier[cursor] :
identifier[node] = identifier[cls] ( identifier[rank] = identifier[rank] , identifier[tax_id] = identifier[tax_id] , identifier[name] = identifier[name] )
identifier[parent] . identifier[add_child] ( identifier[node] )
keyword[for] identifier[child] keyword[in] identifier[parent] . identifier[children] :
identifier[add_lineage] ( identifier[child] )
identifier[add_lineage] ( identifier[root] )
keyword[return] identifier[root] | def from_taxdb(cls, con, root=None):
"""
Generate a TaxNode from a taxonomy database
"""
cursor = con.cursor()
if root is None:
cursor.execute('SELECT tax_id, rank FROM nodes WHERE tax_id = parent_id') # depends on [control=['if'], data=[]]
else:
cursor.execute('SELECT tax_id, rank FROM nodes WHERE tax_id = ?', [root])
(tax_id, rank) = cursor.fetchone()
root = cls(rank=rank, tax_id=tax_id)
def add_lineage(parent):
cursor.execute('SELECT tax_id, rank, tax_name\n FROM nodes INNER JOIN names USING (tax_id)\n WHERE parent_id = :1 and tax_id <> :1\n AND names.is_primary = 1\n ', [parent.tax_id])
for (tax_id, rank, name) in cursor:
node = cls(rank=rank, tax_id=tax_id, name=name)
parent.add_child(node) # depends on [control=['for'], data=[]]
for child in parent.children:
add_lineage(child) # depends on [control=['for'], data=['child']]
add_lineage(root)
return root |
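Run together with the record above, a self-contained end-to-end sketch against an in-memory SQLite taxonomy; the minimal TaxNode below is an assumption standing in for the real node class.
import sqlite3

class TaxNode(object):  # assumed minimal shape of the real class
    def __init__(self, rank=None, tax_id=None, name=None):
        self.rank, self.tax_id, self.name = rank, tax_id, name
        self.children = []
    def add_child(self, node):
        self.children.append(node)
    from_taxdb = classmethod(from_taxdb)  # reuse the record's function

con = sqlite3.connect(':memory:')
con.executescript(
    "CREATE TABLE nodes (tax_id TEXT, parent_id TEXT, rank TEXT);"
    "CREATE TABLE names (tax_id TEXT, tax_name TEXT, is_primary INTEGER);"
    "INSERT INTO nodes VALUES ('1', '1', 'root'), ('2', '1', 'superkingdom');"
    "INSERT INTO names VALUES ('2', 'Bacteria', 1);")
root = TaxNode.from_taxdb(con)
print(root.tax_id, [child.name for child in root.children])  # -> 1 ['Bacteria']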
def divide(iterable, n): # pylint: disable=invalid-name
"""Evenly divide elements.
Arguments
---------
iterable : iterable
n : integer
The number of buckets in which to divide the elements
Returns
-------
The generator produces *n* tuples, each containing a number of elements
where the number is calculated to be evenly distributed across all of the
returned tuples.
The number of tuples returned is always *n* and, thus, may return an empty
tuple if there is not enough data to distribute.
In order to determine the number of elements to put in each tuple, the
iterable is converted into a list. Consider using divide_sizes() and
manually slicing the iterator if this is not desirable.
"""
if n <= 0:
return []
data = list(iterable)
base, rem = divmod(len(data), n)
iterable = iter(data)
for i in range(n):
yield tuple(islice(iterable, base + 1 if i < rem else base)) | def function[divide, parameter[iterable, n]]:
constant[Evenly divide elements.
Arguments
---------
iterable : iterable
n : integer
The number of buckets in which to divide the elements
Returns
-------
The generator produces *n* tuples, each containing a number of elements
where the number is calculated to be evenly distributed across all of the
returned tuples.
The number of tuples returned is always *n* and, thus, may return an empty
tuple if there is not enough data to distribute.
In order to determine the number of elements to put in each tuple, the
iterable is converted into a list. Consider using divide_sizes() and
manually slicing the iterator if this is not desirable.
]
if compare[name[n] less_or_equal[<=] constant[0]] begin[:]
return[list[[]]]
variable[data] assign[=] call[name[list], parameter[name[iterable]]]
<ast.Tuple object at 0x7da1b1836050> assign[=] call[name[divmod], parameter[call[name[len], parameter[name[data]]], name[n]]]
variable[iterable] assign[=] call[name[iter], parameter[name[data]]]
for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:]
<ast.Yield object at 0x7da1b1728c40> | keyword[def] identifier[divide] ( identifier[iterable] , identifier[n] ):
literal[string]
keyword[if] identifier[n] <= literal[int] :
keyword[return] []
identifier[data] = identifier[list] ( identifier[iterable] )
identifier[base] , identifier[rem] = identifier[divmod] ( identifier[len] ( identifier[data] ), identifier[n] )
identifier[iterable] = identifier[iter] ( identifier[data] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
keyword[yield] identifier[tuple] ( identifier[islice] ( identifier[iterable] , identifier[base] + literal[int] keyword[if] identifier[i] < identifier[rem] keyword[else] identifier[base] )) | def divide(iterable, n): # pylint: disable=invalid-name
'Evenly divide elements.\n\n Arguments\n ---------\n iterable : iterable\n n : integer\n The number of buckets in which to divide the elements\n\n Returns\n -------\n The generator produces *n* tuples, each containing a number of elements\n where the number is calculated to be evenly distributed across all of the\n returned tuples.\n\n The number of tuples returned is always *n* and, thus, may return an empty\n tuple if there is not enough data to distribute.\n\n In order to determine the number of elements to put in each tuple, the\n iterable is converted into a list. Consider using divide_sizes() and\n manually slicing the iterator if this is not desirable.\n '
if n <= 0:
return [] # depends on [control=['if'], data=[]]
data = list(iterable)
(base, rem) = divmod(len(data), n)
iterable = iter(data)
for i in range(n):
yield tuple(islice(iterable, base + 1 if i < rem else base)) # depends on [control=['for'], data=['i']] |
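A self-contained check of the record above: ten items across three buckets come out as sizes 4-3-3, and extra buckets are padded out empty (islice is imported here because the record assumes it at module scope).
from itertools import islice

print(list(divide(range(10), 3)))  # -> [(0, 1, 2, 3), (4, 5, 6), (7, 8, 9)]
print(list(divide([1, 2], 4)))     # -> [(1,), (2,), (), ()]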
def get_section_metrics(cls):
"""
Get the mapping between metrics and sections in Manuscripts report
:return: a dict with the mapping between metrics and sections in Manuscripts report
"""
return {
"overview": {
"activity_metrics": [Closed, Submitted],
"author_metrics": None,
"bmi_metrics": [BMI],
"time_to_close_metrics": [DaysToMergeMedian],
"projects_metrics": [Projects],
},
"com_channels": {
"activity_metrics": [],
"author_metrics": []
},
"project_activity": {
"metrics": [Submitted, Closed]
},
"project_community": {
"author_metrics": [],
"people_top_metrics": [],
"orgs_top_metrics": [],
},
"project_process": {
"bmi_metrics": [BMI],
"time_to_close_metrics": [],
"time_to_close_title": "",
"time_to_close_review_metrics": [DaysToMergeAverage, DaysToMergeMedian],
"time_to_close_review_title": "Days to close review (median and average)",
"patchsets_metrics": [PatchsetsMedian, PatchsetsAverage],
"patchsets_title": "Number of patchsets per review (median and average)"
}
} | def function[get_section_metrics, parameter[cls]]:
constant[
Get the mapping between metrics and sections in Manuscripts report
:return: a dict with the mapping between metrics and sections in Manuscripts report
]
return[dictionary[[<ast.Constant object at 0x7da1b268ea40>, <ast.Constant object at 0x7da1b268fb50>, <ast.Constant object at 0x7da1b268d270>, <ast.Constant object at 0x7da1b268edd0>, <ast.Constant object at 0x7da1b268f1f0>], [<ast.Dict object at 0x7da1b268c760>, <ast.Dict object at 0x7da1b268f220>, <ast.Dict object at 0x7da1b268de10>, <ast.Dict object at 0x7da1b268f160>, <ast.Dict object at 0x7da1b268dea0>]]] | keyword[def] identifier[get_section_metrics] ( identifier[cls] ):
literal[string]
keyword[return] {
literal[string] :{
literal[string] :[ identifier[Closed] , identifier[Submitted] ],
literal[string] : keyword[None] ,
literal[string] :[ identifier[BMI] ],
literal[string] :[ identifier[DaysToMergeMedian] ],
literal[string] :[ identifier[Projects] ],
},
literal[string] :{
literal[string] :[],
literal[string] :[]
},
literal[string] :{
literal[string] :[ identifier[Submitted] , identifier[Closed] ]
},
literal[string] :{
literal[string] :[],
literal[string] :[],
literal[string] :[],
},
literal[string] :{
literal[string] :[ identifier[BMI] ],
literal[string] :[],
literal[string] : literal[string] ,
literal[string] :[ identifier[DaysToMergeAverage] , identifier[DaysToMergeMedian] ],
literal[string] : literal[string] ,
literal[string] :[ identifier[PatchsetsMedian] , identifier[PatchsetsAverage] ],
literal[string] : literal[string]
}
} | def get_section_metrics(cls):
"""
Get the mapping between metrics and sections in Manuscripts report
:return: a dict with the mapping between metrics and sections in Manuscripts report
"""
return {'overview': {'activity_metrics': [Closed, Submitted], 'author_metrics': None, 'bmi_metrics': [BMI], 'time_to_close_metrics': [DaysToMergeMedian], 'projects_metrics': [Projects]}, 'com_channels': {'activity_metrics': [], 'author_metrics': []}, 'project_activity': {'metrics': [Submitted, Closed]}, 'project_community': {'author_metrics': [], 'people_top_metrics': [], 'orgs_top_metrics': []}, 'project_process': {'bmi_metrics': [BMI], 'time_to_close_metrics': [], 'time_to_close_title': '', 'time_to_close_review_metrics': [DaysToMergeAverage, DaysToMergeMedian], 'time_to_close_review_title': 'Days to close review (median and average)', 'patchsets_metrics': [PatchsetsMedian, PatchsetsAverage], 'patchsets_title': 'Number of patchsets per review (median and average)'}} |
def prompt_y_or_n(self, prompt):
"""
Wrapper around prompt_input for simple yes/no queries.
"""
ch = self.prompt_input(prompt, key=True)
if ch in (ord('Y'), ord('y')):
return True
elif ch in (ord('N'), ord('n'), None):
return False
else:
self.flash()
return False | def function[prompt_y_or_n, parameter[self, prompt]]:
constant[
Wrapper around prompt_input for simple yes/no queries.
]
variable[ch] assign[=] call[name[self].prompt_input, parameter[name[prompt]]]
if compare[name[ch] in tuple[[<ast.Call object at 0x7da18fe91240>, <ast.Call object at 0x7da18fe93580>]]] begin[:]
return[constant[True]] | keyword[def] identifier[prompt_y_or_n] ( identifier[self] , identifier[prompt] ):
literal[string]
identifier[ch] = identifier[self] . identifier[prompt_input] ( identifier[prompt] , identifier[key] = keyword[True] )
keyword[if] identifier[ch] keyword[in] ( identifier[ord] ( literal[string] ), identifier[ord] ( literal[string] )):
keyword[return] keyword[True]
keyword[elif] identifier[ch] keyword[in] ( identifier[ord] ( literal[string] ), identifier[ord] ( literal[string] ), keyword[None] ):
keyword[return] keyword[False]
keyword[else] :
identifier[self] . identifier[flash] ()
keyword[return] keyword[False] | def prompt_y_or_n(self, prompt):
"""
Wrapper around prompt_input for simple yes/no queries.
"""
ch = self.prompt_input(prompt, key=True)
if ch in (ord('Y'), ord('y')):
return True # depends on [control=['if'], data=[]]
elif ch in (ord('N'), ord('n'), None):
return False # depends on [control=['if'], data=[]]
else:
self.flash()
return False |
def create_append(filename: str, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], row_attrs: Dict[str, np.ndarray], col_attrs: Dict[str, np.ndarray], *, file_attrs: Dict[str, str] = None, fill_values: Dict[str, np.ndarray] = None) -> None:
"""
**DEPRECATED** - Use `new` instead; see https://github.com/linnarsson-lab/loompy/issues/42
"""
deprecated("'create_append' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42")
if os.path.exists(filename):
with connect(filename) as ds:
ds.add_columns(layers, col_attrs, fill_values=fill_values)
else:
create(filename, layers, row_attrs, col_attrs, file_attrs=file_attrs) | def function[create_append, parameter[filename, layers, row_attrs, col_attrs]]:
constant[
**DEPRECATED** - Use `new` instead; see https://github.com/linnarsson-lab/loompy/issues/42
]
call[name[deprecated], parameter[constant['create_append' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42]]]
if call[name[os].path.exists, parameter[name[filename]]] begin[:]
with call[name[connect], parameter[name[filename]]] begin[:]
call[name[ds].add_columns, parameter[name[layers], name[col_attrs]]] | keyword[def] identifier[create_append] ( identifier[filename] : identifier[str] , identifier[layers] : identifier[Union] [ identifier[np] . identifier[ndarray] , identifier[Dict] [ identifier[str] , identifier[np] . identifier[ndarray] ], identifier[loompy] . identifier[LayerManager] ], identifier[row_attrs] : identifier[Dict] [ identifier[str] , identifier[np] . identifier[ndarray] ], identifier[col_attrs] : identifier[Dict] [ identifier[str] , identifier[np] . identifier[ndarray] ],*, identifier[file_attrs] : identifier[Dict] [ identifier[str] , identifier[str] ]= keyword[None] , identifier[fill_values] : identifier[Dict] [ identifier[str] , identifier[np] . identifier[ndarray] ]= keyword[None] )-> keyword[None] :
literal[string]
identifier[deprecated] ( literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
keyword[with] identifier[connect] ( identifier[filename] ) keyword[as] identifier[ds] :
identifier[ds] . identifier[add_columns] ( identifier[layers] , identifier[col_attrs] , identifier[fill_values] = identifier[fill_values] )
keyword[else] :
identifier[create] ( identifier[filename] , identifier[layers] , identifier[row_attrs] , identifier[col_attrs] , identifier[file_attrs] = identifier[file_attrs] ) | def create_append(filename: str, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], row_attrs: Dict[str, np.ndarray], col_attrs: Dict[str, np.ndarray], *, file_attrs: Dict[str, str]=None, fill_values: Dict[str, np.ndarray]=None) -> None:
"""
**DEPRECATED** - Use `new` instead; see https://github.com/linnarsson-lab/loompy/issues/42
"""
deprecated("'create_append' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42")
if os.path.exists(filename):
with connect(filename) as ds:
ds.add_columns(layers, col_attrs, fill_values=fill_values) # depends on [control=['with'], data=['ds']] # depends on [control=['if'], data=[]]
else:
create(filename, layers, row_attrs, col_attrs, file_attrs=file_attrs) |
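# A hedged sketch of the replacement path the deprecation notice points to:
# create the .loom file once, then append columns on later runs. Assumes the
# loompy package is installed; the file name and attribute keys are made up.
import os
import numpy as np
import loompy

matrix = np.random.randint(0, 10, size=(20, 5))
row_attrs = {"Gene": np.array(["g%d" % i for i in range(20)])}
col_attrs = {"CellID": np.arange(5)}

if not os.path.exists("demo.loom"):
    loompy.create("demo.loom", matrix, row_attrs, col_attrs)
else:
    with loompy.connect("demo.loom") as ds:
        ds.add_columns(matrix, col_attrs)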
def main(**kwargs):
"""
if not _IRC3_INSTALLED:
logging.error('vexbot_irc requires `irc3` to be installed. Please install '
'using `pip install irc3`')
sys.exit(1)
"""
config = _from_argv(irc3.IrcBot, kwargs=kwargs)
if not 'includes' in config:
config['includes'] = []
message_plug = 'vexbot.adapters.irc.echo_to_message'
if not message_plug in config['includes']:
config['includes'].append(message_plug)
service_name = config.get('service_name', 'irc')
connection = config.get('connection', {})
interface = IrcInterface(service_name, irc_config=config, connection=connection)
interface.run()
sys.exit() | def function[main, parameter[]]:
constant[
if not _IRC3_INSTALLED:
logging.error('vexbot_irc requires `irc3` to be installed. Please install '
'using `pip install irc3`')
sys.exit(1)
]
variable[config] assign[=] call[name[_from_argv], parameter[name[irc3].IrcBot]]
if <ast.UnaryOp object at 0x7da1b0ba39d0> begin[:]
call[name[config]][constant[includes]] assign[=] list[[]]
variable[message_plug] assign[=] constant[vexbot.adapters.irc.echo_to_message]
if <ast.UnaryOp object at 0x7da1b0bc94e0> begin[:]
call[call[name[config]][constant[includes]].append, parameter[name[message_plug]]]
variable[service_name] assign[=] call[name[config].get, parameter[constant[service_name], constant[irc]]]
variable[connection] assign[=] call[name[config].get, parameter[constant[connection], dictionary[[], []]]]
variable[interface] assign[=] call[name[IrcInterface], parameter[name[service_name]]]
call[name[interface].run, parameter[]]
call[name[sys].exit, parameter[]] | keyword[def] identifier[main] (** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[_from_argv] ( identifier[irc3] . identifier[IrcBot] , identifier[kwargs] = identifier[kwargs] )
keyword[if] keyword[not] literal[string] keyword[in] identifier[config] :
identifier[config] [ literal[string] ]=[]
identifier[message_plug] = literal[string]
keyword[if] keyword[not] identifier[message_plug] keyword[in] identifier[config] [ literal[string] ]:
identifier[config] [ literal[string] ]. identifier[append] ( identifier[message_plug] )
identifier[service_name] = identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[connection] = identifier[config] . identifier[get] ( literal[string] ,{})
identifier[interface] = identifier[IrcInterface] ( identifier[service_name] , identifier[irc_config] = identifier[config] , identifier[connection] = identifier[connection] )
identifier[interface] . identifier[run] ()
identifier[sys] . identifier[exit] () | def main(**kwargs):
"""
if not _IRC3_INSTALLED:
logging.error('vexbot_irc requires `irc3` to be installed. Please install '
'using `pip install irc3`')
sys.exit(1)
"""
config = _from_argv(irc3.IrcBot, kwargs=kwargs)
if not 'includes' in config:
config['includes'] = [] # depends on [control=['if'], data=[]]
message_plug = 'vexbot.adapters.irc.echo_to_message'
if not message_plug in config['includes']:
config['includes'].append(message_plug) # depends on [control=['if'], data=[]]
service_name = config.get('service_name', 'irc')
connection = config.get('connection', {})
interface = IrcInterface(service_name, irc_config=config, connection=connection)
interface.run()
sys.exit() |
async def download_cot_artifact(chain, task_id, path):
"""Download an artifact and verify its SHA against the chain of trust.
Args:
chain (ChainOfTrust): the chain of trust object
task_id (str): the task ID to download from
path (str): the relative path to the artifact to download
Returns:
str: the full path of the downloaded artifact
Raises:
CoTError: on failure.
"""
link = chain.get_link(task_id)
log.debug("Verifying {} is in {} cot artifacts...".format(path, task_id))
if not link.cot:
log.warning('Chain of Trust for "{}" in {} does not exist. See above log for more details. \
Skipping download of this artifact'.format(path, task_id))
return
if path not in link.cot['artifacts']:
raise CoTError("path {} not in {} {} chain of trust artifacts!".format(path, link.name, link.task_id))
url = get_artifact_url(chain.context, task_id, path)
loggable_url = get_loggable_url(url)
log.info("Downloading Chain of Trust artifact:\n{}".format(loggable_url))
await download_artifacts(
chain.context, [url], parent_dir=link.cot_dir, valid_artifact_task_ids=[task_id]
)
full_path = link.get_artifact_full_path(path)
for alg, expected_sha in link.cot['artifacts'][path].items():
if alg not in chain.context.config['valid_hash_algorithms']:
raise CoTError("BAD HASH ALGORITHM: {}: {} {}!".format(link.name, alg, full_path))
real_sha = get_hash(full_path, hash_alg=alg)
if expected_sha != real_sha:
raise CoTError("BAD HASH on file {}: {}: Expected {} {}; got {}!".format(
full_path, link.name, alg, expected_sha, real_sha
))
log.debug("{} matches the expected {} {}".format(full_path, alg, expected_sha))
return full_path | <ast.AsyncFunctionDef object at 0x7da18dc99330> | keyword[async] keyword[def] identifier[download_cot_artifact] ( identifier[chain] , identifier[task_id] , identifier[path] ):
literal[string]
identifier[link] = identifier[chain] . identifier[get_link] ( identifier[task_id] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[path] , identifier[task_id] ))
keyword[if] keyword[not] identifier[link] . identifier[cot] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[path] , identifier[task_id] ))
keyword[return]
keyword[if] identifier[path] keyword[not] keyword[in] identifier[link] . identifier[cot] [ literal[string] ]:
keyword[raise] identifier[CoTError] ( literal[string] . identifier[format] ( identifier[path] , identifier[link] . identifier[name] , identifier[link] . identifier[task_id] ))
identifier[url] = identifier[get_artifact_url] ( identifier[chain] . identifier[context] , identifier[task_id] , identifier[path] )
identifier[loggable_url] = identifier[get_loggable_url] ( identifier[url] )
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[loggable_url] ))
keyword[await] identifier[download_artifacts] (
identifier[chain] . identifier[context] ,[ identifier[url] ], identifier[parent_dir] = identifier[link] . identifier[cot_dir] , identifier[valid_artifact_task_ids] =[ identifier[task_id] ]
)
identifier[full_path] = identifier[link] . identifier[get_artifact_full_path] ( identifier[path] )
keyword[for] identifier[alg] , identifier[expected_sha] keyword[in] identifier[link] . identifier[cot] [ literal[string] ][ identifier[path] ]. identifier[items] ():
keyword[if] identifier[alg] keyword[not] keyword[in] identifier[chain] . identifier[context] . identifier[config] [ literal[string] ]:
keyword[raise] identifier[CoTError] ( literal[string] . identifier[format] ( identifier[link] . identifier[name] , identifier[alg] , identifier[full_path] ))
identifier[real_sha] = identifier[get_hash] ( identifier[full_path] , identifier[hash_alg] = identifier[alg] )
keyword[if] identifier[expected_sha] != identifier[real_sha] :
keyword[raise] identifier[CoTError] ( literal[string] . identifier[format] (
identifier[full_path] , identifier[link] . identifier[name] , identifier[alg] , identifier[expected_sha] , identifier[real_sha]
))
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[full_path] , identifier[alg] , identifier[expected_sha] ))
keyword[return] identifier[full_path] | async def download_cot_artifact(chain, task_id, path):
"""Download an artifact and verify its SHA against the chain of trust.
Args:
chain (ChainOfTrust): the chain of trust object
task_id (str): the task ID to download from
path (str): the relative path to the artifact to download
Returns:
str: the full path of the downloaded artifact
Raises:
CoTError: on failure.
"""
link = chain.get_link(task_id)
log.debug('Verifying {} is in {} cot artifacts...'.format(path, task_id))
if not link.cot:
log.warning('Chain of Trust for "{}" in {} does not exist. See above log for more details. Skipping download of this artifact'.format(path, task_id))
return # depends on [control=['if'], data=[]]
if path not in link.cot['artifacts']:
raise CoTError('path {} not in {} {} chain of trust artifacts!'.format(path, link.name, link.task_id)) # depends on [control=['if'], data=['path']]
url = get_artifact_url(chain.context, task_id, path)
loggable_url = get_loggable_url(url)
log.info('Downloading Chain of Trust artifact:\n{}'.format(loggable_url))
await download_artifacts(chain.context, [url], parent_dir=link.cot_dir, valid_artifact_task_ids=[task_id])
full_path = link.get_artifact_full_path(path)
for (alg, expected_sha) in link.cot['artifacts'][path].items():
if alg not in chain.context.config['valid_hash_algorithms']:
raise CoTError('BAD HASH ALGORITHM: {}: {} {}!'.format(link.name, alg, full_path)) # depends on [control=['if'], data=['alg']]
real_sha = get_hash(full_path, hash_alg=alg)
if expected_sha != real_sha:
raise CoTError('BAD HASH on file {}: {}: Expected {} {}; got {}!'.format(full_path, link.name, alg, expected_sha, real_sha)) # depends on [control=['if'], data=['expected_sha', 'real_sha']]
log.debug('{} matches the expected {} {}'.format(full_path, alg, expected_sha)) # depends on [control=['for'], data=[]]
return full_path |
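# Illustration of the hash check at the end of download_cot_artifact().
# get_hash() is imported from scriptworker's utilities; this hypothetical
# stand-in shows the streaming-digest pattern such a helper is assumed to use.
import hashlib

def get_hash_sketch(path, hash_alg="sha256", chunk_size=1024 * 1024):
    h = hashlib.new(hash_alg)  # raises ValueError for unsupported algorithms
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)  # digest the file in chunks, not all at once
    return h.hexdigest()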
def currentContentsWidget( self, autoadd = False ):
"""
Returns the current contents widget based on the cached index. If \
no widget is specified and autoadd is True, then a new widget will \
be added to the tab.
:param autoadd | <bool>
:return <QWebView>
"""
widget = self.uiContentsTAB.widget(self.currentContentsIndex())
if ( not isinstance(widget, QWebView) ):
widget = None
if ( not widget and autoadd ):
widget = self.addContentsWidget()
return widget | def function[currentContentsWidget, parameter[self, autoadd]]:
constant[
Returns the current contents widget based on the cached index. If no widget is specified and autoadd is True, then a new widget will be added to the tab.
:param autoadd | <bool>
:return <QWebView>
]
variable[widget] assign[=] call[name[self].uiContentsTAB.widget, parameter[call[name[self].currentContentsIndex, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b24d4ac0> begin[:]
variable[widget] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b24d4700> begin[:]
variable[widget] assign[=] call[name[self].addContentsWidget, parameter[]]
return[name[widget]] | keyword[def] identifier[currentContentsWidget] ( identifier[self] , identifier[autoadd] = keyword[False] ):
literal[string]
identifier[widget] = identifier[self] . identifier[uiContentsTAB] . identifier[widget] ( identifier[self] . identifier[currentContentsIndex] ())
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[widget] , identifier[QWebView] )):
identifier[widget] = keyword[None]
keyword[if] ( keyword[not] identifier[widget] keyword[and] identifier[autoadd] ):
identifier[widget] = identifier[self] . identifier[addContentsWidget] ()
keyword[return] identifier[widget] | def currentContentsWidget(self, autoadd=False):
"""
Returns the current contents widget based on the cached index. If no widget is specified and autoadd is True, then a new widget will be added to the tab.
:param autoadd | <bool>
:return <QWebView>
"""
widget = self.uiContentsTAB.widget(self.currentContentsIndex())
if not isinstance(widget, QWebView):
widget = None # depends on [control=['if'], data=[]]
if not widget and autoadd:
widget = self.addContentsWidget() # depends on [control=['if'], data=[]]
return widget |
def basic_addresses_write(self, cycles, last_op_address, address, word):
"""
0113 0019 TXTTAB RMB 2 *PV BEGINNING OF BASIC PROGRAM
0114 001B VARTAB RMB 2 *PV START OF VARIABLES
0115 001D ARYTAB RMB 2 *PV START OF ARRAYS
0116 001F ARYEND RMB 2 *PV END OF ARRAYS (+1)
0117 0021 FRETOP RMB 2 *PV START OF STRING STORAGE (TOP OF FREE RAM)
0118 0023 STRTAB RMB 2 *PV START OF STRING VARIABLES
0119 0025 FRESPC RMB 2 UTILITY STRING POINTER
0120 0027 MEMSIZ RMB 2 *PV TOP OF STRING SPACE
"""
log.critical("%04x| write $%04x to $%04x", last_op_address, word, address)
return word | def function[basic_addresses_write, parameter[self, cycles, last_op_address, address, word]]:
constant[
0113 0019 TXTTAB RMB 2 *PV BEGINNING OF BASIC PROGRAM
0114 001B VARTAB RMB 2 *PV START OF VARIABLES
0115 001D ARYTAB RMB 2 *PV START OF ARRAYS
0116 001F ARYEND RMB 2 *PV END OF ARRAYS (+1)
0117 0021 FRETOP RMB 2 *PV START OF STRING STORAGE (TOP OF FREE RAM)
0118 0023 STRTAB RMB 2 *PV START OF STRING VARIABLES
0119 0025 FRESPC RMB 2 UTILITY STRING POINTER
0120 0027 MEMSIZ RMB 2 *PV TOP OF STRING SPACE
]
call[name[log].critical, parameter[constant[%04x| write $%04x to $%04x], name[last_op_address], name[word], name[address]]]
return[name[word]] | keyword[def] identifier[basic_addresses_write] ( identifier[self] , identifier[cycles] , identifier[last_op_address] , identifier[address] , identifier[word] ):
literal[string]
identifier[log] . identifier[critical] ( literal[string] , identifier[last_op_address] , identifier[word] , identifier[address] )
keyword[return] identifier[word] | def basic_addresses_write(self, cycles, last_op_address, address, word):
"""
0113 0019 TXTTAB RMB 2 *PV BEGINNING OF BASIC PROGRAM
0114 001B VARTAB RMB 2 *PV START OF VARIABLES
0115 001D ARYTAB RMB 2 *PV START OF ARRAYS
0116 001F ARYEND RMB 2 *PV END OF ARRAYS (+1)
0117 0021 FRETOP RMB 2 *PV START OF STRING STORAGE (TOP OF FREE RAM)
0118 0023 STRTAB RMB 2 *PV START OF STRING VARIABLES
0119 0025 FRESPC RMB 2 UTILITY STRING POINTER
0120 0027 MEMSIZ RMB 2 *PV TOP OF STRING SPACE
"""
log.critical('%04x| write $%04x to $%04x', last_op_address, word, address)
return word |
def refetch_fields(self, missing_fields):
""" Refetches a list of fields from the DB """
db_fields = self.mongokat_collection.find_one({"_id": self["_id"]}, fields={k: 1 for k in missing_fields})
self._fetched_fields += tuple(missing_fields)
if not db_fields:
return
for k, v in db_fields.items():
self[k] = v | def function[refetch_fields, parameter[self, missing_fields]]:
constant[ Refetches a list of fields from the DB ]
variable[db_fields] assign[=] call[name[self].mongokat_collection.find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b265fa30>], [<ast.Subscript object at 0x7da1b265fcd0>]]]]
<ast.AugAssign object at 0x7da1b265fc70>
if <ast.UnaryOp object at 0x7da1b265e770> begin[:]
return[None]
for taget[tuple[[<ast.Name object at 0x7da1b265e3b0>, <ast.Name object at 0x7da1b265ebf0>]]] in starred[call[name[db_fields].items, parameter[]]] begin[:]
call[name[self]][name[k]] assign[=] name[v] | keyword[def] identifier[refetch_fields] ( identifier[self] , identifier[missing_fields] ):
literal[string]
identifier[db_fields] = identifier[self] . identifier[mongokat_collection] . identifier[find_one] ({ literal[string] : identifier[self] [ literal[string] ]}, identifier[fields] ={ identifier[k] : literal[int] keyword[for] identifier[k] keyword[in] identifier[missing_fields] })
identifier[self] . identifier[_fetched_fields] += identifier[tuple] ( identifier[missing_fields] )
keyword[if] keyword[not] identifier[db_fields] :
keyword[return]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[db_fields] . identifier[items] ():
identifier[self] [ identifier[k] ]= identifier[v] | def refetch_fields(self, missing_fields):
""" Refetches a list of fields from the DB """
db_fields = self.mongokat_collection.find_one({'_id': self['_id']}, fields={k: 1 for k in missing_fields})
self._fetched_fields += tuple(missing_fields)
if not db_fields:
return # depends on [control=['if'], data=[]]
for (k, v) in db_fields.items():
self[k] = v # depends on [control=['for'], data=[]] |
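# The refetch above reduces to a projected find_one(). mongokat wraps
# pymongo, so an equivalent plain-pymongo call (assuming a local mongod;
# database and collection names are placeholders) looks like this:
from pymongo import MongoClient

coll = MongoClient().demo_db.demo_coll
coll.replace_one({"_id": 1}, {"a": 10, "b": 20, "c": 30}, upsert=True)
doc = coll.find_one({"_id": 1}, {"a": 1, "c": 1})  # only _id, a and c return
print(doc)  # {'_id': 1, 'a': 10, 'c': 30}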
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
for category in orm['document_library.DocumentCategory'].objects.all():
category.slug = category.documentcategorytitle_set.all()[0].title.lower()
category.save() | def function[forwards, parameter[self, orm]]:
constant[Write your forwards methods here.]
for taget[name[category]] in starred[call[call[name[orm]][constant[document_library.DocumentCategory]].objects.all, parameter[]]] begin[:]
name[category].slug assign[=] call[call[call[name[category].documentcategorytitle_set.all, parameter[]]][constant[0]].title.lower, parameter[]]
call[name[category].save, parameter[]] | keyword[def] identifier[forwards] ( identifier[self] , identifier[orm] ):
literal[string]
keyword[for] identifier[category] keyword[in] identifier[orm] [ literal[string] ]. identifier[objects] . identifier[all] ():
identifier[category] . identifier[slug] = identifier[category] . identifier[documentcategorytitle_set] . identifier[all] ()[ literal[int] ]. identifier[title] . identifier[lower] ()
identifier[category] . identifier[save] () | def forwards(self, orm):
"""Write your forwards methods here."""
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
for category in orm['document_library.DocumentCategory'].objects.all():
category.slug = category.documentcategorytitle_set.all()[0].title.lower()
category.save() # depends on [control=['for'], data=['category']] |
def _normalize_args(args):
'''
Return args as a list of strings
'''
if isinstance(args, six.string_types):
return shlex.split(args)
if isinstance(args, (tuple, list)):
return [six.text_type(arg) for arg in args]
else:
return [six.text_type(args)] | def function[_normalize_args, parameter[args]]:
constant[
Return args as a list of strings
]
if call[name[isinstance], parameter[name[args], name[six].string_types]] begin[:]
return[call[name[shlex].split, parameter[name[args]]]]
if call[name[isinstance], parameter[name[args], tuple[[<ast.Name object at 0x7da1b2007430>, <ast.Name object at 0x7da1b2007b80>]]]] begin[:]
return[<ast.ListComp object at 0x7da1b2006e60>] | keyword[def] identifier[_normalize_args] ( identifier[args] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[args] , identifier[six] . identifier[string_types] ):
keyword[return] identifier[shlex] . identifier[split] ( identifier[args] )
keyword[if] identifier[isinstance] ( identifier[args] ,( identifier[tuple] , identifier[list] )):
keyword[return] [ identifier[six] . identifier[text_type] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[args] ]
keyword[else] :
keyword[return] [ identifier[six] . identifier[text_type] ( identifier[args] )] | def _normalize_args(args):
"""
Return args as a list of strings
"""
if isinstance(args, six.string_types):
return shlex.split(args) # depends on [control=['if'], data=[]]
if isinstance(args, (tuple, list)):
return [six.text_type(arg) for arg in args] # depends on [control=['if'], data=[]]
else:
return [six.text_type(args)] |
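# Quick check of both branches of _normalize_args(): shlex honours shell
# quoting for strings, while tuples/lists are stringified element-wise
# (six.text_type is str on Python 3).
import shlex

print(shlex.split('install -y "vim enhanced"'))  # ['install', '-y', 'vim enhanced']
print([str(a) for a in ("pkg", 1, None)])        # ['pkg', '1', 'None']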
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is for python2 only
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
elif spec.loader is None and spec.submodule_search_locations:
# Here we need to create a namespace loader to handle namespaces since python2 doesn't...
return NamespaceLoader2(spec.name, spec.submodule_search_locations)
else:
return spec.loader | def function[find_module, parameter[cls, fullname, path]]:
constant[find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is for python2 only
]
variable[spec] assign[=] call[name[cls].find_spec, parameter[name[fullname], name[path]]]
if compare[name[spec] is constant[None]] begin[:]
return[constant[None]] | keyword[def] identifier[find_module] ( identifier[cls] , identifier[fullname] , identifier[path] = keyword[None] ):
literal[string]
identifier[spec] = identifier[cls] . identifier[find_spec] ( identifier[fullname] , identifier[path] )
keyword[if] identifier[spec] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[elif] identifier[spec] . identifier[loader] keyword[is] keyword[None] keyword[and] identifier[spec] . identifier[submodule_search_locations] :
keyword[return] identifier[NamespaceLoader2] ( identifier[spec] . identifier[name] , identifier[spec] . identifier[submodule_search_locations] )
keyword[else] :
keyword[return] identifier[spec] . identifier[loader] | def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is for python2 only
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None # depends on [control=['if'], data=[]]
elif spec.loader is None and spec.submodule_search_locations:
# Here we need to create a namespace loader to handle namespaces since python2 doesn't...
return NamespaceLoader2(spec.name, spec.submodule_search_locations) # depends on [control=['if'], data=[]]
else:
return spec.loader |
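# The Python 3 machinery this shim mirrors: find_spec() hands back a spec
# whose loader is set for regular modules, while a PEP 420 namespace package
# yields loader None with submodule_search_locations populated -- exactly
# the case wrapped in NamespaceLoader2 above.
import importlib.util

spec = importlib.util.find_spec("json")
print(spec.name, spec.loader is not None)  # json True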
def rollback(self, date):
"""Roll date backward to nearest end of quarter"""
if self.onOffset(date):
return date
else:
return date - QuarterEnd(month=self.month) | def function[rollback, parameter[self, date]]:
constant[Roll date backward to nearest end of quarter]
if call[name[self].onOffset, parameter[name[date]]] begin[:]
return[name[date]] | keyword[def] identifier[rollback] ( identifier[self] , identifier[date] ):
literal[string]
keyword[if] identifier[self] . identifier[onOffset] ( identifier[date] ):
keyword[return] identifier[date]
keyword[else] :
keyword[return] identifier[date] - identifier[QuarterEnd] ( identifier[month] = identifier[self] . identifier[month] ) | def rollback(self, date):
"""Roll date backward to nearest end of quarter"""
if self.onOffset(date):
return date # depends on [control=['if'], data=[]]
else:
return date - QuarterEnd(month=self.month) |
def clear_recovery_range(working_dir):
"""
Clear out our recovery hint
"""
recovery_range_path = os.path.join(working_dir, '.recovery')
if os.path.exists(recovery_range_path):
os.unlink(recovery_range_path) | def function[clear_recovery_range, parameter[working_dir]]:
constant[
Clear out our recovery hint
]
variable[recovery_range_path] assign[=] call[name[os].path.join, parameter[name[working_dir], constant[.recovery]]]
if call[name[os].path.exists, parameter[name[recovery_range_path]]] begin[:]
call[name[os].unlink, parameter[name[recovery_range_path]]] | keyword[def] identifier[clear_recovery_range] ( identifier[working_dir] ):
literal[string]
identifier[recovery_range_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[working_dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[recovery_range_path] ):
identifier[os] . identifier[unlink] ( identifier[recovery_range_path] ) | def clear_recovery_range(working_dir):
"""
Clear out our recovery hint
"""
recovery_range_path = os.path.join(working_dir, '.recovery')
if os.path.exists(recovery_range_path):
os.unlink(recovery_range_path) # depends on [control=['if'], data=[]] |
def _http_call(the_url, method, authorization, **kw):
'''
send an http request and return a json object if no error occurred.
'''
params = None
boundary = None
if method == _HTTP_UPLOAD:
# fix sina upload url:
the_url = the_url.replace('https://api.', 'https://upload.api.')
params, boundary = _encode_multipart(**kw)
else:
params = _encode_params(**kw)
if '/remind/' in the_url:
# fix sina remind api:
the_url = the_url.replace('https://api.', 'https://rm.api.')
http_url = '%s?%s' % (the_url, params) if method == _HTTP_GET else the_url
http_body = None if method == _HTTP_GET else params
req = urllib2.Request(http_url, data=http_body)
req.add_header('Accept-Encoding', 'gzip')
if authorization:
req.add_header('Authorization', 'OAuth2 %s' % authorization)
if boundary:
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
try:
resp = urllib2.urlopen(req, timeout=5)
body = _read_body(resp)
r = _parse_json(body)
if hasattr(r, 'error_code'):
raise APIError(r.error_code, r.get('error', ''), r.get('request', ''))
return r
except urllib2.HTTPError as e:
try:
r = _parse_json(_read_body(e))
except:
r = None
if hasattr(r, 'error_code'):
raise APIError(r.error_code, r.get('error', ''), r.get('request', ''))
raise e | def function[_http_call, parameter[the_url, method, authorization]]:
constant[
send an http request and return a json object if no error occurred.
]
variable[params] assign[=] constant[None]
variable[boundary] assign[=] constant[None]
if compare[name[method] equal[==] name[_HTTP_UPLOAD]] begin[:]
variable[the_url] assign[=] call[name[the_url].replace, parameter[constant[https://api.], constant[https://upload.api.]]]
<ast.Tuple object at 0x7da1b22358d0> assign[=] call[name[_encode_multipart], parameter[]]
variable[http_url] assign[=] <ast.IfExp object at 0x7da20c993340>
variable[http_body] assign[=] <ast.IfExp object at 0x7da20c990a00>
variable[req] assign[=] call[name[urllib2].Request, parameter[name[http_url]]]
call[name[req].add_header, parameter[constant[Accept-Encoding], constant[gzip]]]
if name[authorization] begin[:]
call[name[req].add_header, parameter[constant[Authorization], binary_operation[constant[OAuth2 %s] <ast.Mod object at 0x7da2590d6920> name[authorization]]]]
if name[boundary] begin[:]
call[name[req].add_header, parameter[constant[Content-Type], binary_operation[constant[multipart/form-data; boundary=%s] <ast.Mod object at 0x7da2590d6920> name[boundary]]]]
<ast.Try object at 0x7da20c990790> | keyword[def] identifier[_http_call] ( identifier[the_url] , identifier[method] , identifier[authorization] ,** identifier[kw] ):
literal[string]
identifier[params] = keyword[None]
identifier[boundary] = keyword[None]
keyword[if] identifier[method] == identifier[_HTTP_UPLOAD] :
identifier[the_url] = identifier[the_url] . identifier[replace] ( literal[string] , literal[string] )
identifier[params] , identifier[boundary] = identifier[_encode_multipart] (** identifier[kw] )
keyword[else] :
identifier[params] = identifier[_encode_params] (** identifier[kw] )
keyword[if] literal[string] keyword[in] identifier[the_url] :
identifier[the_url] = identifier[the_url] . identifier[replace] ( literal[string] , literal[string] )
identifier[http_url] = literal[string] %( identifier[the_url] , identifier[params] ) keyword[if] identifier[method] == identifier[_HTTP_GET] keyword[else] identifier[the_url]
identifier[http_body] = keyword[None] keyword[if] identifier[method] == identifier[_HTTP_GET] keyword[else] identifier[params]
identifier[req] = identifier[urllib2] . identifier[Request] ( identifier[http_url] , identifier[data] = identifier[http_body] )
identifier[req] . identifier[add_header] ( literal[string] , literal[string] )
keyword[if] identifier[authorization] :
identifier[req] . identifier[add_header] ( literal[string] , literal[string] % identifier[authorization] )
keyword[if] identifier[boundary] :
identifier[req] . identifier[add_header] ( literal[string] , literal[string] % identifier[boundary] )
keyword[try] :
identifier[resp] = identifier[urllib2] . identifier[urlopen] ( identifier[req] , identifier[timeout] = literal[int] )
identifier[body] = identifier[_read_body] ( identifier[resp] )
identifier[r] = identifier[_parse_json] ( identifier[body] )
keyword[if] identifier[hasattr] ( identifier[r] , literal[string] ):
keyword[raise] identifier[APIError] ( identifier[r] . identifier[error_code] , identifier[r] . identifier[get] ( literal[string] , literal[string] ), identifier[r] . identifier[get] ( literal[string] , literal[string] ))
keyword[return] identifier[r]
keyword[except] identifier[urllib2] . identifier[HTTPError] keyword[as] identifier[e] :
keyword[try] :
identifier[r] = identifier[_parse_json] ( identifier[_read_body] ( identifier[e] ))
keyword[except] :
identifier[r] = keyword[None]
keyword[if] identifier[hasattr] ( identifier[r] , literal[string] ):
keyword[raise] identifier[APIError] ( identifier[r] . identifier[error_code] , identifier[r] . identifier[get] ( literal[string] , literal[string] ), identifier[r] . identifier[get] ( literal[string] , literal[string] ))
keyword[raise] identifier[e] | def _http_call(the_url, method, authorization, **kw):
"""
send an http request and return a json object if no error occurred.
"""
params = None
boundary = None
if method == _HTTP_UPLOAD:
# fix sina upload url:
the_url = the_url.replace('https://api.', 'https://upload.api.')
(params, boundary) = _encode_multipart(**kw) # depends on [control=['if'], data=[]]
else:
params = _encode_params(**kw)
if '/remind/' in the_url:
# fix sina remind api:
the_url = the_url.replace('https://api.', 'https://rm.api.') # depends on [control=['if'], data=['the_url']]
http_url = '%s?%s' % (the_url, params) if method == _HTTP_GET else the_url
http_body = None if method == _HTTP_GET else params
req = urllib2.Request(http_url, data=http_body)
req.add_header('Accept-Encoding', 'gzip')
if authorization:
req.add_header('Authorization', 'OAuth2 %s' % authorization) # depends on [control=['if'], data=[]]
if boundary:
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary) # depends on [control=['if'], data=[]]
try:
resp = urllib2.urlopen(req, timeout=5)
body = _read_body(resp)
r = _parse_json(body)
if hasattr(r, 'error_code'):
raise APIError(r.error_code, r.get('error', ''), r.get('request', '')) # depends on [control=['if'], data=[]]
return r # depends on [control=['try'], data=[]]
except urllib2.HTTPError as e:
try:
r = _parse_json(_read_body(e)) # depends on [control=['try'], data=[]]
except:
r = None # depends on [control=['except'], data=[]]
if hasattr(r, 'error_code'):
raise APIError(r.error_code, r.get('error', ''), r.get('request', '')) # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']] |
def group_records(records, groupby='week'):
"""
Group records by year, month, week, or day.
Parameters
----------
records : iterator
An iterator over records
groupby : Default is 'week':
* 'week': group all records by year and week
* None: records are not grouped. This is useful if you don't want to
        divide records into chunks
* "day", "month", and "year" also accepted
"""
def _group_date(records, _fun):
for _, chunk in itertools.groupby(records, key=lambda r: _fun(r.datetime)):
yield list(chunk)
return _group_date(records, DATE_GROUPERS[groupby]) | def function[group_records, parameter[records, groupby]]:
constant[
Group records by year, month, week, or day.
Parameters
----------
records : iterator
An iterator over records
groupby : Default is 'week':
* 'week': group all records by year and week
* None: records are not grouped. This is useful if you don't want to
    divide records into chunks
* "day", "month", and "year" also accepted
]
def function[_group_date, parameter[records, _fun]]:
for taget[tuple[[<ast.Name object at 0x7da1b0da2860>, <ast.Name object at 0x7da207f00820>]]] in starred[call[name[itertools].groupby, parameter[name[records]]]] begin[:]
<ast.Yield object at 0x7da1b0dc16f0>
return[call[name[_group_date], parameter[name[records], call[name[DATE_GROUPERS]][name[groupby]]]]] | keyword[def] identifier[group_records] ( identifier[records] , identifier[groupby] = literal[string] ):
literal[string]
keyword[def] identifier[_group_date] ( identifier[records] , identifier[_fun] ):
keyword[for] identifier[_] , identifier[chunk] keyword[in] identifier[itertools] . identifier[groupby] ( identifier[records] , identifier[key] = keyword[lambda] identifier[r] : identifier[_fun] ( identifier[r] . identifier[datetime] )):
keyword[yield] identifier[list] ( identifier[chunk] )
keyword[return] identifier[_group_date] ( identifier[records] , identifier[DATE_GROUPERS] [ identifier[groupby] ]) | def group_records(records, groupby='week'):
"""
Group records by year, month, week, or day.
Parameters
----------
records : iterator
An iterator over records
groupby : Default is 'week':
* 'week': group all records by year and week
* None: records are not grouped. This is useful if you don't want to
        divide records into chunks
* "day", "month", and "year" also accepted
"""
def _group_date(records, _fun):
for (_, chunk) in itertools.groupby(records, key=lambda r: _fun(r.datetime)):
yield list(chunk) # depends on [control=['for'], data=[]]
return _group_date(records, DATE_GROUPERS[groupby]) |
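# A self-contained sketch of the weekly grouping. The real DATE_GROUPERS
# table lives elsewhere in the library, so a hypothetical ISO-week grouper
# is inlined here; records must already be in chronological order, since
# itertools.groupby only merges adjacent keys.
import itertools
from collections import namedtuple
from datetime import datetime

Record = namedtuple("Record", "datetime")
week_of = lambda d: d.isocalendar()[:2]  # (ISO year, ISO week)

records = [Record(datetime(2024, 1, d)) for d in (1, 2, 8, 9)]
chunks = [[r.datetime.day for r in chunk]
          for _, chunk in itertools.groupby(records, key=lambda r: week_of(r.datetime))]
print(chunks)  # [[1, 2], [8, 9]]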
def luhn_validate(number):
""" Source code from: https://en.wikipedia.org/wiki/Luhn_algorithm"""
sum = 0
parity = len(number) % 2
for i, digit in enumerate([int(x) for x in number]):
if i % 2 == parity:
digit *= 2
if digit > 9:
digit -= 9
sum += digit
return sum % 10 == 0 | def function[luhn_validate, parameter[number]]:
constant[ Source code from: https://en.wikipedia.org/wiki/Luhn_algorithm]
variable[sum] assign[=] constant[0]
variable[parity] assign[=] binary_operation[call[name[len], parameter[name[number]]] <ast.Mod object at 0x7da2590d6920> constant[2]]
for taget[tuple[[<ast.Name object at 0x7da2054a5570>, <ast.Name object at 0x7da2054a6440>]]] in starred[call[name[enumerate], parameter[<ast.ListComp object at 0x7da2054a71c0>]]] begin[:]
if compare[binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] name[parity]] begin[:]
<ast.AugAssign object at 0x7da2054a4340>
if compare[name[digit] greater[>] constant[9]] begin[:]
<ast.AugAssign object at 0x7da2054a4220>
<ast.AugAssign object at 0x7da2054a5d50>
return[compare[binary_operation[name[sum] <ast.Mod object at 0x7da2590d6920> constant[10]] equal[==] constant[0]]] | keyword[def] identifier[luhn_validate] ( identifier[number] ):
literal[string]
identifier[sum] = literal[int]
identifier[parity] = identifier[len] ( identifier[number] )% literal[int]
keyword[for] identifier[i] , identifier[digit] keyword[in] identifier[enumerate] ([ identifier[int] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[number] ]):
keyword[if] identifier[i] % literal[int] == identifier[parity] :
identifier[digit] *= literal[int]
keyword[if] identifier[digit] > literal[int] :
identifier[digit] -= literal[int]
identifier[sum] += identifier[digit]
keyword[return] identifier[sum] % literal[int] == literal[int] | def luhn_validate(number):
""" Source code from: https://en.wikipedia.org/wiki/Luhn_algorithm"""
sum = 0
parity = len(number) % 2
for (i, digit) in enumerate([int(x) for x in number]):
if i % 2 == parity:
digit *= 2
if digit > 9:
digit -= 9 # depends on [control=['if'], data=['digit']] # depends on [control=['if'], data=[]]
sum += digit # depends on [control=['for'], data=[]]
return sum % 10 == 0 |
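# Sanity check against the canonical test number from the cited Wikipedia
# page; corrupting the check digit must break the mod-10 sum.
assert luhn_validate("79927398713") is True
assert luhn_validate("79927398710") is False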
def is_installed(pkg_name):
"""
Check if an RPM package is installed.
"""
manager = MANAGER
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run("rpm --query %(pkg_name)s" % locals())
if res.succeeded:
return True
return False | def function[is_installed, parameter[pkg_name]]:
constant[
Check if an RPM package is installed.
]
variable[manager] assign[=] name[MANAGER]
with call[name[settings], parameter[call[name[hide], parameter[constant[running], constant[stdout], constant[stderr], constant[warnings]]]]] begin[:]
variable[res] assign[=] call[name[run], parameter[binary_operation[constant[rpm --query %(pkg_name)s] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]]]
if name[res].succeeded begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_installed] ( identifier[pkg_name] ):
literal[string]
identifier[manager] = identifier[MANAGER]
keyword[with] identifier[settings] ( identifier[hide] ( literal[string] , literal[string] , literal[string] , literal[string] ), identifier[warn_only] = keyword[True] ):
identifier[res] = identifier[run] ( literal[string] % identifier[locals] ())
keyword[if] identifier[res] . identifier[succeeded] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_installed(pkg_name):
"""
Check if an RPM package is installed.
"""
manager = MANAGER
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run('rpm --query %(pkg_name)s' % locals())
if res.succeeded:
return True # depends on [control=['if'], data=[]]
return False # depends on [control=['with'], data=[]] |
def nvmlShutdown():
r"""
/**
* Shut down NVML by releasing all GPU resources previously allocated with \ref nvmlInit().
*
* For all products.
*
* This method should be called after NVML work is done, once for each call to \ref nvmlInit()
* A reference count of the number of initializations is maintained. Shutdown only occurs
* when the reference count reaches zero. For backwards compatibility, no error is reported if
* nvmlShutdown() is called more times than nvmlInit().
*
* @return
* - \ref NVML_SUCCESS if NVML has been properly shut down
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlShutdown
"""
#
# Leave the library loaded, but shutdown the interface
#
fn = _nvmlGetFunctionPointer("nvmlShutdown")
ret = fn()
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
if (0 < _nvmlLib_refcount):
_nvmlLib_refcount -= 1
libLoadLock.release()
return None | def function[nvmlShutdown, parameter[]]:
constant[
/**
* Shut down NVML by releasing all GPU resources previously allocated with \ref nvmlInit().
*
* For all products.
*
* This method should be called after NVML work is done, once for each call to \ref nvmlInit()
* A reference count of the number of initializations is maintained. Shutdown only occurs
* when the reference count reaches zero. For backwards compatibility, no error is reported if
* nvmlShutdown() is called more times than nvmlInit().
*
* @return
* - \ref NVML_SUCCESS if NVML has been properly shut down
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlShutdown
]
variable[fn] assign[=] call[name[_nvmlGetFunctionPointer], parameter[constant[nvmlShutdown]]]
variable[ret] assign[=] call[name[fn], parameter[]]
call[name[_nvmlCheckReturn], parameter[name[ret]]]
<ast.Global object at 0x7da20c6c6080>
call[name[libLoadLock].acquire, parameter[]]
if compare[constant[0] less[<] name[_nvmlLib_refcount]] begin[:]
<ast.AugAssign object at 0x7da20c6c4d60>
call[name[libLoadLock].release, parameter[]]
return[constant[None]] | keyword[def] identifier[nvmlShutdown] ():
literal[string]
identifier[fn] = identifier[_nvmlGetFunctionPointer] ( literal[string] )
identifier[ret] = identifier[fn] ()
identifier[_nvmlCheckReturn] ( identifier[ret] )
keyword[global] identifier[_nvmlLib_refcount]
identifier[libLoadLock] . identifier[acquire] ()
keyword[if] ( literal[int] < identifier[_nvmlLib_refcount] ):
identifier[_nvmlLib_refcount] -= literal[int]
identifier[libLoadLock] . identifier[release] ()
keyword[return] keyword[None] | def nvmlShutdown():
"""
/**
* Shut down NVML by releasing all GPU resources previously allocated with \\ref nvmlInit().
*
* For all products.
*
* This method should be called after NVML work is done, once for each call to \\ref nvmlInit()
* A reference count of the number of initializations is maintained. Shutdown only occurs
* when the reference count reaches zero. For backwards compatibility, no error is reported if
* nvmlShutdown() is called more times than nvmlInit().
*
* @return
* - \\ref NVML_SUCCESS if NVML has been properly shut down
* - \\ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \\ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlShutdown
"""
#
# Leave the library loaded, but shutdown the interface
#
fn = _nvmlGetFunctionPointer('nvmlShutdown')
ret = fn()
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
if 0 < _nvmlLib_refcount:
_nvmlLib_refcount -= 1 # depends on [control=['if'], data=['_nvmlLib_refcount']]
libLoadLock.release()
return None |
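# Typical init/shutdown pairing with the refcount semantics the docstring
# describes (assumes the pynvml bindings and an NVIDIA driver are present).
from pynvml import nvmlInit, nvmlDeviceGetCount, nvmlShutdown

nvmlInit()                      # refcount 0 -> 1
try:
    print(nvmlDeviceGetCount())
finally:
    nvmlShutdown()              # refcount 1 -> 0, interface shut down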
def find_keys(self, regex, bucket_name=None):
"""Finds a list of S3 keys matching the passed regex
Given a regular expression, this method searches the S3 bucket
for matching keys, and returns an array of strings for matched
        keys, an empty array if none are found.
        :param regex: (str) Regular expression to use in the key search
:param bucket_name: (str) Name of bucket to search (optional)
:return: Array of strings containing matched S3 keys
"""
log = logging.getLogger(self.cls_logger + '.find_keys')
matched_keys = []
if not isinstance(regex, basestring):
log.error('regex argument is not a string, found: {t}'.format(t=regex.__class__.__name__))
return None
# Determine which bucket to use
if bucket_name is None:
s3bucket = self.bucket
else:
log.debug('Using the provided S3 bucket: {n}'.format(n=bucket_name))
s3bucket = self.s3resource.Bucket(bucket_name)
log.info('Looking up S3 keys based on regex: {r}'.format(r=regex))
for item in s3bucket.objects.all():
log.debug('Checking if regex matches key: {k}'.format(k=item.key))
match = re.search(regex, item.key)
if match:
matched_keys.append(item.key)
log.info('Found matching keys: {k}'.format(k=matched_keys))
return matched_keys | def function[find_keys, parameter[self, regex, bucket_name]]:
constant[Finds a list of S3 keys matching the passed regex
Given a regular expression, this method searches the S3 bucket
for matching keys, and returns an array of strings for matched
    keys, an empty array if none are found.
    :param regex: (str) Regular expression to use in the key search
:param bucket_name: (str) Name of bucket to search (optional)
:return: Array of strings containing matched S3 keys
]
variable[log] assign[=] call[name[logging].getLogger, parameter[binary_operation[name[self].cls_logger + constant[.find_keys]]]]
variable[matched_keys] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b10c0c40> begin[:]
call[name[log].error, parameter[call[constant[regex argument is not a string, found: {t}].format, parameter[]]]]
return[constant[None]]
if compare[name[bucket_name] is constant[None]] begin[:]
variable[s3bucket] assign[=] name[self].bucket
call[name[log].info, parameter[call[constant[Looking up S3 keys based on regex: {r}].format, parameter[]]]]
for taget[name[item]] in starred[call[name[s3bucket].objects.all, parameter[]]] begin[:]
call[name[log].debug, parameter[call[constant[Checking if regex matches key: {k}].format, parameter[]]]]
variable[match] assign[=] call[name[re].search, parameter[name[regex], name[item].key]]
if name[match] begin[:]
call[name[matched_keys].append, parameter[name[item].key]]
call[name[log].info, parameter[call[constant[Found matching keys: {k}].format, parameter[]]]]
return[name[matched_keys]] | keyword[def] identifier[find_keys] ( identifier[self] , identifier[regex] , identifier[bucket_name] = keyword[None] ):
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[self] . identifier[cls_logger] + literal[string] )
identifier[matched_keys] =[]
keyword[if] keyword[not] identifier[isinstance] ( identifier[regex] , identifier[basestring] ):
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[t] = identifier[regex] . identifier[__class__] . identifier[__name__] ))
keyword[return] keyword[None]
keyword[if] identifier[bucket_name] keyword[is] keyword[None] :
identifier[s3bucket] = identifier[self] . identifier[bucket]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[n] = identifier[bucket_name] ))
identifier[s3bucket] = identifier[self] . identifier[s3resource] . identifier[Bucket] ( identifier[bucket_name] )
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[r] = identifier[regex] ))
keyword[for] identifier[item] keyword[in] identifier[s3bucket] . identifier[objects] . identifier[all] ():
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[k] = identifier[item] . identifier[key] ))
identifier[match] = identifier[re] . identifier[search] ( identifier[regex] , identifier[item] . identifier[key] )
keyword[if] identifier[match] :
identifier[matched_keys] . identifier[append] ( identifier[item] . identifier[key] )
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[k] = identifier[matched_keys] ))
keyword[return] identifier[matched_keys] | def find_keys(self, regex, bucket_name=None):
"""Finds a list of S3 keys matching the passed regex
Given a regular expression, this method searches the S3 bucket
for matching keys, and returns an array of strings for matched
    keys, an empty array if none are found.
    :param regex: (str) Regular expression to use in the key search
:param bucket_name: (str) Name of bucket to search (optional)
:return: Array of strings containing matched S3 keys
"""
log = logging.getLogger(self.cls_logger + '.find_keys')
matched_keys = []
if not isinstance(regex, basestring):
log.error('regex argument is not a string, found: {t}'.format(t=regex.__class__.__name__))
return None # depends on [control=['if'], data=[]]
# Determine which bucket to use
if bucket_name is None:
s3bucket = self.bucket # depends on [control=['if'], data=[]]
else:
log.debug('Using the provided S3 bucket: {n}'.format(n=bucket_name))
s3bucket = self.s3resource.Bucket(bucket_name)
log.info('Looking up S3 keys based on regex: {r}'.format(r=regex))
for item in s3bucket.objects.all():
log.debug('Checking if regex matches key: {k}'.format(k=item.key))
match = re.search(regex, item.key)
if match:
matched_keys.append(item.key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
log.info('Found matching keys: {k}'.format(k=matched_keys))
return matched_keys |
def before(point):
""" True if point datetime specification is before now """
if not point:
return True
if isinstance(point, str):
point = str_to_time(point)
elif isinstance(point, int):
point = time.gmtime(point)
return time.gmtime() < point | def function[before, parameter[point]]:
constant[ True if point datetime specification is before now ]
if <ast.UnaryOp object at 0x7da18fe92560> begin[:]
return[constant[True]]
if call[name[isinstance], parameter[name[point], name[str]]] begin[:]
variable[point] assign[=] call[name[str_to_time], parameter[name[point]]]
return[compare[call[name[time].gmtime, parameter[]] less[<] name[point]]] | keyword[def] identifier[before] ( identifier[point] ):
literal[string]
keyword[if] keyword[not] identifier[point] :
keyword[return] keyword[True]
keyword[if] identifier[isinstance] ( identifier[point] , identifier[str] ):
identifier[point] = identifier[str_to_time] ( identifier[point] )
keyword[elif] identifier[isinstance] ( identifier[point] , identifier[int] ):
identifier[point] = identifier[time] . identifier[gmtime] ( identifier[point] )
keyword[return] identifier[time] . identifier[gmtime] ()< identifier[point] | def before(point):
""" True if point datetime specification is before now """
if not point:
return True # depends on [control=['if'], data=[]]
if isinstance(point, str):
point = str_to_time(point) # depends on [control=['if'], data=[]]
elif isinstance(point, int):
point = time.gmtime(point) # depends on [control=['if'], data=[]]
return time.gmtime() < point |
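# struct_time values compare element-wise, which is what before() leans on.
# Note the direction of the test: time.gmtime() < point is True when the
# point lies in the future relative to now. str_to_time() is the module's
# own string parser and is not reproduced here.
import time

future = time.gmtime(time.time() + 3600)
past = time.gmtime(time.time() - 3600)
print(time.gmtime() < future)  # True
print(time.gmtime() < past)    # False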
def _to_kraus(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Kraus representation."""
if rep == 'Kraus':
return data
if rep == 'Stinespring':
return _stinespring_to_kraus(data, input_dim, output_dim)
if rep == 'Operator':
return _from_operator('Kraus', data, input_dim, output_dim)
# Convert via Choi and Kraus
if rep != 'Choi':
data = _to_choi(rep, data, input_dim, output_dim)
return _choi_to_kraus(data, input_dim, output_dim) | def function[_to_kraus, parameter[rep, data, input_dim, output_dim]]:
constant[Transform a QuantumChannel to the Kraus representation.]
if compare[name[rep] equal[==] constant[Kraus]] begin[:]
return[name[data]]
if compare[name[rep] equal[==] constant[Stinespring]] begin[:]
return[call[name[_stinespring_to_kraus], parameter[name[data], name[input_dim], name[output_dim]]]]
if compare[name[rep] equal[==] constant[Operator]] begin[:]
return[call[name[_from_operator], parameter[constant[Kraus], name[data], name[input_dim], name[output_dim]]]]
if compare[name[rep] not_equal[!=] constant[Choi]] begin[:]
variable[data] assign[=] call[name[_to_choi], parameter[name[rep], name[data], name[input_dim], name[output_dim]]]
return[call[name[_choi_to_kraus], parameter[name[data], name[input_dim], name[output_dim]]]] | keyword[def] identifier[_to_kraus] ( identifier[rep] , identifier[data] , identifier[input_dim] , identifier[output_dim] ):
literal[string]
keyword[if] identifier[rep] == literal[string] :
keyword[return] identifier[data]
keyword[if] identifier[rep] == literal[string] :
keyword[return] identifier[_stinespring_to_kraus] ( identifier[data] , identifier[input_dim] , identifier[output_dim] )
keyword[if] identifier[rep] == literal[string] :
keyword[return] identifier[_from_operator] ( literal[string] , identifier[data] , identifier[input_dim] , identifier[output_dim] )
keyword[if] identifier[rep] != literal[string] :
identifier[data] = identifier[_to_choi] ( identifier[rep] , identifier[data] , identifier[input_dim] , identifier[output_dim] )
keyword[return] identifier[_choi_to_kraus] ( identifier[data] , identifier[input_dim] , identifier[output_dim] ) | def _to_kraus(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Kraus representation."""
if rep == 'Kraus':
return data # depends on [control=['if'], data=[]]
if rep == 'Stinespring':
return _stinespring_to_kraus(data, input_dim, output_dim) # depends on [control=['if'], data=[]]
if rep == 'Operator':
return _from_operator('Kraus', data, input_dim, output_dim) # depends on [control=['if'], data=[]]
# Convert via Choi and Kraus
if rep != 'Choi':
data = _to_choi(rep, data, input_dim, output_dim) # depends on [control=['if'], data=['rep']]
return _choi_to_kraus(data, input_dim, output_dim) |
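# Assuming this helper sits in qiskit's channel-transformation module, the
# public wrappers exercise the same conversion path: build the one-qubit
# identity channel as a Choi matrix and read back its single Kraus operator.
import numpy as np
from qiskit.quantum_info import Choi, Kraus

choi = Choi(np.array([[1, 0, 0, 1],
                      [0, 0, 0, 0],
                      [0, 0, 0, 0],
                      [1, 0, 0, 1]]))
print(Kraus(choi).data)  # one operator, proportional to the 2x2 identity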
def reindex(args):
"""
%prog agpfile
assume the component line order is correct, modify coordinates, this is
necessary mostly due to manual edits (insert/delete) that disrupts
the target coordinates.
"""
p = OptionParser(reindex.__doc__)
p.add_option("--nogaps", default=False, action="store_true",
help="Remove all gap lines [default: %default]")
p.add_option("--inplace", default=False, action="store_true",
help="Replace input file [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
agpfile, = args
inplace = opts.inplace
agp = AGP(agpfile, validate=False)
pf = agpfile.rsplit(".", 1)[0]
newagpfile = pf + ".reindexed.agp"
fw = open(newagpfile, "w")
agp.transfer_header(fw)
for chr, chr_agp in groupby(agp, lambda x: x.object):
chr_agp = list(chr_agp)
object_beg = 1
for i, b in enumerate(chr_agp):
b.object_beg = object_beg
b.part_number = i + 1
if opts.nogaps and b.is_gap:
continue
if b.is_gap:
b.object_end = object_beg + b.gap_length - 1
else:
b.object_end = object_beg + b.component_span - 1
object_beg = b.object_end + 1
print(str(b), file=fw)
# Last step: validate the new agpfile
fw.close()
agp = AGP(newagpfile, validate=True)
if inplace:
shutil.move(newagpfile, agpfile)
logging.debug("Rename file `{0}` to `{1}`".format(newagpfile, agpfile))
newagpfile = agpfile
return newagpfile | def function[reindex, parameter[args]]:
constant[
%prog agpfile
    assume the component line order is correct and modify coordinates; this is
    necessary mostly due to manual edits (insert/delete) that disrupt
    the target coordinates.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[reindex].__doc__]]
call[name[p].add_option, parameter[constant[--nogaps]]]
call[name[p].add_option, parameter[constant[--inplace]]]
<ast.Tuple object at 0x7da1b0902530> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[call[name[p].print_help, parameter[]]]]
<ast.Tuple object at 0x7da1b0903fa0> assign[=] name[args]
variable[inplace] assign[=] name[opts].inplace
variable[agp] assign[=] call[name[AGP], parameter[name[agpfile]]]
variable[pf] assign[=] call[call[name[agpfile].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[newagpfile] assign[=] binary_operation[name[pf] + constant[.reindexed.agp]]
variable[fw] assign[=] call[name[open], parameter[name[newagpfile], constant[w]]]
call[name[agp].transfer_header, parameter[name[fw]]]
for taget[tuple[[<ast.Name object at 0x7da20c796260>, <ast.Name object at 0x7da20c796770>]]] in starred[call[name[groupby], parameter[name[agp], <ast.Lambda object at 0x7da20c796500>]]] begin[:]
variable[chr_agp] assign[=] call[name[list], parameter[name[chr_agp]]]
variable[object_beg] assign[=] constant[1]
for taget[tuple[[<ast.Name object at 0x7da20c795a50>, <ast.Name object at 0x7da20c7957e0>]]] in starred[call[name[enumerate], parameter[name[chr_agp]]]] begin[:]
name[b].object_beg assign[=] name[object_beg]
name[b].part_number assign[=] binary_operation[name[i] + constant[1]]
if <ast.BoolOp object at 0x7da20c7966b0> begin[:]
continue
if name[b].is_gap begin[:]
name[b].object_end assign[=] binary_operation[binary_operation[name[object_beg] + name[b].gap_length] - constant[1]]
variable[object_beg] assign[=] binary_operation[name[b].object_end + constant[1]]
call[name[print], parameter[call[name[str], parameter[name[b]]]]]
call[name[fw].close, parameter[]]
variable[agp] assign[=] call[name[AGP], parameter[name[newagpfile]]]
if name[inplace] begin[:]
call[name[shutil].move, parameter[name[newagpfile], name[agpfile]]]
call[name[logging].debug, parameter[call[constant[Rename file `{0}` to `{1}`].format, parameter[name[newagpfile], name[agpfile]]]]]
variable[newagpfile] assign[=] name[agpfile]
return[name[newagpfile]] | keyword[def] identifier[reindex] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[reindex] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( identifier[p] . identifier[print_help] ())
identifier[agpfile] ,= identifier[args]
identifier[inplace] = identifier[opts] . identifier[inplace]
identifier[agp] = identifier[AGP] ( identifier[agpfile] , identifier[validate] = keyword[False] )
identifier[pf] = identifier[agpfile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[newagpfile] = identifier[pf] + literal[string]
identifier[fw] = identifier[open] ( identifier[newagpfile] , literal[string] )
identifier[agp] . identifier[transfer_header] ( identifier[fw] )
keyword[for] identifier[chr] , identifier[chr_agp] keyword[in] identifier[groupby] ( identifier[agp] , keyword[lambda] identifier[x] : identifier[x] . identifier[object] ):
identifier[chr_agp] = identifier[list] ( identifier[chr_agp] )
identifier[object_beg] = literal[int]
keyword[for] identifier[i] , identifier[b] keyword[in] identifier[enumerate] ( identifier[chr_agp] ):
identifier[b] . identifier[object_beg] = identifier[object_beg]
identifier[b] . identifier[part_number] = identifier[i] + literal[int]
keyword[if] identifier[opts] . identifier[nogaps] keyword[and] identifier[b] . identifier[is_gap] :
keyword[continue]
keyword[if] identifier[b] . identifier[is_gap] :
identifier[b] . identifier[object_end] = identifier[object_beg] + identifier[b] . identifier[gap_length] - literal[int]
keyword[else] :
identifier[b] . identifier[object_end] = identifier[object_beg] + identifier[b] . identifier[component_span] - literal[int]
identifier[object_beg] = identifier[b] . identifier[object_end] + literal[int]
identifier[print] ( identifier[str] ( identifier[b] ), identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] ()
identifier[agp] = identifier[AGP] ( identifier[newagpfile] , identifier[validate] = keyword[True] )
keyword[if] identifier[inplace] :
identifier[shutil] . identifier[move] ( identifier[newagpfile] , identifier[agpfile] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[newagpfile] , identifier[agpfile] ))
identifier[newagpfile] = identifier[agpfile]
keyword[return] identifier[newagpfile] | def reindex(args):
"""
%prog agpfile
    assume the component line order is correct and modify coordinates; this is
    necessary mostly due to manual edits (insert/delete) that disrupt
    the target coordinates.
"""
p = OptionParser(reindex.__doc__)
p.add_option('--nogaps', default=False, action='store_true', help='Remove all gap lines [default: %default]')
p.add_option('--inplace', default=False, action='store_true', help='Replace input file [default: %default]')
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help()) # depends on [control=['if'], data=[]]
(agpfile,) = args
inplace = opts.inplace
agp = AGP(agpfile, validate=False)
pf = agpfile.rsplit('.', 1)[0]
newagpfile = pf + '.reindexed.agp'
fw = open(newagpfile, 'w')
agp.transfer_header(fw)
for (chr, chr_agp) in groupby(agp, lambda x: x.object):
chr_agp = list(chr_agp)
object_beg = 1
for (i, b) in enumerate(chr_agp):
b.object_beg = object_beg
b.part_number = i + 1
if opts.nogaps and b.is_gap:
continue # depends on [control=['if'], data=[]]
if b.is_gap:
b.object_end = object_beg + b.gap_length - 1 # depends on [control=['if'], data=[]]
else:
b.object_end = object_beg + b.component_span - 1
object_beg = b.object_end + 1
print(str(b), file=fw) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# Last step: validate the new agpfile
fw.close()
agp = AGP(newagpfile, validate=True)
if inplace:
shutil.move(newagpfile, agpfile)
logging.debug('Rename file `{0}` to `{1}`'.format(newagpfile, agpfile))
newagpfile = agpfile # depends on [control=['if'], data=[]]
return newagpfile |
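# Hypothetical invocation of the CLI entry point above; "scaffolds.agp" is a
# placeholder input file name.
newagp = reindex(["scaffolds.agp", "--nogaps"])   # writes scaffolds.reindexed.agp
reindex(["scaffolds.agp", "--inplace"])           # overwrites the input file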
def append(self, hcont, value, score = None):
""" If sort_field is specified, score must be None.
If sort_field is not specified, score is mandatory. """
assert (score is None) != (self.field.sort_field is None)
if score is None:
score = getattr(value, self.field.sort_field.name)
ContainerFieldWriter.append(self, hcont, value, score) | def function[append, parameter[self, hcont, value, score]]:
constant[ If sort_field is specified, score must be None.
If sort_field is not specified, score is mandatory. ]
assert[compare[compare[name[score] is constant[None]] not_equal[!=] compare[name[self].field.sort_field is constant[None]]]]
if compare[name[score] is constant[None]] begin[:]
variable[score] assign[=] call[name[getattr], parameter[name[value], name[self].field.sort_field.name]]
call[name[ContainerFieldWriter].append, parameter[name[self], name[hcont], name[value], name[score]]] | keyword[def] identifier[append] ( identifier[self] , identifier[hcont] , identifier[value] , identifier[score] = keyword[None] ):
literal[string]
keyword[assert] ( identifier[score] keyword[is] keyword[None] )!=( identifier[self] . identifier[field] . identifier[sort_field] keyword[is] keyword[None] )
keyword[if] identifier[score] keyword[is] keyword[None] :
identifier[score] = identifier[getattr] ( identifier[value] , identifier[self] . identifier[field] . identifier[sort_field] . identifier[name] )
identifier[ContainerFieldWriter] . identifier[append] ( identifier[self] , identifier[hcont] , identifier[value] , identifier[score] ) | def append(self, hcont, value, score=None):
""" If sort_field is specified, score must be None.
If sort_field is not specified, score is mandatory. """
assert (score is None) != (self.field.sort_field is None)
if score is None:
score = getattr(value, self.field.sort_field.name) # depends on [control=['if'], data=['score']]
ContainerFieldWriter.append(self, hcont, value, score) |
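# Standalone illustration of the exclusive-or guard above: exactly one of
# score / sort_field may be provided.
for score, sort_field in [(None, None), (0.5, 'ts'), (None, 'ts'), (0.5, None)]:
    valid = (score is None) != (sort_field is None)
    print(score, sort_field, 'ok' if valid else 'rejected')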
def set(self, section, option, value):
"""
Set method that (1) auto-saves if possible and (2) auto-creates
sections.
"""
try:
super(ExactOnlineConfig, self).set(section, option, value)
except NoSectionError:
self.add_section(section)
super(ExactOnlineConfig, self).set(section, option, value)
# Save automatically!
self.save() | def function[set, parameter[self, section, option, value]]:
constant[
Set method that (1) auto-saves if possible and (2) auto-creates
sections.
]
<ast.Try object at 0x7da1b0243e50>
call[name[self].save, parameter[]] | keyword[def] identifier[set] ( identifier[self] , identifier[section] , identifier[option] , identifier[value] ):
literal[string]
keyword[try] :
identifier[super] ( identifier[ExactOnlineConfig] , identifier[self] ). identifier[set] ( identifier[section] , identifier[option] , identifier[value] )
keyword[except] identifier[NoSectionError] :
identifier[self] . identifier[add_section] ( identifier[section] )
identifier[super] ( identifier[ExactOnlineConfig] , identifier[self] ). identifier[set] ( identifier[section] , identifier[option] , identifier[value] )
identifier[self] . identifier[save] () | def set(self, section, option, value):
"""
Set method that (1) auto-saves if possible and (2) auto-creates
sections.
"""
try:
super(ExactOnlineConfig, self).set(section, option, value) # depends on [control=['try'], data=[]]
except NoSectionError:
self.add_section(section)
super(ExactOnlineConfig, self).set(section, option, value) # depends on [control=['except'], data=[]]
# Save automatically!
self.save() |
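# Minimal standalone sketch of the same auto-create/auto-save pattern with the
# stdlib ConfigParser; ExactOnlineConfig's storage backend is not reproduced.
from configparser import ConfigParser, NoSectionError

class AutoSaveConfig(ConfigParser):
    def __init__(self, path):
        super().__init__()
        self._path = path
        self.read(path)

    def set(self, section, option, value):
        try:
            super().set(section, option, value)
        except NoSectionError:
            self.add_section(section)
            super().set(section, option, value)
        # Save automatically!
        with open(self._path, 'w') as fh:
            self.write(fh)

cfg = AutoSaveConfig('settings.ini')
cfg.set('transient', 'division', '123456')  # section auto-created, file saved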
def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release() | def function[clear_session_cookies, parameter[self]]:
constant[Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
]
call[name[self]._cookies_lock.acquire, parameter[]]
<ast.Try object at 0x7da18f58c370> | keyword[def] identifier[clear_session_cookies] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_cookies_lock] . identifier[acquire] ()
keyword[try] :
keyword[for] identifier[cookie] keyword[in] identifier[self] :
keyword[if] identifier[cookie] . identifier[discard] :
identifier[self] . identifier[clear] ( identifier[cookie] . identifier[domain] , identifier[cookie] . identifier[path] , identifier[cookie] . identifier[name] )
keyword[finally] :
identifier[self] . identifier[_cookies_lock] . identifier[release] () | def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cookie']] # depends on [control=['try'], data=[]]
finally:
self._cookies_lock.release() |
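# The stdlib jar exposes the same method; cookies flagged discard=True (session
# cookies) are dropped while persistent cookies survive.
import http.cookiejar

jar = http.cookiejar.CookieJar()
# ... jar gets populated by an opener or HTTP session ...
jar.clear_session_cookies()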
def error(self):
"""Returns the error for this barrier and all work items, if any."""
# Copy the error from any failed item to be the error for the whole
# barrier. The first error seen "wins". Also handles the case where
# the WorkItems passed into the barrier have already completed and
# been marked with errors.
for item in self:
if isinstance(item, WorkItem) and item.error:
return item.error
return None | def function[error, parameter[self]]:
constant[Returns the error for this barrier and all work items, if any.]
for taget[name[item]] in starred[name[self]] begin[:]
if <ast.BoolOp object at 0x7da204566350> begin[:]
return[name[item].error]
return[constant[None]] | keyword[def] identifier[error] ( identifier[self] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[self] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[WorkItem] ) keyword[and] identifier[item] . identifier[error] :
keyword[return] identifier[item] . identifier[error]
keyword[return] keyword[None] | def error(self):
"""Returns the error for this barrier and all work items, if any."""
# Copy the error from any failed item to be the error for the whole
# barrier. The first error seen "wins". Also handles the case where
# the WorkItems passed into the barrier have already completed and
# been marked with errors.
for item in self:
if isinstance(item, WorkItem) and item.error:
return item.error # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return None |
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
r = requests.get(self._api_prefix + url,
params=params,
headers=self.headers,
auth=self.auth,
)
return self._action(r) | def function[_get, parameter[self, url, params]]:
    constant[Wrapper around requests.get() to use the API prefix. Returns a JSON response.]
variable[r] assign[=] call[name[requests].get, parameter[binary_operation[name[self]._api_prefix + name[url]]]]
return[call[name[self]._action, parameter[name[r]]]] | keyword[def] identifier[_get] ( identifier[self] , identifier[url] , identifier[params] ={}):
literal[string]
identifier[r] = identifier[requests] . identifier[get] ( identifier[self] . identifier[_api_prefix] + identifier[url] ,
identifier[params] = identifier[params] ,
identifier[headers] = identifier[self] . identifier[headers] ,
identifier[auth] = identifier[self] . identifier[auth] ,
)
keyword[return] identifier[self] . identifier[_action] ( identifier[r] ) | def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
r = requests.get(self._api_prefix + url, params=params, headers=self.headers, auth=self.auth)
return self._action(r) |
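# Sketch of the surrounding client class; _action is assumed to check the
# status code and decode JSON, so raise_for_status()/json() stand in for it.
import requests

class ApiClient:
    def __init__(self, api_prefix, auth=None, headers=None):
        self._api_prefix = api_prefix
        self.auth = auth
        self.headers = headers or {}

    def _get(self, url, params=None):
        r = requests.get(self._api_prefix + url, params=params or {},
                         headers=self.headers, auth=self.auth)
        r.raise_for_status()
        return r.json()

client = ApiClient('https://api.example.com/v2/', auth=('user', 'token'))
# tickets = client._get('tickets', params={'page': 1})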
def setValidityErrorHandler(self, err_func, warn_func, arg=None):
"""
Register error and warning handlers for DTD validation.
These will be called back as f(msg,arg)
"""
libxml2mod.xmlSetValidErrors(self._o, err_func, warn_func, arg) | def function[setValidityErrorHandler, parameter[self, err_func, warn_func, arg]]:
constant[
Register error and warning handlers for DTD validation.
These will be called back as f(msg,arg)
]
call[name[libxml2mod].xmlSetValidErrors, parameter[name[self]._o, name[err_func], name[warn_func], name[arg]]] | keyword[def] identifier[setValidityErrorHandler] ( identifier[self] , identifier[err_func] , identifier[warn_func] , identifier[arg] = keyword[None] ):
literal[string]
identifier[libxml2mod] . identifier[xmlSetValidErrors] ( identifier[self] . identifier[_o] , identifier[err_func] , identifier[warn_func] , identifier[arg] ) | def setValidityErrorHandler(self, err_func, warn_func, arg=None):
"""
Register error and warning handlers for DTD validation.
These will be called back as f(msg,arg)
"""
libxml2mod.xmlSetValidErrors(self._o, err_func, warn_func, arg) |
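# Handler-signature sketch; `ctxt` is assumed to be a libxml2 parser or
# validation context object exposing the method above.
def on_error(msg, arg):
    arg.append(('error', msg))

def on_warning(msg, arg):
    arg.append(('warning', msg))

messages = []
ctxt.setValidityErrorHandler(on_error, on_warning, arg=messages)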
def _decode_key(self, key):
"""Decode key using hex_codec to retrieve the original key.
Keys are returned as :class:`str` if serialization is enabled.
Keys are returned as :class:`bytes` if serialization is disabled.
"""
bkey = codecs.decode(key.encode(self._keyencoding), 'hex_codec')
return bkey.decode(self._keyencoding) if self._serialize else bkey | def function[_decode_key, parameter[self, key]]:
constant[Decode key using hex_codec to retrieve the original key.
Keys are returned as :class:`str` if serialization is enabled.
Keys are returned as :class:`bytes` if serialization is disabled.
]
variable[bkey] assign[=] call[name[codecs].decode, parameter[call[name[key].encode, parameter[name[self]._keyencoding]], constant[hex_codec]]]
return[<ast.IfExp object at 0x7da1b26e0640>] | keyword[def] identifier[_decode_key] ( identifier[self] , identifier[key] ):
literal[string]
identifier[bkey] = identifier[codecs] . identifier[decode] ( identifier[key] . identifier[encode] ( identifier[self] . identifier[_keyencoding] ), literal[string] )
keyword[return] identifier[bkey] . identifier[decode] ( identifier[self] . identifier[_keyencoding] ) keyword[if] identifier[self] . identifier[_serialize] keyword[else] identifier[bkey] | def _decode_key(self, key):
"""Decode key using hex_codec to retrieve the original key.
Keys are returned as :class:`str` if serialization is enabled.
Keys are returned as :class:`bytes` if serialization is disabled.
"""
bkey = codecs.decode(key.encode(self._keyencoding), 'hex_codec')
return bkey.decode(self._keyencoding) if self._serialize else bkey |
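# Round trip for the hex_codec scheme used above: keys are stored hex-encoded
# and decoded back to str (serialized) or bytes (unserialized).
import codecs

stored = codecs.encode(b'user:42', 'hex_codec').decode('utf-8')  # '757365723a3432'
raw = codecs.decode(stored.encode('utf-8'), 'hex_codec')
assert raw == b'user:42'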
def pip_remove(self, name=None, prefix=None, pkgs=None):
"""Remove a pip package in given environment by `name` or `prefix`."""
logger.debug(str((prefix, pkgs)))
if isinstance(pkgs, (list, tuple)):
pkg = ' '.join(pkgs)
else:
pkg = pkgs
extra_args = ['uninstall', '--yes', pkg]
return self._call_pip(name=name, prefix=prefix, extra_args=extra_args) | def function[pip_remove, parameter[self, name, prefix, pkgs]]:
    constant[Remove a pip package in a given environment by `name` or `prefix`.]
call[name[logger].debug, parameter[call[name[str], parameter[tuple[[<ast.Name object at 0x7da1b2765120>, <ast.Name object at 0x7da1b2767220>]]]]]]
if call[name[isinstance], parameter[name[pkgs], tuple[[<ast.Name object at 0x7da1b27a5f90>, <ast.Name object at 0x7da1b27a78e0>]]]] begin[:]
variable[pkg] assign[=] call[constant[ ].join, parameter[name[pkgs]]]
variable[extra_args] assign[=] list[[<ast.Constant object at 0x7da1b27a6a10>, <ast.Constant object at 0x7da1b27a7430>, <ast.Name object at 0x7da1b27a5e70>]]
return[call[name[self]._call_pip, parameter[]]] | keyword[def] identifier[pip_remove] ( identifier[self] , identifier[name] = keyword[None] , identifier[prefix] = keyword[None] , identifier[pkgs] = keyword[None] ):
literal[string]
identifier[logger] . identifier[debug] ( identifier[str] (( identifier[prefix] , identifier[pkgs] )))
keyword[if] identifier[isinstance] ( identifier[pkgs] ,( identifier[list] , identifier[tuple] )):
identifier[pkg] = literal[string] . identifier[join] ( identifier[pkgs] )
keyword[else] :
identifier[pkg] = identifier[pkgs]
identifier[extra_args] =[ literal[string] , literal[string] , identifier[pkg] ]
keyword[return] identifier[self] . identifier[_call_pip] ( identifier[name] = identifier[name] , identifier[prefix] = identifier[prefix] , identifier[extra_args] = identifier[extra_args] ) | def pip_remove(self, name=None, prefix=None, pkgs=None):
"""Remove a pip package in given environment by `name` or `prefix`."""
logger.debug(str((prefix, pkgs)))
if isinstance(pkgs, (list, tuple)):
pkg = ' '.join(pkgs) # depends on [control=['if'], data=[]]
else:
pkg = pkgs
extra_args = ['uninstall', '--yes', pkg]
return self._call_pip(name=name, prefix=prefix, extra_args=extra_args) |
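# Hypothetical call; `api` stands for whatever manager object exposes
# pip_remove, and 'myenv' is a placeholder environment name.
api.pip_remove(name='myenv', pkgs=['requests', 'chardet'])
# resulting pip arguments: ['uninstall', '--yes', 'requests chardet']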
def from_seed(cls, seed, encoder=encoding.RawEncoder):
"""
Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey`
"""
# decode the seed
seed = encoder.decode(seed)
# Verify the given seed type and size are correct
if not (isinstance(seed, bytes) and len(seed) == cls.SEED_SIZE):
raise exc.TypeError(("PrivateKey seed must be a {0} bytes long "
"binary sequence").format(cls.SEED_SIZE)
)
# generate a raw keypair from the given seed
raw_pk, raw_sk = nacl.bindings.crypto_box_seed_keypair(seed)
        # construct an instance from the raw secret key
return cls(raw_sk) | def function[from_seed, parameter[cls, seed, encoder]]:
constant[
Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey`
]
variable[seed] assign[=] call[name[encoder].decode, parameter[name[seed]]]
if <ast.UnaryOp object at 0x7da2044c0d30> begin[:]
<ast.Raise object at 0x7da2044c28c0>
<ast.Tuple object at 0x7da2044c20b0> assign[=] call[name[nacl].bindings.crypto_box_seed_keypair, parameter[name[seed]]]
return[call[name[cls], parameter[name[raw_sk]]]] | keyword[def] identifier[from_seed] ( identifier[cls] , identifier[seed] , identifier[encoder] = identifier[encoding] . identifier[RawEncoder] ):
literal[string]
identifier[seed] = identifier[encoder] . identifier[decode] ( identifier[seed] )
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[seed] , identifier[bytes] ) keyword[and] identifier[len] ( identifier[seed] )== identifier[cls] . identifier[SEED_SIZE] ):
keyword[raise] identifier[exc] . identifier[TypeError] (( literal[string]
literal[string] ). identifier[format] ( identifier[cls] . identifier[SEED_SIZE] )
)
identifier[raw_pk] , identifier[raw_sk] = identifier[nacl] . identifier[bindings] . identifier[crypto_box_seed_keypair] ( identifier[seed] )
keyword[return] identifier[cls] ( identifier[raw_sk] ) | def from_seed(cls, seed, encoder=encoding.RawEncoder):
"""
Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey`
"""
# decode the seed
seed = encoder.decode(seed)
# Verify the given seed type and size are correct
if not (isinstance(seed, bytes) and len(seed) == cls.SEED_SIZE):
raise exc.TypeError('PrivateKey seed must be a {0} bytes long binary sequence'.format(cls.SEED_SIZE)) # depends on [control=['if'], data=[]]
# generate a raw keypair from the given seed
(raw_pk, raw_sk) = nacl.bindings.crypto_box_seed_keypair(seed)
    # construct an instance from the raw secret key
return cls(raw_sk) |
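# Usage sketch in the PyNaCl style this snippet comes from; the seed must come
# from a CSPRNG and stay secret.
import nacl.utils
from nacl.public import PrivateKey

seed = nacl.utils.random(PrivateKey.SEED_SIZE)
sk = PrivateKey.from_seed(seed)
assert bytes(PrivateKey.from_seed(seed)) == bytes(sk)  # deterministic derivation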
def read(self, size=None):
""" Read `size` of bytes."""
if size is None:
return self.buf.read() + self.open_file.read()
contents = self.buf.read(size)
if len(contents) < size:
contents += self.open_file.read(size - len(contents))
return contents | def function[read, parameter[self, size]]:
    constant[ Read `size` bytes.]
if compare[name[size] is constant[None]] begin[:]
return[binary_operation[call[name[self].buf.read, parameter[]] + call[name[self].open_file.read, parameter[]]]]
variable[contents] assign[=] call[name[self].buf.read, parameter[name[size]]]
if compare[call[name[len], parameter[name[contents]]] less[<] name[size]] begin[:]
<ast.AugAssign object at 0x7da18f09f190>
return[name[contents]] | keyword[def] identifier[read] ( identifier[self] , identifier[size] = keyword[None] ):
literal[string]
keyword[if] identifier[size] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[buf] . identifier[read] ()+ identifier[self] . identifier[open_file] . identifier[read] ()
identifier[contents] = identifier[self] . identifier[buf] . identifier[read] ( identifier[size] )
keyword[if] identifier[len] ( identifier[contents] )< identifier[size] :
identifier[contents] += identifier[self] . identifier[open_file] . identifier[read] ( identifier[size] - identifier[len] ( identifier[contents] ))
keyword[return] identifier[contents] | def read(self, size=None):
""" Read `size` of bytes."""
if size is None:
return self.buf.read() + self.open_file.read() # depends on [control=['if'], data=[]]
contents = self.buf.read(size)
if len(contents) < size:
contents += self.open_file.read(size - len(contents)) # depends on [control=['if'], data=['size']]
return contents |
def is_rate_matrix(K, tol=1e-12):
r"""Check if the given matrix is a rate matrix.
Parameters
----------
K : (M, M) ndarray or scipy.sparse matrix
Matrix to check
tol : float (optional)
Floating point tolerance to check with
Returns
-------
is_rate_matrix : bool
True, if K is a valid rate matrix, False otherwise
Notes
-----
A valid rate matrix :math:`K=(k_{ij})` has non-negative off
    diagonal elements, :math:`k_{ij} \geq 0`, for :math:`i \neq j`,
and elements of each row sum up to zero, :math:`\sum_{j}
k_{ij}=0`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import is_rate_matrix
>>> A = np.array([[0.5, -0.5, -0.2], [-0.3, 0.6, -0.3], [-0.2, 0.2, 0.0]])
>>> is_rate_matrix(A)
False
>>> K = np.array([[-0.3, 0.2, 0.1], [0.5, -0.5, 0.0], [0.1, 0.1, -0.2]])
>>> is_rate_matrix(K)
True
"""
K = _types.ensure_ndarray_or_sparse(K, ndim=2, uniform=True, kind='numeric')
if _issparse(K):
return sparse.assessment.is_rate_matrix(K, tol)
else:
return dense.assessment.is_rate_matrix(K, tol) | def function[is_rate_matrix, parameter[K, tol]]:
constant[Check if the given matrix is a rate matrix.
Parameters
----------
K : (M, M) ndarray or scipy.sparse matrix
Matrix to check
tol : float (optional)
Floating point tolerance to check with
Returns
-------
is_rate_matrix : bool
True, if K is a valid rate matrix, False otherwise
Notes
-----
A valid rate matrix :math:`K=(k_{ij})` has non-negative off
    diagonal elements, :math:`k_{ij} \geq 0`, for :math:`i \neq j`,
and elements of each row sum up to zero, :math:`\sum_{j}
k_{ij}=0`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import is_rate_matrix
>>> A = np.array([[0.5, -0.5, -0.2], [-0.3, 0.6, -0.3], [-0.2, 0.2, 0.0]])
>>> is_rate_matrix(A)
False
>>> K = np.array([[-0.3, 0.2, 0.1], [0.5, -0.5, 0.0], [0.1, 0.1, -0.2]])
>>> is_rate_matrix(K)
True
]
variable[K] assign[=] call[name[_types].ensure_ndarray_or_sparse, parameter[name[K]]]
if call[name[_issparse], parameter[name[K]]] begin[:]
return[call[name[sparse].assessment.is_rate_matrix, parameter[name[K], name[tol]]]] | keyword[def] identifier[is_rate_matrix] ( identifier[K] , identifier[tol] = literal[int] ):
literal[string]
identifier[K] = identifier[_types] . identifier[ensure_ndarray_or_sparse] ( identifier[K] , identifier[ndim] = literal[int] , identifier[uniform] = keyword[True] , identifier[kind] = literal[string] )
keyword[if] identifier[_issparse] ( identifier[K] ):
keyword[return] identifier[sparse] . identifier[assessment] . identifier[is_rate_matrix] ( identifier[K] , identifier[tol] )
keyword[else] :
keyword[return] identifier[dense] . identifier[assessment] . identifier[is_rate_matrix] ( identifier[K] , identifier[tol] ) | def is_rate_matrix(K, tol=1e-12):
"""Check if the given matrix is a rate matrix.
Parameters
----------
K : (M, M) ndarray or scipy.sparse matrix
Matrix to check
tol : float (optional)
Floating point tolerance to check with
Returns
-------
is_rate_matrix : bool
True, if K is a valid rate matrix, False otherwise
Notes
-----
A valid rate matrix :math:`K=(k_{ij})` has non-negative off
    diagonal elements, :math:`k_{ij} \\geq 0`, for :math:`i \\neq j`,
and elements of each row sum up to zero, :math:`\\sum_{j}
k_{ij}=0`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import is_rate_matrix
>>> A = np.array([[0.5, -0.5, -0.2], [-0.3, 0.6, -0.3], [-0.2, 0.2, 0.0]])
>>> is_rate_matrix(A)
False
>>> K = np.array([[-0.3, 0.2, 0.1], [0.5, -0.5, 0.0], [0.1, 0.1, -0.2]])
>>> is_rate_matrix(K)
True
"""
K = _types.ensure_ndarray_or_sparse(K, ndim=2, uniform=True, kind='numeric')
if _issparse(K):
return sparse.assessment.is_rate_matrix(K, tol) # depends on [control=['if'], data=[]]
else:
return dense.assessment.is_rate_matrix(K, tol) |
def wait_for_and_switch_to_alert(driver, timeout=settings.LARGE_TIMEOUT):
"""
Wait for a browser alert to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.alert when the alert box
may not exist yet.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
try:
alert = driver.switch_to.alert
# Raises exception if no alert present
dummy_variable = alert.text # noqa
return alert
except NoAlertPresentException:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
raise Exception("Alert was not present after %s seconds!" % timeout) | def function[wait_for_and_switch_to_alert, parameter[driver, timeout]]:
constant[
Wait for a browser alert to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.alert when the alert box
may not exist yet.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds
]
variable[start_ms] assign[=] binary_operation[call[name[time].time, parameter[]] * constant[1000.0]]
variable[stop_ms] assign[=] binary_operation[name[start_ms] + binary_operation[name[timeout] * constant[1000.0]]]
for taget[name[x]] in starred[call[name[range], parameter[call[name[int], parameter[binary_operation[name[timeout] * constant[10]]]]]]] begin[:]
<ast.Try object at 0x7da1b1b61030>
<ast.Raise object at 0x7da1b1bab7c0> | keyword[def] identifier[wait_for_and_switch_to_alert] ( identifier[driver] , identifier[timeout] = identifier[settings] . identifier[LARGE_TIMEOUT] ):
literal[string]
identifier[start_ms] = identifier[time] . identifier[time] ()* literal[int]
identifier[stop_ms] = identifier[start_ms] +( identifier[timeout] * literal[int] )
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[int] ( identifier[timeout] * literal[int] )):
keyword[try] :
identifier[alert] = identifier[driver] . identifier[switch_to] . identifier[alert]
identifier[dummy_variable] = identifier[alert] . identifier[text]
keyword[return] identifier[alert]
keyword[except] identifier[NoAlertPresentException] :
identifier[now_ms] = identifier[time] . identifier[time] ()* literal[int]
keyword[if] identifier[now_ms] >= identifier[stop_ms] :
keyword[break]
identifier[time] . identifier[sleep] ( literal[int] )
keyword[raise] identifier[Exception] ( literal[string] % identifier[timeout] ) | def wait_for_and_switch_to_alert(driver, timeout=settings.LARGE_TIMEOUT):
"""
Wait for a browser alert to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.alert when the alert box
may not exist yet.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + timeout * 1000.0
for x in range(int(timeout * 10)):
try:
alert = driver.switch_to.alert
# Raises exception if no alert present
dummy_variable = alert.text # noqa
return alert # depends on [control=['try'], data=[]]
except NoAlertPresentException:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break # depends on [control=['if'], data=[]]
time.sleep(0.1) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
raise Exception('Alert was not present after %s seconds!' % timeout) |
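# Usage sketch with Selenium; the page and the script-triggered alert are
# illustrative.
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('https://example.com')
driver.execute_script("setTimeout(function () { alert('saved'); }, 500);")
alert = wait_for_and_switch_to_alert(driver, timeout=5)
alert.accept()
driver.quit()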
def drawBezier(page, p1, p2, p3, p4, color=None, fill=None,
dashes=None, width=1, morph=None,
closePath=False, roundCap=False, overlay=True):
"""Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3.
"""
img = page.newShape()
Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4))
img.finish(color=color, fill=fill, dashes=dashes, width=width,
roundCap=roundCap, morph=morph, closePath=closePath)
img.commit(overlay)
return Q | def function[drawBezier, parameter[page, p1, p2, p3, p4, color, fill, dashes, width, morph, closePath, roundCap, overlay]]:
constant[Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3.
]
variable[img] assign[=] call[name[page].newShape, parameter[]]
variable[Q] assign[=] call[name[img].drawBezier, parameter[call[name[Point], parameter[name[p1]]], call[name[Point], parameter[name[p2]]], call[name[Point], parameter[name[p3]]], call[name[Point], parameter[name[p4]]]]]
call[name[img].finish, parameter[]]
call[name[img].commit, parameter[name[overlay]]]
return[name[Q]] | keyword[def] identifier[drawBezier] ( identifier[page] , identifier[p1] , identifier[p2] , identifier[p3] , identifier[p4] , identifier[color] = keyword[None] , identifier[fill] = keyword[None] ,
identifier[dashes] = keyword[None] , identifier[width] = literal[int] , identifier[morph] = keyword[None] ,
identifier[closePath] = keyword[False] , identifier[roundCap] = keyword[False] , identifier[overlay] = keyword[True] ):
literal[string]
identifier[img] = identifier[page] . identifier[newShape] ()
identifier[Q] = identifier[img] . identifier[drawBezier] ( identifier[Point] ( identifier[p1] ), identifier[Point] ( identifier[p2] ), identifier[Point] ( identifier[p3] ), identifier[Point] ( identifier[p4] ))
identifier[img] . identifier[finish] ( identifier[color] = identifier[color] , identifier[fill] = identifier[fill] , identifier[dashes] = identifier[dashes] , identifier[width] = identifier[width] ,
identifier[roundCap] = identifier[roundCap] , identifier[morph] = identifier[morph] , identifier[closePath] = identifier[closePath] )
identifier[img] . identifier[commit] ( identifier[overlay] )
keyword[return] identifier[Q] | def drawBezier(page, p1, p2, p3, p4, color=None, fill=None, dashes=None, width=1, morph=None, closePath=False, roundCap=False, overlay=True):
"""Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3.
"""
img = page.newShape()
Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4))
img.finish(color=color, fill=fill, dashes=dashes, width=width, roundCap=roundCap, morph=morph, closePath=closePath)
img.commit(overlay)
return Q |
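# Usage sketch with PyMuPDF (fitz), matching the camelCase API generation of
# the snippet; the control points are arbitrary.
import fitz

doc = fitz.open()
page = doc.newPage()
drawBezier(page, (50, 400), (150, 300), (250, 500), (350, 400),
           color=(0, 0, 1), width=1.5)
doc.save('bezier.pdf')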
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer)
self.stale = False | def function[draw, parameter[self, renderer]]:
constant[
Draw the children
]
variable[dpi_cor] assign[=] call[name[renderer].points_to_pixels, parameter[constant[1.0]]]
call[name[self].dpi_transform.clear, parameter[]]
call[name[self].dpi_transform.scale, parameter[name[dpi_cor], name[dpi_cor]]]
for taget[name[c]] in starred[name[self]._children] begin[:]
call[name[c].draw, parameter[name[renderer]]]
name[self].stale assign[=] constant[False] | keyword[def] identifier[draw] ( identifier[self] , identifier[renderer] ):
literal[string]
identifier[dpi_cor] = identifier[renderer] . identifier[points_to_pixels] ( literal[int] )
identifier[self] . identifier[dpi_transform] . identifier[clear] ()
identifier[self] . identifier[dpi_transform] . identifier[scale] ( identifier[dpi_cor] , identifier[dpi_cor] )
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[_children] :
identifier[c] . identifier[draw] ( identifier[renderer] )
identifier[self] . identifier[stale] = keyword[False] | def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.0)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer) # depends on [control=['for'], data=['c']]
self.stale = False |
def verify(self, obj):
"""Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
            ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if obj != self._literal:
raise ValidationError("Object is not equal to literal",
reason='%s is not equal to %s' % (str(obj), str(self._literal)), object=obj)
return obj | def function[verify, parameter[self, obj]]:
constant[Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
        ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
]
if compare[name[obj] not_equal[!=] name[self]._literal] begin[:]
<ast.Raise object at 0x7da18eb57b80>
return[name[obj]] | keyword[def] identifier[verify] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[obj] != identifier[self] . identifier[_literal] :
keyword[raise] identifier[ValidationError] ( literal[string] ,
identifier[reason] = literal[string] %( identifier[str] ( identifier[obj] ), identifier[str] ( identifier[self] . identifier[_literal] )), identifier[object] = identifier[obj] )
keyword[return] identifier[obj] | def verify(self, obj):
"""Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
            ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if obj != self._literal:
raise ValidationError('Object is not equal to literal', reason='%s is not equal to %s' % (str(obj), str(self._literal)), object=obj) # depends on [control=['if'], data=['obj']]
return obj |
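# Hypothetical usage; LiteralVerifier is an assumed name for the class this
# method belongs to.
v = LiteralVerifier('enabled')
v.verify('enabled')    # returns 'enabled'
v.verify('disabled')   # raises ValidationError with a reason string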
def foldx_dir(self):
"""str: FoldX folder"""
if self.root_dir:
return op.join(self.root_dir, self._foldx_dirname)
else:
log.warning('Root directory not set')
return None | def function[foldx_dir, parameter[self]]:
constant[str: FoldX folder]
if name[self].root_dir begin[:]
return[call[name[op].join, parameter[name[self].root_dir, name[self]._foldx_dirname]]] | keyword[def] identifier[foldx_dir] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[root_dir] :
keyword[return] identifier[op] . identifier[join] ( identifier[self] . identifier[root_dir] , identifier[self] . identifier[_foldx_dirname] )
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[return] keyword[None] | def foldx_dir(self):
"""str: FoldX folder"""
if self.root_dir:
return op.join(self.root_dir, self._foldx_dirname) # depends on [control=['if'], data=[]]
else:
log.warning('Root directory not set')
return None |
def _get_measure_outcome(self, qubit):
"""Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
"""
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
        # Sample the measurement outcome using the computed probabilities
random_number = self._local_random.rand()
if random_number < probabilities[0]:
return '0', probabilities[0]
# Else outcome was '1'
return '1', probabilities[1] | def function[_get_measure_outcome, parameter[self, qubit]]:
constant[Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
]
variable[axis] assign[=] call[name[list], parameter[call[name[range], parameter[name[self]._number_of_qubits]]]]
call[name[axis].remove, parameter[binary_operation[binary_operation[name[self]._number_of_qubits - constant[1]] - name[qubit]]]]
variable[probabilities] assign[=] call[name[np].sum, parameter[binary_operation[call[name[np].abs, parameter[name[self]._statevector]] ** constant[2]]]]
variable[random_number] assign[=] call[name[self]._local_random.rand, parameter[]]
if compare[name[random_number] less[<] call[name[probabilities]][constant[0]]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b059f940>, <ast.Subscript object at 0x7da1b059cca0>]]]
return[tuple[[<ast.Constant object at 0x7da1b059cf10>, <ast.Subscript object at 0x7da1b059d630>]]] | keyword[def] identifier[_get_measure_outcome] ( identifier[self] , identifier[qubit] ):
literal[string]
identifier[axis] = identifier[list] ( identifier[range] ( identifier[self] . identifier[_number_of_qubits] ))
identifier[axis] . identifier[remove] ( identifier[self] . identifier[_number_of_qubits] - literal[int] - identifier[qubit] )
identifier[probabilities] = identifier[np] . identifier[sum] ( identifier[np] . identifier[abs] ( identifier[self] . identifier[_statevector] )** literal[int] , identifier[axis] = identifier[tuple] ( identifier[axis] ))
identifier[random_number] = identifier[self] . identifier[_local_random] . identifier[rand] ()
keyword[if] identifier[random_number] < identifier[probabilities] [ literal[int] ]:
keyword[return] literal[string] , identifier[probabilities] [ literal[int] ]
keyword[return] literal[string] , identifier[probabilities] [ literal[int] ] | def _get_measure_outcome(self, qubit):
"""Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
"""
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
    # Sample the measurement outcome using the computed probabilities
random_number = self._local_random.rand()
if random_number < probabilities[0]:
return ('0', probabilities[0]) # depends on [control=['if'], data=[]]
# Else outcome was '1'
return ('1', probabilities[1]) |
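# Standalone sketch of the marginal-probability step: the statevector is held
# as a rank-n tensor and every axis except the measured qubit's is summed out.
import numpy as np

n = 2
state = np.zeros([2] * n, dtype=complex)
state[(0,) * n] = state[(1,) * n] = 1 / np.sqrt(2)  # Bell state (|00> + |11>)/sqrt(2)

qubit = 0
axis = list(range(n))
axis.remove(n - 1 - qubit)  # little-endian qubit ordering
probabilities = np.sum(np.abs(state) ** 2, axis=tuple(axis))
print(probabilities)  # [0.5 0.5]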
def append(self, other, ignore_index=False):
"""Append rows of `other` to the end of this frame, returning a new object.
Wrapper around the :meth:`pandas.DataFrame.append` method.
Args:
other (Cartesian):
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
Returns:
Cartesian:
"""
if not isinstance(other, self.__class__):
raise ValueError('May only append instances of same type.')
if type(ignore_index) is bool:
new_frame = self._frame.append(other._frame,
ignore_index=ignore_index,
verify_integrity=True)
else:
new_frame = self._frame.append(other._frame,
ignore_index=True,
verify_integrity=True)
if type(ignore_index) is int:
new_frame.index = range(ignore_index,
ignore_index + len(new_frame))
else:
new_frame.index = ignore_index
return self.__class__(new_frame) | def function[append, parameter[self, other, ignore_index]]:
constant[Append rows of `other` to the end of this frame, returning a new object.
Wrapper around the :meth:`pandas.DataFrame.append` method.
Args:
other (Cartesian):
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
Returns:
Cartesian:
]
if <ast.UnaryOp object at 0x7da1b27bba60> begin[:]
<ast.Raise object at 0x7da1b27bb160>
if compare[call[name[type], parameter[name[ignore_index]]] is name[bool]] begin[:]
variable[new_frame] assign[=] call[name[self]._frame.append, parameter[name[other]._frame]]
return[call[name[self].__class__, parameter[name[new_frame]]]] | keyword[def] identifier[append] ( identifier[self] , identifier[other] , identifier[ignore_index] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[self] . identifier[__class__] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[type] ( identifier[ignore_index] ) keyword[is] identifier[bool] :
identifier[new_frame] = identifier[self] . identifier[_frame] . identifier[append] ( identifier[other] . identifier[_frame] ,
identifier[ignore_index] = identifier[ignore_index] ,
identifier[verify_integrity] = keyword[True] )
keyword[else] :
identifier[new_frame] = identifier[self] . identifier[_frame] . identifier[append] ( identifier[other] . identifier[_frame] ,
identifier[ignore_index] = keyword[True] ,
identifier[verify_integrity] = keyword[True] )
keyword[if] identifier[type] ( identifier[ignore_index] ) keyword[is] identifier[int] :
identifier[new_frame] . identifier[index] = identifier[range] ( identifier[ignore_index] ,
identifier[ignore_index] + identifier[len] ( identifier[new_frame] ))
keyword[else] :
identifier[new_frame] . identifier[index] = identifier[ignore_index]
keyword[return] identifier[self] . identifier[__class__] ( identifier[new_frame] ) | def append(self, other, ignore_index=False):
"""Append rows of `other` to the end of this frame, returning a new object.
Wrapper around the :meth:`pandas.DataFrame.append` method.
Args:
other (Cartesian):
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
Returns:
Cartesian:
"""
if not isinstance(other, self.__class__):
raise ValueError('May only append instances of same type.') # depends on [control=['if'], data=[]]
if type(ignore_index) is bool:
new_frame = self._frame.append(other._frame, ignore_index=ignore_index, verify_integrity=True) # depends on [control=['if'], data=[]]
else:
new_frame = self._frame.append(other._frame, ignore_index=True, verify_integrity=True)
if type(ignore_index) is int:
new_frame.index = range(ignore_index, ignore_index + len(new_frame)) # depends on [control=['if'], data=[]]
else:
new_frame.index = ignore_index
return self.__class__(new_frame) |
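# Hedged usage sketch (the class follows the chemcoord API); 'water.xyz' is a
# placeholder structure file.
import chemcoord as cc

water = cc.Cartesian.read_xyz('water.xyz')
dimer = water.append(water, ignore_index=True)     # fresh integer index
relabeled = water.append(water, ignore_index=100)  # index becomes range(100, 100 + n)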
def get_expected_image_size(module_or_spec, signature=None, input_name=None):
"""Returns expected [height, width] dimensions of an image input.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
      A list of integers `[height, width]`.
Raises:
ValueError: If the size information is missing or malformed.
"""
# First see if an attached ImageModuleInfo provides this information.
image_module_info = get_image_module_info(module_or_spec)
if image_module_info:
size = image_module_info.default_image_size
if size.height and size.width:
return [size.height, size.width]
# Else inspect the input shape in the module signature.
if input_name is None:
input_name = "images"
input_info_dict = module_or_spec.get_input_info_dict(signature)
try:
shape = input_info_dict[input_name].get_shape()
except KeyError:
raise ValueError("Module is missing input '%s' in signature '%s'." %
(input_name, signature or "default"))
try:
_, height, width, _ = shape.as_list()
if not height or not width:
raise ValueError
except ValueError:
raise ValueError(
"Shape of module input is %s, "
"expected [batch_size, height, width, num_channels] "
"with known height and width." % shape)
return [height, width] | def function[get_expected_image_size, parameter[module_or_spec, signature, input_name]]:
constant[Returns expected [height, width] dimensions of an image input.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
      A list of integers `[height, width]`.
Raises:
ValueError: If the size information is missing or malformed.
]
variable[image_module_info] assign[=] call[name[get_image_module_info], parameter[name[module_or_spec]]]
if name[image_module_info] begin[:]
variable[size] assign[=] name[image_module_info].default_image_size
if <ast.BoolOp object at 0x7da1b2013fd0> begin[:]
return[list[[<ast.Attribute object at 0x7da1b2013700>, <ast.Attribute object at 0x7da1b2010af0>]]]
if compare[name[input_name] is constant[None]] begin[:]
variable[input_name] assign[=] constant[images]
variable[input_info_dict] assign[=] call[name[module_or_spec].get_input_info_dict, parameter[name[signature]]]
<ast.Try object at 0x7da1b2012e30>
<ast.Try object at 0x7da1b20b8610>
return[list[[<ast.Name object at 0x7da1b20bbdf0>, <ast.Name object at 0x7da1b20b95d0>]]] | keyword[def] identifier[get_expected_image_size] ( identifier[module_or_spec] , identifier[signature] = keyword[None] , identifier[input_name] = keyword[None] ):
literal[string]
identifier[image_module_info] = identifier[get_image_module_info] ( identifier[module_or_spec] )
keyword[if] identifier[image_module_info] :
identifier[size] = identifier[image_module_info] . identifier[default_image_size]
keyword[if] identifier[size] . identifier[height] keyword[and] identifier[size] . identifier[width] :
keyword[return] [ identifier[size] . identifier[height] , identifier[size] . identifier[width] ]
keyword[if] identifier[input_name] keyword[is] keyword[None] :
identifier[input_name] = literal[string]
identifier[input_info_dict] = identifier[module_or_spec] . identifier[get_input_info_dict] ( identifier[signature] )
keyword[try] :
identifier[shape] = identifier[input_info_dict] [ identifier[input_name] ]. identifier[get_shape] ()
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ValueError] ( literal[string] %
( identifier[input_name] , identifier[signature] keyword[or] literal[string] ))
keyword[try] :
identifier[_] , identifier[height] , identifier[width] , identifier[_] = identifier[shape] . identifier[as_list] ()
keyword[if] keyword[not] identifier[height] keyword[or] keyword[not] identifier[width] :
keyword[raise] identifier[ValueError]
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
literal[string] % identifier[shape] )
keyword[return] [ identifier[height] , identifier[width] ] | def get_expected_image_size(module_or_spec, signature=None, input_name=None):
"""Returns expected [height, width] dimensions of an image input.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
      A list of integers `[height, width]`.
Raises:
ValueError: If the size information is missing or malformed.
"""
# First see if an attached ImageModuleInfo provides this information.
image_module_info = get_image_module_info(module_or_spec)
if image_module_info:
size = image_module_info.default_image_size
if size.height and size.width:
return [size.height, size.width] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Else inspect the input shape in the module signature.
if input_name is None:
input_name = 'images' # depends on [control=['if'], data=['input_name']]
input_info_dict = module_or_spec.get_input_info_dict(signature)
try:
shape = input_info_dict[input_name].get_shape() # depends on [control=['try'], data=[]]
except KeyError:
raise ValueError("Module is missing input '%s' in signature '%s'." % (input_name, signature or 'default')) # depends on [control=['except'], data=[]]
try:
(_, height, width, _) = shape.as_list()
if not height or not width:
raise ValueError # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Shape of module input is %s, expected [batch_size, height, width, num_channels] with known height and width.' % shape) # depends on [control=['except'], data=[]]
return [height, width] |
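# Usage sketch with the TF1-style tensorflow_hub API this helper ships with;
# the module URL is illustrative.
import tensorflow_hub as hub

module = hub.Module('https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2')
height, width = hub.get_expected_image_size(module)  # e.g. 224, 224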
def ar1(rho, mu, sigma, size=1):
"""Return an autoregressive series of order one AR(1).
.. math::
        X_t = \mu_t + \rho (X_{t-1}-\mu_{t-1}) + \epsilon_t
If mu is a sequence and size > len(mu), the algorithm loops through
mu.
:Stochastics:
rho : scalar in [0,1]
mu : scalar or sequence
sigma : scalar > 0
size : integer
"""
return np.array([x for x in ar1_gen(rho, mu, sigma, size)]) | def function[ar1, parameter[rho, mu, sigma, size]]:
constant[Return an autoregressive series of order one AR(1).
.. math::
        X_t = \mu_t + \rho (X_{t-1}-\mu_{t-1}) + \epsilon_t
If mu is a sequence and size > len(mu), the algorithm loops through
mu.
:Stochastics:
rho : scalar in [0,1]
mu : scalar or sequence
sigma : scalar > 0
size : integer
]
return[call[name[np].array, parameter[<ast.ListComp object at 0x7da20c7c9cf0>]]] | keyword[def] identifier[ar1] ( identifier[rho] , identifier[mu] , identifier[sigma] , identifier[size] = literal[int] ):
literal[string]
keyword[return] identifier[np] . identifier[array] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[ar1_gen] ( identifier[rho] , identifier[mu] , identifier[sigma] , identifier[size] )]) | def ar1(rho, mu, sigma, size=1):
"""Return an autoregressive series of order one AR(1).
.. math::
        X_t = \\mu_t + \\rho (X_{t-1}-\\mu_{t-1}) + \\epsilon_t
If mu is a sequence and size > len(mu), the algorithm loops through
mu.
:Stochastics:
rho : scalar in [0,1]
mu : scalar or sequence
sigma : scalar > 0
size : integer
"""
return np.array([x for x in ar1_gen(rho, mu, sigma, size)]) |
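# Standalone sketch of the recursion for scalar mu (ar1_gen is not shown here):
# X_t = mu + rho * (X_{t-1} - mu) + eps_t with eps_t ~ N(0, sigma^2).
import numpy as np

def ar1_sketch(rho, mu, sigma, size):
    x = np.empty(size)
    x[0] = mu + np.random.normal(0.0, sigma)
    for t in range(1, size):
        x[t] = mu + rho * (x[t - 1] - mu) + np.random.normal(0.0, sigma)
    return x

series = ar1_sketch(0.9, 0.0, 1.0, 500)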