def compilearg(self):
"""This method compiles the parameter into syntax that can be used on the shell, such as -paramflag=value"""
if isinstance(self.value,list):
value = self.delimiter.join(self.value)
else:
value = self.value
if value.find(" ") >= 0:
value = '"' + value + '"' #wrap all in quotes
    #for some odd reason this produced an error, as if we're not an instance of ChoiceParameter
#return super(ChoiceParameter,self).compilearg(value)
#workaround:
    if (self.paramflag and self.paramflag[-1] == '=') or self.nospace:
sep = ''
elif self.paramflag:
sep = ' '
else:
return str(value)
    return self.paramflag + sep + str(value)
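
# Hedged usage sketch for compilearg above. The surrounding class is not
# shown in this snippet, so _FakeParam is a hypothetical stand-in carrying
# the attributes the method reads (paramflag, nospace, delimiter, value).
class _FakeParam:
    paramflag = '--input='
    nospace = False
    delimiter = ','
    value = ['a b', 'c']

print(compilearg(_FakeParam()))  # --input="a b,c"
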
def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec,
value):
"""
This function will add an analog value to the specified eDNA service and
tag, without an associated point status.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param msec: The additional milliseconds for the time_value
:param value: The value associated with the above time.
:return: 0, if the data push is successful
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
dValue = c_double(value)
usMsec = c_ushort(msec)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(szService,
szPointId, tTime, dValue, usMsec)
    return nRet
def __convert(root, tag, values, func):
"""Converts the tag type found in the root and converts them using the func
and appends them to the values.
"""
elements = root.getElementsByTagName(tag)
for element in elements:
converted = func(element)
# Append to the list
        __append_list(values, converted)
def print_issue(self, service_name, limit, crits, warns):
"""
:param service_name: the name of the service
:type service_name: str
:param limit: the Limit this relates to
:type limit: :py:class:`~.AwsLimit`
    :param crits: the specific usage values that crossed the critical
        threshold
    :type crits: :py:obj:`list` of :py:class:`~.AwsLimitUsage`
    :param warns: the specific usage values that crossed the warning
        threshold
    :type warns: :py:obj:`list` of :py:class:`~.AwsLimitUsage`
"""
usage_str = ''
if len(crits) > 0:
tmp = 'CRITICAL: '
tmp += ', '.join([str(x) for x in sorted(crits)])
usage_str += self.color_output(tmp, 'red')
if len(warns) > 0:
if len(crits) > 0:
usage_str += ' '
tmp = 'WARNING: '
tmp += ', '.join([str(x) for x in sorted(warns)])
usage_str += self.color_output(tmp, 'yellow')
k = "{s}/{l}".format(
s=service_name,
l=limit.name,
)
v = "(limit {v}) {u}".format(
v=limit.get_limit(),
u=usage_str,
)
    return (k, v)
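
# Hedged usage sketch for print_issue above. Real awslimitchecker passes
# AwsLimit/AwsLimitUsage objects; the stand-ins below only provide the
# attributes the method actually touches (name, get_limit, color_output).
class _FakeLimit:
    name = 'Running On-Demand EC2 instances'
    def get_limit(self):
        return 20

class _FakeReporter:
    print_issue = print_issue  # reuse the function above as a method
    def color_output(self, text, color):
        return text  # skip terminal coloring in this sketch

k, v = _FakeReporter().print_issue('EC2', _FakeLimit(), crits=[19], warns=[])
print(k, '->', v)  # EC2/Running On-Demand EC2 instances -> (limit 20) CRITICAL: 19
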
def watermark_image(image, wtrmrk_path, corner=2):
'''Adds a watermark image to an instance of a PIL Image.
If the provided watermark image (wtrmrk_path) is
larger than the provided base image (image), then
the watermark image will be automatically resized to
roughly 1/8 the size of the base image.
Args:
image: An instance of a PIL Image. This is the base image.
wtrmrk_path: Path to the watermark image to use.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
padding = 2
wtrmrk_img = Image.open(wtrmrk_path)
#Need to perform size check in here rather than in options.py because this is
# the only place where we know the size of the image that the watermark is
# being placed onto
if wtrmrk_img.width > (image.width - padding * 2) or wtrmrk_img.height > (
image.height - padding * 2):
res = (int(image.width / 8.0), int(image.height / 8.0))
resize_in_place(wtrmrk_img, res)
pos = get_pos(corner, image.size, wtrmrk_img.size, padding)
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
image.paste(wtrmrk_img.convert('RGBA'), pos, wtrmrk_img.convert('RGBA'))
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
    return image
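
# Hedged usage sketch for watermark_image above (file names are placeholders;
# requires Pillow plus the get_pos/resize_in_place helpers the function
# references but which are not shown in this snippet):
from PIL import Image

base = Image.open('photo.jpg')
marked = watermark_image(base, 'logo.png')  # corner=2: bottom right
marked.save('photo_marked.jpg')
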
def clear(self, actors=()):
"""Delete specified list of actors, by default delete all."""
if not utils.isSequence(actors):
actors = [actors]
if len(actors):
for a in actors:
self.removeActor(a)
else:
for a in settings.collectable_actors:
self.removeActor(a)
settings.collectable_actors = []
self.actors = []
for a in self.getActors():
self.renderer.RemoveActor(a)
for a in self.getVolumes():
self.renderer.RemoveVolume(a)
for s in self.sliders:
s.EnabledOff()
for b in self.buttons:
self.renderer.RemoveActor(b)
for w in self.widgets:
w.EnabledOff()
for c in self.scalarbars:
        self.renderer.RemoveActor(c)
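
# Hedged usage sketch for clear above (vtkplotter/vedo-style Plotter API;
# the names below are illustrative, not taken from this snippet):
#   vp = Plotter()
#   vp.show(cube, sphere)
#   vp.clear(sphere)  # remove a single actor
#   vp.clear()        # remove everything, including sliders and widgets
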
def activate_absence_with_duration(self, duration: int):
""" activates the absence mode for a given time
Args:
duration(int): the absence duration in minutes
"""
data = {"duration": duration}
return self._restCall(
"home/heating/activateAbsenceWithDuration", json.dumps(data)
    )
def shift_left(self, times=1):
"""
    Finds Location shifted left by ``times`` (default 1)
:rtype: Location
"""
try:
return Location(self._rank, self._file - times)
except IndexError as e:
        raise IndexError(e)
def _suffixArrayWithTrace(s, SA, n, K, operations, totalOperations):
"""
    This function is a rewrite in Python of the C implementation proposed in Kärkkäinen and Sanders' paper.
Find the suffix array SA of s[0..n-1] in {1..K}^n
Require s[n]=s[n+1]=s[n+2]=0, n>=2
"""
if _trace:
_traceSuffixArray(operations, totalOperations)
n0 = (n + 2) // 3
n1 = (n + 1) // 3
n2 = n // 3
n02 = n0 + n2
SA12 = _array("i", [0] * (n02 + 3))
SA0 = _array("i", [0] * n0)
s0 = _array("i", [0] * n0)
# s12 : positions of mod 1 and mod 2 suffixes
s12 = _array("i", [i for i in range(n + (n0 - n1)) if i % 3]) # <- writing i%3 is more efficient than i%3!=0
s12.extend([0] * 3)
# lsb radix sort the mod 1 and mod 2 triples
_radixPass(s12, SA12, s[2:], n02, K)
if _trace:
operations += n02
_traceSuffixArray(operations, totalOperations)
_radixPass(SA12, s12, s[1:], n02, K)
if _trace:
operations += n02
_traceSuffixArray(operations, totalOperations)
_radixPass(s12, SA12, s, n02, K)
if _trace:
operations += n02
_traceSuffixArray(operations, totalOperations)
# find lexicographic names of triples
name = 0
c = _array("i", [-1] * 3)
for i in range(n02):
cSA12 = s[SA12[i]:SA12[i] + 3]
if cSA12 != c:
name += 1
c = cSA12
if SA12[i] % 3 == 1:
s12[SA12[i] // 3] = name # left half
else:
s12[(SA12[i] // 3) + n0] = name # right half
if name < n02: # recurse if names are not yet unique
operations = _suffixArrayWithTrace(s12, SA12, n02, name + 1, operations, totalOperations)
if _trace:
_traceSuffixArray(operations, totalOperations)
# store unique names in s12 using the suffix array
for i, SA12_i in enumerate(SA12[:n02]):
s12[SA12_i] = i + 1
else: # generate the suffix array of s12 directly
if _trace:
operations += _nbOperations(n02)
_traceSuffixArray(operations, totalOperations)
for i, s12_i in enumerate(s12[:n02]):
SA12[s12_i - 1] = i
# stably sort the mod 0 suffixes from SA12 by their first character
j = 0
for SA12_i in SA12[:n02]:
if (SA12_i < n0):
s0[j] = 3 * SA12_i
j += 1
_radixPass(s0, SA0, s, n0, K)
if _trace:
operations += n0
_traceSuffixArray(operations, totalOperations)
# merge sorted SA0 suffixes and sorted SA12 suffixes
p = j = k = 0
t = n0 - n1
while k < n:
if SA12[t] < n0: # pos of current offset 12 suffix
i = SA12[t] * 3 + 1
else:
i = (SA12[t] - n0) * 3 + 2
j = SA0[p] # pos of current offset 0 suffix
        if SA12[t] < n0:
            leq = (s[i], s12[SA12[t] + n0]) <= (s[j], s12[int(j / 3)])
        else:
            leq = (s[i], s[i + 1], s12[SA12[t] - n0 + 1]) <= (s[j], s[j + 1], s12[int(j / 3) + n0])
        if leq:
SA[k] = i
t += 1
if t == n02: # done --- only SA0 suffixes left
k += 1
while p < n0:
SA[k] = SA0[p]
p += 1
k += 1
else:
SA[k] = j
p += 1
if p == n0: # done --- only SA12 suffixes left
k += 1
while t < n02:
if SA12[t] < n0: # pos of current offset 12 suffix
SA[k] = (SA12[t] * 3) + 1
else:
SA[k] = ((SA12[t] - n0) * 3) + 2
t += 1
k += 1
k += 1
    return operations
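
# Hedged driver sketch for the skew/DC3 routine above: map characters to
# integers >= 1, pad with the three sentinel zeros the docstring requires,
# and read the suffix array back. Assumes the module-level _radixPass helper
# is available and _trace is False (both are referenced above, not shown).
from array import array as _array

def suffix_array(text):
    s = _array('i', [ord(ch) for ch in text] + [0, 0, 0])
    n = len(text)
    SA = _array('i', [0] * (n + 3))
    _suffixArrayWithTrace(s, SA, n, max(s) + 1, 0, 0)
    return list(SA[:n])

print(suffix_array('banana'))  # [5, 3, 1, 0, 4, 2]
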
def get_folder(self, title):
"""
Retrieve a folder by its title
Usage: C{engine.get_folder(title)}
Note that if more than one folder has the same title, only the first match will be
returned.
"""
for folder in self.configManager.allFolders:
if folder.title == title:
return folder
    return None
def read_actions():
"""Yields actions for pressed keys."""
while True:
key = get_key()
# Handle arrows, j/k (qwerty), and n/e (colemak)
if key in (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e'):
yield const.ACTION_PREVIOUS
elif key in (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n'):
yield const.ACTION_NEXT
elif key in (const.KEY_CTRL_C, 'q'):
yield const.ACTION_ABORT
elif key in ('\n', '\r'):
            yield const.ACTION_SELECT
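
# Hedged usage sketch for read_actions above (get_key and const come from
# the same module and are not shown in this snippet):
#   for action in read_actions():
#       if action == const.ACTION_ABORT:
#           break
#       handle(action)  # hypothetical dispatcher
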
def write_to(self, group, append=False):
"""Writes the properties to a `group`, or append it"""
data = self.data
if append is True:
try:
# concatenate original and new properties in a single list
original = read_properties(group)
data = original + data
except EOFError:
pass # no former data to append on
# h5py does not support embedded NULLs in strings ('\x00')
data = pickle.dumps(data).replace(b'\x00', b'__NULL__')
    group['properties'][...] = np.void(data)
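
# Hedged sketch of a matching read side for write_to above. read_properties
# is referenced by the method but not shown in this snippet; the function
# below is one plausible inverse of the '__NULL__' substitution, not the
# library's actual implementation.
import pickle

def _read_properties_sketch(group):
    raw = group['properties'][()].tobytes()  # np.void scalar -> raw bytes
    return pickle.loads(raw.replace(b'__NULL__', b'\x00'))
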
def _send_str(self, cmd, args):
"""
Format:
{Command}{args length(little endian)}{str}
Length:
{4}{4}{str length}
"""
logger.debug("{} {}".format(cmd, args))
args = args.encode('utf-8')
le_args_len = self._little_endian(len(args))
data = cmd.encode() + le_args_len + args
logger.debug("Send string: {}".format(data))
    self.connection.write(data)
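
# Hedged illustration of the frame produced by _send_str above, built by
# hand with struct. This assumes _little_endian (referenced above but not
# shown) packs a 4-byte little-endian unsigned int.
import struct

cmd, args = 'SEND', 'hi'
frame = cmd.encode() + struct.pack('<I', len(args.encode('utf-8'))) + args.encode('utf-8')
print(frame)  # b'SEND\x02\x00\x00\x00hi'
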
def daemonize(self, log_into, after_app_loading=False):
"""Daemonize uWSGI.
    :param str|unicode log_into: Logging destination:
        * File: /tmp/mylog.log
        * UDP: 192.168.1.2:1717
        .. note:: This will require a UDP server to manage log messages.
            Use ``networking.register_socket('192.168.1.2:1717', type=networking.SOCK_UDP)``
            to start a uWSGI UDP server.
    :param bool after_app_loading: Whether to daemonize after
        or before applications loading.
"""
self._set('daemonize2' if after_app_loading else 'daemonize', log_into)
    return self._section
def delete(self, filename):
"""Remove the metadata from the given filename."""
self._failed_atoms.clear()
self.clear()
    self.save(filename, padding=lambda x: 0)
def writable_path(path):
"""Test whether a path can be written to.
"""
if os.path.exists(path):
return os.access(path, os.W_OK)
try:
with open(path, 'w'):
pass
except (OSError, IOError):
return False
else:
os.remove(path)
return True | def function[writable_path, parameter[path]]:
constant[Test whether a path can be written to.
]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
return[call[name[os].access, parameter[name[path], name[os].W_OK]]]
<ast.Try object at 0x7da1b021f400> | keyword[def] identifier[writable_path] ( identifier[path] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[return] identifier[os] . identifier[access] ( identifier[path] , identifier[os] . identifier[W_OK] )
keyword[try] :
keyword[with] identifier[open] ( identifier[path] , literal[string] ):
keyword[pass]
keyword[except] ( identifier[OSError] , identifier[IOError] ):
keyword[return] keyword[False]
keyword[else] :
identifier[os] . identifier[remove] ( identifier[path] )
keyword[return] keyword[True] | def writable_path(path):
"""Test whether a path can be written to.
"""
if os.path.exists(path):
return os.access(path, os.W_OK) # depends on [control=['if'], data=[]]
try:
with open(path, 'w'):
pass # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except (OSError, IOError):
return False # depends on [control=['except'], data=[]]
else:
os.remove(path)
return True |
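A usage sketch for writable_path, assuming the function above is in scope together with `import os`; note that probing a non-existent path briefly creates and then removes the file.

import os, tempfile

probe = os.path.join(tempfile.gettempdir(), 'writable_probe.tmp')
print(writable_path(probe))                  # usually True; the probe file is cleaned up
print(writable_path('/no_such_dir/probe'))   # False: open() raises OSError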
def get_qualification_score(self, qualification_type_id, worker_id):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SubjectId' : worker_id}
return self._process_request('GetQualificationScore', params,
[('Qualification', Qualification),]) | def function[get_qualification_score, parameter[self, qualification_type_id, worker_id]]:
constant[TODO: Document.]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b265a380>, <ast.Constant object at 0x7da1b2659cc0>], [<ast.Name object at 0x7da1b265a920>, <ast.Name object at 0x7da1b2658130>]]
return[call[name[self]._process_request, parameter[constant[GetQualificationScore], name[params], list[[<ast.Tuple object at 0x7da1b265ac80>]]]]] | keyword[def] identifier[get_qualification_score] ( identifier[self] , identifier[qualification_type_id] , identifier[worker_id] ):
literal[string]
identifier[params] ={ literal[string] : identifier[qualification_type_id] ,
literal[string] : identifier[worker_id] }
keyword[return] identifier[self] . identifier[_process_request] ( literal[string] , identifier[params] ,
[( literal[string] , identifier[Qualification] ),]) | def get_qualification_score(self, qualification_type_id, worker_id):
"""TODO: Document."""
params = {'QualificationTypeId': qualification_type_id, 'SubjectId': worker_id}
return self._process_request('GetQualificationScore', params, [('Qualification', Qualification)]) |
def get_data(self, doi_id, idx):
"""
Resolve DOI and compile all attributes into one dictionary
:param str doi_id:
:param int idx: Publication index
:return dict: Updated publication dictionary
"""
tmp_dict = self.root_dict['pub'][0].copy()
try:
# Send request to grab metadata at URL
url = "http://dx.doi.org/" + doi_id
headers = {"accept": "application/rdf+xml;q=0.5, application/citeproc+json;q=1.0"}
r = requests.get(url, headers=headers)
# DOI 404. Data not retrieved. Log and return original pub
if r.status_code == 404:
logger_doi_resolver.warn("doi.org STATUS: 404, {}".format(doi_id))
# Ignore other status codes. Run when status is 200 (good response)
elif r.status_code == 200:
logger_doi_resolver.info("doi.org STATUS: 200")
# Load data from http response
raw = json.loads(r.text)
# Create a new pub dictionary with metadata received
fetch_dict = self.compile_fetch(raw, doi_id)
# Compare the two pubs. Overwrite old data with new data where applicable
tmp_dict = self.compare_replace(tmp_dict, fetch_dict)
tmp_dict['pubDataUrl'] = 'doi.org'
self.root_dict['pub'][idx] = tmp_dict
except urllib.error.URLError as e:
logger_doi_resolver.warn("get_data: URLError: malformed doi: {}, {}".format(doi_id, e))
except ValueError as e:
logger_doi_resolver.warn("get_data: ValueError: cannot resolve dois from this publisher: {}, {}".format(doi_id, e))
return | def function[get_data, parameter[self, doi_id, idx]]:
constant[
Resolve DOI and compile all attributes into one dictionary
:param str doi_id:
:param int idx: Publication index
:return dict: Updated publication dictionary
]
variable[tmp_dict] assign[=] call[call[call[name[self].root_dict][constant[pub]]][constant[0]].copy, parameter[]]
<ast.Try object at 0x7da20c6c7eb0>
return[None] | keyword[def] identifier[get_data] ( identifier[self] , identifier[doi_id] , identifier[idx] ):
literal[string]
identifier[tmp_dict] = identifier[self] . identifier[root_dict] [ literal[string] ][ literal[int] ]. identifier[copy] ()
keyword[try] :
identifier[url] = literal[string] + identifier[doi_id]
identifier[headers] ={ literal[string] : literal[string] }
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] )
keyword[if] identifier[r] . identifier[status_code] == literal[int] :
identifier[logger_doi_resolver] . identifier[warn] ( literal[string] . identifier[format] ( identifier[doi_id] ))
keyword[elif] identifier[r] . identifier[status_code] == literal[int] :
identifier[logger_doi_resolver] . identifier[info] ( literal[string] )
identifier[raw] = identifier[json] . identifier[loads] ( identifier[r] . identifier[text] )
identifier[fetch_dict] = identifier[self] . identifier[compile_fetch] ( identifier[raw] , identifier[doi_id] )
identifier[tmp_dict] = identifier[self] . identifier[compare_replace] ( identifier[tmp_dict] , identifier[fetch_dict] )
identifier[tmp_dict] [ literal[string] ]= literal[string]
identifier[self] . identifier[root_dict] [ literal[string] ][ identifier[idx] ]= identifier[tmp_dict]
keyword[except] identifier[urllib] . identifier[error] . identifier[URLError] keyword[as] identifier[e] :
identifier[logger_doi_resolver] . identifier[warn] ( literal[string] . identifier[format] ( identifier[doi_id] , identifier[e] ))
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[logger_doi_resolver] . identifier[warn] ( literal[string] . identifier[format] ( identifier[doi_id] , identifier[e] ))
keyword[return] | def get_data(self, doi_id, idx):
"""
Resolve DOI and compile all attributes into one dictionary
:param str doi_id:
:param int idx: Publication index
:return dict: Updated publication dictionary
"""
tmp_dict = self.root_dict['pub'][0].copy()
try:
# Send request to grab metadata at URL
url = 'http://dx.doi.org/' + doi_id
headers = {'accept': 'application/rdf+xml;q=0.5, application/citeproc+json;q=1.0'}
r = requests.get(url, headers=headers)
# DOI 404. Data not retrieved. Log and return original pub
if r.status_code == 404:
logger_doi_resolver.warn('doi.org STATUS: 404, {}'.format(doi_id)) # depends on [control=['if'], data=[]]
# Ignore other status codes. Run when status is 200 (good response)
elif r.status_code == 200:
logger_doi_resolver.info('doi.org STATUS: 200')
# Load data from http response
raw = json.loads(r.text)
# Create a new pub dictionary with metadata received
fetch_dict = self.compile_fetch(raw, doi_id)
# Compare the two pubs. Overwrite old data with new data where applicable
tmp_dict = self.compare_replace(tmp_dict, fetch_dict)
tmp_dict['pubDataUrl'] = 'doi.org'
self.root_dict['pub'][idx] = tmp_dict # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except urllib.error.URLError as e:
logger_doi_resolver.warn('get_data: URLError: malformed doi: {}, {}'.format(doi_id, e)) # depends on [control=['except'], data=['e']]
except ValueError as e:
logger_doi_resolver.warn('get_data: ValueError: cannot resolve dois from this publisher: {}, {}'.format(doi_id, e)) # depends on [control=['except'], data=['e']]
return |
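The request above is standard DOI content negotiation: doi.org redirects to the registration agency, which serves citeproc JSON when asked via the Accept header. A standalone sketch (the example DOI and network access are assumptions):

import requests

def fetch_citeproc(doi):
    headers = {'accept': 'application/citeproc+json;q=1.0'}
    r = requests.get('https://doi.org/' + doi, headers=headers, timeout=10)
    r.raise_for_status()
    return r.json()  # title, author, issued, ... per the citeproc schema

# meta = fetch_citeproc('10.1000/xyz123')  # hypothetical DOI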
def _colorize(self, depth_im, color_im):
"""Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image.
"""
# Project the point cloud into the webcam's frame
target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3)
pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth)
# Sort the points by their distance from the webcam's aperture
pc_data = pc_color.data.T
dists = np.linalg.norm(pc_data, axis=1)
order = np.argsort(dists)
pc_data = pc_data[order]
pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
sorted_dists = dists[order]
sorted_depths = depth_im.data.flatten()[order]
# Generate image coordinates for each sorted point
icds = self._webcam.color_intrinsics.project(pc_color).data.T
# Create mask for points that are masked by others
rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True)
icd_depths = sorted_dists[unique_inds]
min_depths_pp = icd_depths[unique_inv]
depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3
# Create mask for points with missing depth or that lie outside the image
valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width),
np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height))
valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
valid_mask = np.logical_and(valid_mask, depth_delta_mask)
valid_icds = icds[valid_mask]
colors = color_im.data[valid_icds[:,1],valid_icds[:,0],:]
color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8)
color_im_data[valid_mask] = colors
color_im_data[order] = color_im_data.copy()
color_im_data = color_im_data.reshape(target_shape)
return ColorImage(color_im_data, frame=self._frame) | def function[_colorize, parameter[self, depth_im, color_im]]:
constant[Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image.
]
variable[target_shape] assign[=] tuple[[<ast.Subscript object at 0x7da1b056c4f0>, <ast.Subscript object at 0x7da1b056e1d0>, <ast.Constant object at 0x7da1b056c340>]]
variable[pc_depth] assign[=] call[name[self]._phoxi.ir_intrinsics.deproject, parameter[name[depth_im]]]
variable[pc_color] assign[=] call[call[call[name[self]._T_webcam_world.inverse, parameter[]].dot, parameter[name[self]._T_phoxi_world]].apply, parameter[name[pc_depth]]]
variable[pc_data] assign[=] name[pc_color].data.T
variable[dists] assign[=] call[name[np].linalg.norm, parameter[name[pc_data]]]
variable[order] assign[=] call[name[np].argsort, parameter[name[dists]]]
variable[pc_data] assign[=] call[name[pc_data]][name[order]]
variable[pc_color] assign[=] call[name[PointCloud], parameter[name[pc_data].T]]
variable[sorted_dists] assign[=] call[name[dists]][name[order]]
variable[sorted_depths] assign[=] call[call[name[depth_im].data.flatten, parameter[]]][name[order]]
variable[icds] assign[=] call[name[self]._webcam.color_intrinsics.project, parameter[name[pc_color]]].data.T
variable[rounded_icds] assign[=] call[name[np].array, parameter[binary_operation[name[icds] / constant[3.0]]]]
<ast.Tuple object at 0x7da1b04b24a0> assign[=] call[name[np].unique, parameter[name[rounded_icds]]]
variable[icd_depths] assign[=] call[name[sorted_dists]][name[unique_inds]]
variable[min_depths_pp] assign[=] call[name[icd_depths]][name[unique_inv]]
variable[depth_delta_mask] assign[=] compare[call[name[np].abs, parameter[binary_operation[name[min_depths_pp] - name[sorted_dists]]]] less[<] constant[0.005]]
variable[valid_mask] assign[=] call[name[np].logical_and, parameter[call[name[np].logical_and, parameter[compare[call[name[icds]][tuple[[<ast.Slice object at 0x7da1b0592650>, <ast.Constant object at 0x7da1b0591d20>]]] greater_or_equal[>=] constant[0]], compare[call[name[icds]][tuple[[<ast.Slice object at 0x7da1b05931f0>, <ast.Constant object at 0x7da1b0592110>]]] less[<] name[self]._webcam.color_intrinsics.width]]], call[name[np].logical_and, parameter[compare[call[name[icds]][tuple[[<ast.Slice object at 0x7da1b0590310>, <ast.Constant object at 0x7da1b05925c0>]]] greater_or_equal[>=] constant[0]], compare[call[name[icds]][tuple[[<ast.Slice object at 0x7da1b05925f0>, <ast.Constant object at 0x7da1b0590fa0>]]] less[<] name[self]._webcam.color_intrinsics.height]]]]]
variable[valid_mask] assign[=] call[name[np].logical_and, parameter[name[valid_mask], compare[name[sorted_depths] not_equal[!=] constant[0.0]]]]
variable[valid_mask] assign[=] call[name[np].logical_and, parameter[name[valid_mask], name[depth_delta_mask]]]
variable[valid_icds] assign[=] call[name[icds]][name[valid_mask]]
variable[colors] assign[=] call[name[color_im].data][tuple[[<ast.Subscript object at 0x7da1b0593640>, <ast.Subscript object at 0x7da1b0592fb0>, <ast.Slice object at 0x7da1b0591b10>]]]
variable[color_im_data] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da1b0592aa0>, <ast.Subscript object at 0x7da20c9910c0>]]]]
call[name[color_im_data]][name[valid_mask]] assign[=] name[colors]
call[name[color_im_data]][name[order]] assign[=] call[name[color_im_data].copy, parameter[]]
variable[color_im_data] assign[=] call[name[color_im_data].reshape, parameter[name[target_shape]]]
return[call[name[ColorImage], parameter[name[color_im_data]]]] | keyword[def] identifier[_colorize] ( identifier[self] , identifier[depth_im] , identifier[color_im] ):
literal[string]
identifier[target_shape] =( identifier[depth_im] . identifier[data] . identifier[shape] [ literal[int] ], identifier[depth_im] . identifier[data] . identifier[shape] [ literal[int] ], literal[int] )
identifier[pc_depth] = identifier[self] . identifier[_phoxi] . identifier[ir_intrinsics] . identifier[deproject] ( identifier[depth_im] )
identifier[pc_color] = identifier[self] . identifier[_T_webcam_world] . identifier[inverse] (). identifier[dot] ( identifier[self] . identifier[_T_phoxi_world] ). identifier[apply] ( identifier[pc_depth] )
identifier[pc_data] = identifier[pc_color] . identifier[data] . identifier[T]
identifier[dists] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[pc_data] , identifier[axis] = literal[int] )
identifier[order] = identifier[np] . identifier[argsort] ( identifier[dists] )
identifier[pc_data] = identifier[pc_data] [ identifier[order] ]
identifier[pc_color] = identifier[PointCloud] ( identifier[pc_data] . identifier[T] , identifier[frame] = identifier[self] . identifier[_webcam] . identifier[color_intrinsics] . identifier[frame] )
identifier[sorted_dists] = identifier[dists] [ identifier[order] ]
identifier[sorted_depths] = identifier[depth_im] . identifier[data] . identifier[flatten] ()[ identifier[order] ]
identifier[icds] = identifier[self] . identifier[_webcam] . identifier[color_intrinsics] . identifier[project] ( identifier[pc_color] ). identifier[data] . identifier[T]
identifier[rounded_icds] = identifier[np] . identifier[array] ( identifier[icds] / literal[int] , identifier[dtype] = identifier[np] . identifier[uint32] )
identifier[unique_icds] , identifier[unique_inds] , identifier[unique_inv] = identifier[np] . identifier[unique] ( identifier[rounded_icds] , identifier[axis] = literal[int] , identifier[return_index] = keyword[True] , identifier[return_inverse] = keyword[True] )
identifier[icd_depths] = identifier[sorted_dists] [ identifier[unique_inds] ]
identifier[min_depths_pp] = identifier[icd_depths] [ identifier[unique_inv] ]
identifier[depth_delta_mask] = identifier[np] . identifier[abs] ( identifier[min_depths_pp] - identifier[sorted_dists] )< literal[int]
identifier[valid_mask] = identifier[np] . identifier[logical_and] ( identifier[np] . identifier[logical_and] ( identifier[icds] [:, literal[int] ]>= literal[int] , identifier[icds] [:, literal[int] ]< identifier[self] . identifier[_webcam] . identifier[color_intrinsics] . identifier[width] ),
identifier[np] . identifier[logical_and] ( identifier[icds] [:, literal[int] ]>= literal[int] , identifier[icds] [:, literal[int] ]< identifier[self] . identifier[_webcam] . identifier[color_intrinsics] . identifier[height] ))
identifier[valid_mask] = identifier[np] . identifier[logical_and] ( identifier[valid_mask] , identifier[sorted_depths] != literal[int] )
identifier[valid_mask] = identifier[np] . identifier[logical_and] ( identifier[valid_mask] , identifier[depth_delta_mask] )
identifier[valid_icds] = identifier[icds] [ identifier[valid_mask] ]
identifier[colors] = identifier[color_im] . identifier[data] [ identifier[valid_icds] [:, literal[int] ], identifier[valid_icds] [:, literal[int] ],:]
identifier[color_im_data] = identifier[np] . identifier[zeros] (( identifier[target_shape] [ literal[int] ]* identifier[target_shape] [ literal[int] ], identifier[target_shape] [ literal[int] ]), identifier[dtype] = identifier[np] . identifier[uint8] )
identifier[color_im_data] [ identifier[valid_mask] ]= identifier[colors]
identifier[color_im_data] [ identifier[order] ]= identifier[color_im_data] . identifier[copy] ()
identifier[color_im_data] = identifier[color_im_data] . identifier[reshape] ( identifier[target_shape] )
keyword[return] identifier[ColorImage] ( identifier[color_im_data] , identifier[frame] = identifier[self] . identifier[_frame] ) | def _colorize(self, depth_im, color_im):
"""Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image.
"""
# Project the point cloud into the webcam's frame
target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3)
pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth)
# Sort the points by their distance from the webcam's aperture
pc_data = pc_color.data.T
dists = np.linalg.norm(pc_data, axis=1)
order = np.argsort(dists)
pc_data = pc_data[order]
pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
sorted_dists = dists[order]
sorted_depths = depth_im.data.flatten()[order]
# Generate image coordinates for each sorted point
icds = self._webcam.color_intrinsics.project(pc_color).data.T
# Create mask for points that are masked by others
rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
(unique_icds, unique_inds, unique_inv) = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True)
icd_depths = sorted_dists[unique_inds]
min_depths_pp = icd_depths[unique_inv]
depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 0.005
# Create mask for points with missing depth or that lie outside the image
valid_mask = np.logical_and(np.logical_and(icds[:, 0] >= 0, icds[:, 0] < self._webcam.color_intrinsics.width), np.logical_and(icds[:, 1] >= 0, icds[:, 1] < self._webcam.color_intrinsics.height))
valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
valid_mask = np.logical_and(valid_mask, depth_delta_mask)
valid_icds = icds[valid_mask]
colors = color_im.data[valid_icds[:, 1], valid_icds[:, 0], :]
color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8)
color_im_data[valid_mask] = colors
color_im_data[order] = color_im_data.copy()
color_im_data = color_im_data.reshape(target_shape)
return ColorImage(color_im_data, frame=self._frame) |
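The occlusion filter in _colorize relies on np.unique keeping the first occurrence of each quantized pixel cell, which after the distance sort is the nearest point. A tiny synthetic illustration of just that step:

import numpy as np

dists = np.array([2.0, 1.0, 1.5, 0.9])
pixels = np.array([[5, 5], [5, 5], [7, 2], [7, 2]])  # two points land in each cell
order = np.argsort(dists)
cells, first_idx, inv = np.unique(pixels[order], axis=0,
                                  return_index=True, return_inverse=True)
nearest = dists[order][first_idx][inv]      # nearest distance per cell, per point
visible = np.isclose(dists[order], nearest)
# visible == [True, True, False, False]: only the closest point in each cell survives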
def _extend_resources_paths():
"""
Extend resources paths.
"""
for path in (os.path.join(umbra.__path__[0], Constants.resources_directory),
os.path.join(os.getcwd(), umbra.__name__, Constants.resources_directory)):
path = os.path.normpath(path)
if foundations.common.path_exists(path):
path not in RuntimeGlobals.resources_directories and RuntimeGlobals.resources_directories.append(path) | def function[_extend_resources_paths, parameter[]]:
constant[
Extend resources paths.
]
for taget[name[path]] in starred[tuple[[<ast.Call object at 0x7da1b09b89a0>, <ast.Call object at 0x7da1b09b9930>]]] begin[:]
variable[path] assign[=] call[name[os].path.normpath, parameter[name[path]]]
if call[name[foundations].common.path_exists, parameter[name[path]]] begin[:]
<ast.BoolOp object at 0x7da1b09bbc40> | keyword[def] identifier[_extend_resources_paths] ():
literal[string]
keyword[for] identifier[path] keyword[in] ( identifier[os] . identifier[path] . identifier[join] ( identifier[umbra] . identifier[__path__] [ literal[int] ], identifier[Constants] . identifier[resources_directory] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), identifier[umbra] . identifier[__name__] , identifier[Constants] . identifier[resources_directory] )):
identifier[path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[path] )
keyword[if] identifier[foundations] . identifier[common] . identifier[path_exists] ( identifier[path] ):
identifier[path] keyword[not] keyword[in] identifier[RuntimeGlobals] . identifier[resources_directories] keyword[and] identifier[RuntimeGlobals] . identifier[resources_directories] . identifier[append] ( identifier[path] ) | def _extend_resources_paths():
"""
Extend resources paths.
"""
for path in (os.path.join(umbra.__path__[0], Constants.resources_directory), os.path.join(os.getcwd(), umbra.__name__, Constants.resources_directory)):
path = os.path.normpath(path)
if foundations.common.path_exists(path):
path not in RuntimeGlobals.resources_directories and RuntimeGlobals.resources_directories.append(path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']] |
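The last line uses a short-circuit `and` purely for its side effect; an equivalent, plainer spelling of that statement is:

if path not in RuntimeGlobals.resources_directories:
    RuntimeGlobals.resources_directories.append(path)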
def as_uni_field(field):
"""
Renders a form field like a django-uni-form field::
{% load uni_form_tags %}
{{ form.field|as_uni_field }}
"""
template = get_template('uni_form/field.html')
c = Context({'field':field})
return template.render(c) | def function[as_uni_field, parameter[field]]:
constant[
Renders a form field like a django-uni-form field::
{% load uni_form_tags %}
{{ form.field|as_uni_field }}
]
variable[template] assign[=] call[name[get_template], parameter[constant[uni_form/field.html]]]
variable[c] assign[=] call[name[Context], parameter[dictionary[[<ast.Constant object at 0x7da20c9936d0>], [<ast.Name object at 0x7da20c992560>]]]]
return[call[name[template].render, parameter[name[c]]]] | keyword[def] identifier[as_uni_field] ( identifier[field] ):
literal[string]
identifier[template] = identifier[get_template] ( literal[string] )
identifier[c] = identifier[Context] ({ literal[string] : identifier[field] })
keyword[return] identifier[template] . identifier[render] ( identifier[c] ) | def as_uni_field(field):
"""
Renders a form field like a django-uni-form field::
{% load uni_form_tags %}
{{ form.field|as_uni_field }}
"""
template = get_template('uni_form/field.html')
c = Context({'field': field})
return template.render(c) |
def install(self, **kwargs):
"""
Installs the app in the current user's account.
"""
if self._dxid is not None:
return dxpy.api.app_install(self._dxid, **kwargs)
else:
return dxpy.api.app_install('app-' + self._name, alias=self._alias, **kwargs) | def function[install, parameter[self]]:
constant[
Installs the app in the current user's account.
]
if compare[name[self]._dxid is_not constant[None]] begin[:]
return[call[name[dxpy].api.app_install, parameter[name[self]._dxid]]] | keyword[def] identifier[install] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[_dxid] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[dxpy] . identifier[api] . identifier[app_install] ( identifier[self] . identifier[_dxid] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[dxpy] . identifier[api] . identifier[app_install] ( literal[string] + identifier[self] . identifier[_name] , identifier[alias] = identifier[self] . identifier[_alias] ,** identifier[kwargs] ) | def install(self, **kwargs):
"""
Installs the app in the current user's account.
"""
if self._dxid is not None:
return dxpy.api.app_install(self._dxid, **kwargs) # depends on [control=['if'], data=[]]
else:
return dxpy.api.app_install('app-' + self._name, alias=self._alias, **kwargs) |
def global_set(self, key, value):
"""Set ``key`` to ``value`` globally (not at any particular branch or
revision)
"""
(key, value) = map(self.pack, (key, value))
try:
return self.sql('global_insert', key, value)
except IntegrityError:
return self.sql('global_update', value, key) | def function[global_set, parameter[self, key, value]]:
constant[Set ``key`` to ``value`` globally (not at any particular branch or
revision)
]
<ast.Tuple object at 0x7da207f022c0> assign[=] call[name[map], parameter[name[self].pack, tuple[[<ast.Name object at 0x7da207f030a0>, <ast.Name object at 0x7da207f006a0>]]]]
<ast.Try object at 0x7da207f027a0> | keyword[def] identifier[global_set] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
( identifier[key] , identifier[value] )= identifier[map] ( identifier[self] . identifier[pack] ,( identifier[key] , identifier[value] ))
keyword[try] :
keyword[return] identifier[self] . identifier[sql] ( literal[string] , identifier[key] , identifier[value] )
keyword[except] identifier[IntegrityError] :
keyword[return] identifier[self] . identifier[sql] ( literal[string] , identifier[value] , identifier[key] ) | def global_set(self, key, value):
"""Set ``key`` to ``value`` globally (not at any particular branch or
revision)
"""
(key, value) = map(self.pack, (key, value))
try:
return self.sql('global_insert', key, value) # depends on [control=['try'], data=[]]
except IntegrityError:
return self.sql('global_update', value, key) # depends on [control=['except'], data=[]] |
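global_set above is an insert-then-update upsert keyed on an integrity violation. A self-contained sketch of the same pattern with sqlite3 (table and statement names are made up for illustration):

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE globals (key TEXT PRIMARY KEY, value TEXT)')

def global_set(key, value):
    try:
        con.execute('INSERT INTO globals (key, value) VALUES (?, ?)', (key, value))
    except sqlite3.IntegrityError:   # primary-key clash: row exists, so update it
        con.execute('UPDATE globals SET value = ? WHERE key = ?', (value, key))

global_set('branch', 'trunk')
global_set('branch', 'main')         # second call takes the UPDATE path
assert con.execute('SELECT value FROM globals WHERE key = ?',
                   ('branch',)).fetchone()[0] == 'main'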
def subscribe(self, topic, channel):
"""Subscribe to a nsq `topic` and `channel`."""
self.send(nsq.subscribe(topic, channel)) | def function[subscribe, parameter[self, topic, channel]]:
constant[Subscribe to a nsq `topic` and `channel`.]
call[name[self].send, parameter[call[name[nsq].subscribe, parameter[name[topic], name[channel]]]]] | keyword[def] identifier[subscribe] ( identifier[self] , identifier[topic] , identifier[channel] ):
literal[string]
identifier[self] . identifier[send] ( identifier[nsq] . identifier[subscribe] ( identifier[topic] , identifier[channel] )) | def subscribe(self, topic, channel):
"""Subscribe to a nsq `topic` and `channel`."""
self.send(nsq.subscribe(topic, channel)) |
def truncated_normal_expval(mu, tau, a, b):
"""Expected value of the truncated normal distribution.
.. math::
E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T}
where
.. math::
T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi
\left(\frac{A-\mu}{\sigma}\right) \\
\varphi_1 &=
\varphi\left(\frac{A-\mu}{\sigma}\right) \\
\varphi_2 &=
\varphi\left(\frac{B-\mu}{\sigma}\right) \\
where :math:`\varphi` is the standard normal density :math:`N(0,1)` and :math:`\tau = 1/\sigma^2`.
:Parameters:
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution.
"""
phia = np.exp(normal_like(a, mu, tau))
phib = np.exp(normal_like(b, mu, tau))
sigma = 1. / np.sqrt(tau)
Phia = utils.normcdf((a - mu) / sigma)
if b == np.inf:
Phib = 1.0
else:
Phib = utils.normcdf((b - mu) / sigma)
return (mu + (phia - phib) / (Phib - Phia))[0] | def function[truncated_normal_expval, parameter[mu, tau, a, b]]:
constant[Expected value of the truncated normal distribution.
.. math::
E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T}
where
.. math::
T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi
\left(\frac{A-\mu}{\sigma}\right) \\
\varphi_1 &=
\varphi\left(\frac{A-\mu}{\sigma}\right) \\
\varphi_2 &=
\varphi\left(\frac{B-\mu}{\sigma}\right) \\
where :math:`\varphi` is the standard normal density :math:`N(0,1)` and :math:`\tau = 1/\sigma^2`.
:Parameters:
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution.
]
variable[phia] assign[=] call[name[np].exp, parameter[call[name[normal_like], parameter[name[a], name[mu], name[tau]]]]]
variable[phib] assign[=] call[name[np].exp, parameter[call[name[normal_like], parameter[name[b], name[mu], name[tau]]]]]
variable[sigma] assign[=] binary_operation[constant[1.0] / call[name[np].sqrt, parameter[name[tau]]]]
variable[Phia] assign[=] call[name[utils].normcdf, parameter[binary_operation[binary_operation[name[a] - name[mu]] / name[sigma]]]]
if compare[name[b] equal[==] name[np].inf] begin[:]
variable[Phib] assign[=] constant[1.0]
return[call[binary_operation[name[mu] + binary_operation[binary_operation[name[phia] - name[phib]] / binary_operation[name[Phib] - name[Phia]]]]][constant[0]]] | keyword[def] identifier[truncated_normal_expval] ( identifier[mu] , identifier[tau] , identifier[a] , identifier[b] ):
literal[string]
identifier[phia] = identifier[np] . identifier[exp] ( identifier[normal_like] ( identifier[a] , identifier[mu] , identifier[tau] ))
identifier[phib] = identifier[np] . identifier[exp] ( identifier[normal_like] ( identifier[b] , identifier[mu] , identifier[tau] ))
identifier[sigma] = literal[int] / identifier[np] . identifier[sqrt] ( identifier[tau] )
identifier[Phia] = identifier[utils] . identifier[normcdf] (( identifier[a] - identifier[mu] )/ identifier[sigma] )
keyword[if] identifier[b] == identifier[np] . identifier[inf] :
identifier[Phib] = literal[int]
keyword[else] :
identifier[Phib] = identifier[utils] . identifier[normcdf] (( identifier[b] - identifier[mu] )/ identifier[sigma] )
keyword[return] ( identifier[mu] +( identifier[phia] - identifier[phib] )/( identifier[Phib] - identifier[Phia] ))[ literal[int] ] | def truncated_normal_expval(mu, tau, a, b):
"""Expected value of the truncated normal distribution.
.. math::
E(X) =\\mu + \x0crac{\\sigma(\x0barphi_1-\x0barphi_2)}{T}
where
.. math::
T & =\\Phi\\left(\x0crac{B-\\mu}{\\sigma}\right)-\\Phi
\\left(\x0crac{A-\\mu}{\\sigma}\right) ext \\
\x0barphi_1 &=
\x0barphi\\left(\x0crac{A-\\mu}{\\sigma}\right) \\
\x0barphi_2 &=
\x0barphi\\left(\x0crac{B-\\mu}{\\sigma}\right) \\
and :math:`\x0barphi = N(0,1)` and :math:`tau & 1/sigma**2`.
:Parameters:
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution.
"""
phia = np.exp(normal_like(a, mu, tau))
phib = np.exp(normal_like(b, mu, tau))
sigma = 1.0 / np.sqrt(tau)
Phia = utils.normcdf((a - mu) / sigma)
if b == np.inf:
Phib = 1.0 # depends on [control=['if'], data=[]]
else:
Phib = utils.normcdf((b - mu) / sigma)
return (mu + (phia - phib) / (Phib - Phia))[0] |
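As a numeric cross-check of the docstring's formula (not of this particular implementation), scipy parameterizes the truncated normal by the standardized bounds alpha and beta:

import numpy as np
from scipy import stats

mu, sigma, a, b = 0.0, 1.0, -1.0, 2.0
alpha, beta = (a - mu) / sigma, (b - mu) / sigma
Z = stats.norm.cdf(beta) - stats.norm.cdf(alpha)
mean = mu + sigma * (stats.norm.pdf(alpha) - stats.norm.pdf(beta)) / Z
assert np.isclose(mean, stats.truncnorm.mean(alpha, beta, loc=mu, scale=sigma))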
def hash_napiprojekt(video_path):
"""Compute a hash using NapiProjekt's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
readsize = 1024 * 1024 * 10
with open(video_path, 'rb') as f:
data = f.read(readsize)
return hashlib.md5(data).hexdigest() | def function[hash_napiprojekt, parameter[video_path]]:
constant[Compute a hash using NapiProjekt's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
]
variable[readsize] assign[=] binary_operation[binary_operation[constant[1024] * constant[1024]] * constant[10]]
with call[name[open], parameter[name[video_path], constant[rb]]] begin[:]
variable[data] assign[=] call[name[f].read, parameter[name[readsize]]]
return[call[call[name[hashlib].md5, parameter[name[data]]].hexdigest, parameter[]]] | keyword[def] identifier[hash_napiprojekt] ( identifier[video_path] ):
literal[string]
identifier[readsize] = literal[int] * literal[int] * literal[int]
keyword[with] identifier[open] ( identifier[video_path] , literal[string] ) keyword[as] identifier[f] :
identifier[data] = identifier[f] . identifier[read] ( identifier[readsize] )
keyword[return] identifier[hashlib] . identifier[md5] ( identifier[data] ). identifier[hexdigest] () | def hash_napiprojekt(video_path):
"""Compute a hash using NapiProjekt's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
readsize = 1024 * 1024 * 10
with open(video_path, 'rb') as f:
data = f.read(readsize) # depends on [control=['with'], data=['f']]
return hashlib.md5(data).hexdigest() |
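Since the hash is just the MD5 of the first 10 MiB, any file smaller than that hashes its entire contents — a quick throwaway check (assumes the function above plus `import hashlib` are in scope):

import hashlib, os, tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'not really a video')
    tmp = f.name
assert hash_napiprojekt(tmp) == hashlib.md5(b'not really a video').hexdigest()
os.remove(tmp)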
def detail(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID = None,
) -> ChannelDetails:
""" Returns a ChannelDetails instance with all the details of the
channel and the channel participants.
Note:
For now one of the participants has to be the node_address
"""
if self.node_address not in (participant1, participant2):
raise ValueError('One participant must be the node address')
if self.node_address == participant2:
participant1, participant2 = participant2, participant1
channel_data = self._detail_channel(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
participants_data = self.detail_participants(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_data.channel_identifier,
)
chain_id = self.proxy.contract.functions.chain_id().call()
return ChannelDetails(
chain_id=chain_id,
channel_data=channel_data,
participants_data=participants_data,
) | def function[detail, parameter[self, participant1, participant2, block_identifier, channel_identifier]]:
constant[ Returns a ChannelDetails instance with all the details of the
channel and the channel participants.
Note:
For now one of the participants has to be the node_address
]
if compare[name[self].node_address <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Name object at 0x7da1b19dabc0>, <ast.Name object at 0x7da1b19daad0>]]] begin[:]
<ast.Raise object at 0x7da1b19d97b0>
if compare[name[self].node_address equal[==] name[participant2]] begin[:]
<ast.Tuple object at 0x7da1b19db670> assign[=] tuple[[<ast.Name object at 0x7da1b19db790>, <ast.Name object at 0x7da1b19d8790>]]
variable[channel_data] assign[=] call[name[self]._detail_channel, parameter[]]
variable[participants_data] assign[=] call[name[self].detail_participants, parameter[]]
variable[chain_id] assign[=] call[call[name[self].proxy.contract.functions.chain_id, parameter[]].call, parameter[]]
return[call[name[ChannelDetails], parameter[]]] | keyword[def] identifier[detail] (
identifier[self] ,
identifier[participant1] : identifier[Address] ,
identifier[participant2] : identifier[Address] ,
identifier[block_identifier] : identifier[BlockSpecification] ,
identifier[channel_identifier] : identifier[ChannelID] = keyword[None] ,
)-> identifier[ChannelDetails] :
literal[string]
keyword[if] identifier[self] . identifier[node_address] keyword[not] keyword[in] ( identifier[participant1] , identifier[participant2] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[node_address] == identifier[participant2] :
identifier[participant1] , identifier[participant2] = identifier[participant2] , identifier[participant1]
identifier[channel_data] = identifier[self] . identifier[_detail_channel] (
identifier[participant1] = identifier[participant1] ,
identifier[participant2] = identifier[participant2] ,
identifier[block_identifier] = identifier[block_identifier] ,
identifier[channel_identifier] = identifier[channel_identifier] ,
)
identifier[participants_data] = identifier[self] . identifier[detail_participants] (
identifier[participant1] = identifier[participant1] ,
identifier[participant2] = identifier[participant2] ,
identifier[block_identifier] = identifier[block_identifier] ,
identifier[channel_identifier] = identifier[channel_data] . identifier[channel_identifier] ,
)
identifier[chain_id] = identifier[self] . identifier[proxy] . identifier[contract] . identifier[functions] . identifier[chain_id] (). identifier[call] ()
keyword[return] identifier[ChannelDetails] (
identifier[chain_id] = identifier[chain_id] ,
identifier[channel_data] = identifier[channel_data] ,
identifier[participants_data] = identifier[participants_data] ,
) | def detail(self, participant1: Address, participant2: Address, block_identifier: BlockSpecification, channel_identifier: ChannelID=None) -> ChannelDetails:
""" Returns a ChannelDetails instance with all the details of the
channel and the channel participants.
Note:
For now one of the participants has to be the node_address
"""
if self.node_address not in (participant1, participant2):
raise ValueError('One participant must be the node address') # depends on [control=['if'], data=[]]
if self.node_address == participant2:
(participant1, participant2) = (participant2, participant1) # depends on [control=['if'], data=['participant2']]
channel_data = self._detail_channel(participant1=participant1, participant2=participant2, block_identifier=block_identifier, channel_identifier=channel_identifier)
participants_data = self.detail_participants(participant1=participant1, participant2=participant2, block_identifier=block_identifier, channel_identifier=channel_data.channel_identifier)
chain_id = self.proxy.contract.functions.chain_id().call()
return ChannelDetails(chain_id=chain_id, channel_data=channel_data, participants_data=participants_data) |
def add_condor_job(self, token, batchmaketaskid, jobdefinitionfilename,
outputfilename, errorfilename, logfilename,
postfilename):
"""
Add a Condor DAG job to the Condor DAG associated with this
Batchmake task
:param token: A valid token for the user in question.
:type token: string
:param batchmaketaskid: id of the Batchmake task for this DAG
:type batchmaketaskid: int | long
:param jobdefinitionfilename: Filename of the definition file for the
job
:type jobdefinitionfilename: string
:param outputfilename: Filename of the output file for the job
:type outputfilename: string
:param errorfilename: Filename of the error file for the job
:type errorfilename: string
:param logfilename: Filename of the log file for the job
:type logfilename: string
:param postfilename: Filename of the post script log file for the job
:type postfilename: string
:return: The created Condor job DAO.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['batchmaketaskid'] = batchmaketaskid
parameters['jobdefinitionfilename'] = jobdefinitionfilename
parameters['outputfilename'] = outputfilename
parameters['errorfilename'] = errorfilename
parameters['logfilename'] = logfilename
parameters['postfilename'] = postfilename
response = self.request('midas.batchmake.add.condor.job', parameters)
return response | def function[add_condor_job, parameter[self, token, batchmaketaskid, jobdefinitionfilename, outputfilename, errorfilename, logfilename, postfilename]]:
constant[
Add a Condor DAG job to the Condor DAG associated with this
Batchmake task
:param token: A valid token for the user in question.
:type token: string
:param batchmaketaskid: id of the Batchmake task for this DAG
:type batchmaketaskid: int | long
:param jobdefinitionfilename: Filename of the definition file for the
job
:type jobdefinitionfilename: string
:param outputfilename: Filename of the output file for the job
:type outputfilename: string
:param errorfilename: Filename of the error file for the job
:type errorfilename: string
:param logfilename: Filename of the log file for the job
:type logfilename: string
:param postfilename: Filename of the post script log file for the job
:type postfilename: string
:return: The created Condor job DAO.
:rtype: dict
]
variable[parameters] assign[=] call[name[dict], parameter[]]
call[name[parameters]][constant[token]] assign[=] name[token]
call[name[parameters]][constant[batchmaketaskid]] assign[=] name[batchmaketaskid]
call[name[parameters]][constant[jobdefinitionfilename]] assign[=] name[jobdefinitionfilename]
call[name[parameters]][constant[outputfilename]] assign[=] name[outputfilename]
call[name[parameters]][constant[errorfilename]] assign[=] name[errorfilename]
call[name[parameters]][constant[logfilename]] assign[=] name[logfilename]
call[name[parameters]][constant[postfilename]] assign[=] name[postfilename]
variable[response] assign[=] call[name[self].request, parameter[constant[midas.batchmake.add.condor.job], name[parameters]]]
return[name[response]] | keyword[def] identifier[add_condor_job] ( identifier[self] , identifier[token] , identifier[batchmaketaskid] , identifier[jobdefinitionfilename] ,
identifier[outputfilename] , identifier[errorfilename] , identifier[logfilename] ,
identifier[postfilename] ):
literal[string]
identifier[parameters] = identifier[dict] ()
identifier[parameters] [ literal[string] ]= identifier[token]
identifier[parameters] [ literal[string] ]= identifier[batchmaketaskid]
identifier[parameters] [ literal[string] ]= identifier[jobdefinitionfilename]
identifier[parameters] [ literal[string] ]= identifier[outputfilename]
identifier[parameters] [ literal[string] ]= identifier[errorfilename]
identifier[parameters] [ literal[string] ]= identifier[logfilename]
identifier[parameters] [ literal[string] ]= identifier[postfilename]
identifier[response] = identifier[self] . identifier[request] ( literal[string] , identifier[parameters] )
keyword[return] identifier[response] | def add_condor_job(self, token, batchmaketaskid, jobdefinitionfilename, outputfilename, errorfilename, logfilename, postfilename):
"""
Add a Condor DAG job to the Condor DAG associated with this
Batchmake task
:param token: A valid token for the user in question.
:type token: string
:param batchmaketaskid: id of the Batchmake task for this DAG
:type batchmaketaskid: int | long
:param jobdefinitionfilename: Filename of the definition file for the
job
:type jobdefinitionfilename: string
:param outputfilename: Filename of the output file for the job
:type outputfilename: string
:param errorfilename: Filename of the error file for the job
:type errorfilename: string
:param logfilename: Filename of the log file for the job
:type logfilename: string
:param postfilename: Filename of the post script log file for the job
:type postfilename: string
:return: The created Condor job DAO.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['batchmaketaskid'] = batchmaketaskid
parameters['jobdefinitionfilename'] = jobdefinitionfilename
parameters['outputfilename'] = outputfilename
parameters['errorfilename'] = errorfilename
parameters['logfilename'] = logfilename
parameters['postfilename'] = postfilename
response = self.request('midas.batchmake.add.condor.job', parameters)
return response |
def register_model_converter(model, app):
"""Add url converter for model
Example:
class Student(db.model):
id = Column(Integer, primary_key=True)
name = Column(String(50))
register_model_converter(Student)
@route('/classmates/<Student:classmate>')
def get_classmate_info(classmate):
pass
This only supports models that have a single primary key.
You need to call this function before creating view functions.
"""
if hasattr(model, 'id'):
class Converter(_ModelConverter):
_model = model
app.url_map.converters[model.__name__] = Converter | def function[register_model_converter, parameter[model, app]]:
constant[Add url converter for model
Example:
class Student(db.model):
id = Column(Integer, primary_key=True)
name = Column(String(50))
register_model_converter(Student)
@route('/classmates/<Student:classmate>')
def get_classmate_info(classmate):
pass
This only supports models that have a single primary key.
You need to call this function before creating view functions.
]
if call[name[hasattr], parameter[name[model], constant[id]]] begin[:]
class class[Converter, parameter[]] begin[:]
variable[_model] assign[=] name[model]
call[name[app].url_map.converters][name[model].__name__] assign[=] name[Converter] | keyword[def] identifier[register_model_converter] ( identifier[model] , identifier[app] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[model] , literal[string] ):
keyword[class] identifier[Converter] ( identifier[_ModelConverter] ):
identifier[_model] = identifier[model]
identifier[app] . identifier[url_map] . identifier[converters] [ identifier[model] . identifier[__name__] ]= identifier[Converter] | def register_model_converter(model, app):
"""Add url converter for model
Example:
class Student(db.model):
id = Column(Integer, primary_key=True)
name = Column(String(50))
register_model_converter(Student)
@route('/classmates/<Student:classmate>')
def get_classmate_info(classmate):
pass
This only supports models that have a single primary key.
You need to call this function before creating view functions.
"""
if hasattr(model, 'id'):
class Converter(_ModelConverter):
_model = model
app.url_map.converters[model.__name__] = Converter # depends on [control=['if'], data=[]] |
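The _ModelConverter base referenced above is not shown; a plausible minimal version for Werkzeug might look like the sketch below. The `query.get` lookup is a Flask-SQLAlchemy-style assumption, not something the snippet above guarantees.

from werkzeug.routing import BaseConverter

class _ModelConverter(BaseConverter):
    _model = None

    def to_python(self, value):
        # resolve the URL fragment to a model instance via its primary key
        return self._model.query.get(int(value))

    def to_url(self, obj):
        # serialize back to the primary key for url_for()
        return str(obj.id)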
def psf_slice(self, zint, size=11, zoffset=0., getextent=False):
"""
Calculates the 3D psf at a particular z pixel height
Parameters
----------
zint : float
z pixel height in image coordinates , converted to 1/k by the
function using the slab position as well
size : int, list, tuple
The size over which to calculate the psf, can be 1 or 3 elements
for the different axes in image pixel coordinates
zoffset : float
Offset in pixel units to use in the calculation of the psf
cutval : float
If not None, the psf will be cut along a curve corresponding to
p(r) == 0 with exponential damping exp(-d^4)
getextent : boolean
If True, also return the extent of the psf in pixels for example
to get the support size. Can only be used with cutval.
"""
# calculate the current pixel value in 1/k, making sure we are above the slab
zint = max(self._p2k(self._tz(zint)), 0)
offset = np.array([zoffset*(zint>0), 0, 0])
scale = [self.param_dict[self.zscale], 1.0, 1.0]
# create the coordinate vectors for where to actually calculate the psf
tile = util.Tile(left=0, size=size, centered=True)
vecs = tile.coords(form='flat')
vecs = [self._p2k(s*i+o) for i,s,o in zip(vecs, scale, offset)]
psf = self.psffunc(*vecs[::-1], zint=zint, **self.pack_args()).T
vec = tile.coords(form='meshed')
# create a smoothly varying point spread function by cutting off the psf
# at a certain value and smoothly taking it to zero
if self.cutoffval is not None and not self.cutbyval:
# find the edges of the PSF
edge = psf > psf.max() * self.cutoffval
dd = nd.morphology.distance_transform_edt(~edge)
# calculate the new PSF and normalize it to the new support
psf = psf * np.exp(-dd**4)
psf /= psf.sum()
if getextent:
# the size is determined by the edge plus a 2 pad for the
# exponential damping to zero at the edge
size = np.array([
(vec*edge).min(axis=(1,2,3))-2,
(vec*edge).max(axis=(1,2,3))+2,
]).T
return psf, vec, size
return psf, vec
# perform a cut by value instead
if self.cutoffval is not None and self.cutbyval:
cutval = self.cutoffval * psf.max()
dd = (psf - cutval) / cutval
dd[dd > 0] = 0.
# calculate the new PSF and normalize it to the new support
psf = psf * np.exp(-(dd / self.cutfallrate)**4)
psf /= psf.sum()
# let the small values determine the edges
edge = psf > cutval * self.cutedgeval
if getextent:
# the size is determined by the edge plus a 2 pad for the
# exponential damping to zero at the edge
size = np.array([
(vec*edge).min(axis=(1,2,3))-2,
(vec*edge).max(axis=(1,2,3))+2,
]).T
return psf, vec, size
return psf, vec
return psf, vec | def function[psf_slice, parameter[self, zint, size, zoffset, getextent]]:
constant[
Calculates the 3D psf at a particular z pixel height
Parameters
----------
zint : float
z pixel height in image coordinates , converted to 1/k by the
function using the slab position as well
size : int, list, tuple
The size over which to calculate the psf, can be 1 or 3 elements
for the different axes in image pixel coordinates
zoffset : float
Offset in pixel units to use in the calculation of the psf
cutval : float
If not None, the psf will be cut along a curve corresponding to
p(r) == 0 with exponential damping exp(-d^4)
getextent : boolean
If True, also return the extent of the psf in pixels for example
to get the support size. Can only be used with cutval.
]
variable[zint] assign[=] call[name[max], parameter[call[name[self]._p2k, parameter[call[name[self]._tz, parameter[name[zint]]]]], constant[0]]]
variable[offset] assign[=] call[name[np].array, parameter[list[[<ast.BinOp object at 0x7da207f9a650>, <ast.Constant object at 0x7da207f9bb20>, <ast.Constant object at 0x7da207f98850>]]]]
variable[scale] assign[=] list[[<ast.Subscript object at 0x7da207f9b910>, <ast.Constant object at 0x7da207f9a980>, <ast.Constant object at 0x7da207f9a4a0>]]
variable[tile] assign[=] call[name[util].Tile, parameter[]]
variable[vecs] assign[=] call[name[tile].coords, parameter[]]
variable[vecs] assign[=] <ast.ListComp object at 0x7da207f9aad0>
variable[psf] assign[=] call[name[self].psffunc, parameter[<ast.Starred object at 0x7da207f9b010>]].T
variable[vec] assign[=] call[name[tile].coords, parameter[]]
if <ast.BoolOp object at 0x7da207f9a1a0> begin[:]
variable[edge] assign[=] compare[name[psf] greater[>] binary_operation[call[name[psf].max, parameter[]] * name[self].cutoffval]]
variable[dd] assign[=] call[name[nd].morphology.distance_transform_edt, parameter[<ast.UnaryOp object at 0x7da207f9a410>]]
variable[psf] assign[=] binary_operation[name[psf] * call[name[np].exp, parameter[<ast.UnaryOp object at 0x7da207f9acb0>]]]
<ast.AugAssign object at 0x7da207f9a0b0>
if name[getextent] begin[:]
variable[size] assign[=] call[name[np].array, parameter[list[[<ast.BinOp object at 0x7da207f98c10>, <ast.BinOp object at 0x7da18bc728f0>]]]].T
return[tuple[[<ast.Name object at 0x7da18bc70280>, <ast.Name object at 0x7da18bc73fd0>, <ast.Name object at 0x7da18bc72050>]]]
return[tuple[[<ast.Name object at 0x7da18bc71ed0>, <ast.Name object at 0x7da18bc71ff0>]]]
if <ast.BoolOp object at 0x7da18bc701f0> begin[:]
variable[cutval] assign[=] binary_operation[name[self].cutoffval * call[name[psf].max, parameter[]]]
variable[dd] assign[=] binary_operation[binary_operation[name[psf] - name[cutval]] / name[cutval]]
call[name[dd]][compare[name[dd] greater[>] constant[0]]] assign[=] constant[0.0]
variable[psf] assign[=] binary_operation[name[psf] * call[name[np].exp, parameter[<ast.UnaryOp object at 0x7da18bc70dc0>]]]
<ast.AugAssign object at 0x7da18bc72980>
variable[edge] assign[=] compare[name[psf] greater[>] binary_operation[name[cutval] * name[self].cutedgeval]]
if name[getextent] begin[:]
variable[size] assign[=] call[name[np].array, parameter[list[[<ast.BinOp object at 0x7da18bc718d0>, <ast.BinOp object at 0x7da18bc73f40>]]]].T
return[tuple[[<ast.Name object at 0x7da18bc70fd0>, <ast.Name object at 0x7da18bc73c40>, <ast.Name object at 0x7da18bc73640>]]]
return[tuple[[<ast.Name object at 0x7da18bc70160>, <ast.Name object at 0x7da18bc70070>]]]
return[tuple[[<ast.Name object at 0x7da18bc710f0>, <ast.Name object at 0x7da18bc725f0>]]] | keyword[def] identifier[psf_slice] ( identifier[self] , identifier[zint] , identifier[size] = literal[int] , identifier[zoffset] = literal[int] , identifier[getextent] = keyword[False] ):
literal[string]
identifier[zint] = identifier[max] ( identifier[self] . identifier[_p2k] ( identifier[self] . identifier[_tz] ( identifier[zint] )), literal[int] )
identifier[offset] = identifier[np] . identifier[array] ([ identifier[zoffset] *( identifier[zint] > literal[int] ), literal[int] , literal[int] ])
identifier[scale] =[ identifier[self] . identifier[param_dict] [ identifier[self] . identifier[zscale] ], literal[int] , literal[int] ]
identifier[tile] = identifier[util] . identifier[Tile] ( identifier[left] = literal[int] , identifier[size] = identifier[size] , identifier[centered] = keyword[True] )
identifier[vecs] = identifier[tile] . identifier[coords] ( identifier[form] = literal[string] )
identifier[vecs] =[ identifier[self] . identifier[_p2k] ( identifier[s] * identifier[i] + identifier[o] ) keyword[for] identifier[i] , identifier[s] , identifier[o] keyword[in] identifier[zip] ( identifier[vecs] , identifier[scale] , identifier[offset] )]
identifier[psf] = identifier[self] . identifier[psffunc] (* identifier[vecs] [::- literal[int] ], identifier[zint] = identifier[zint] ,** identifier[self] . identifier[pack_args] ()). identifier[T]
identifier[vec] = identifier[tile] . identifier[coords] ( identifier[form] = literal[string] )
keyword[if] identifier[self] . identifier[cutoffval] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[cutbyval] :
identifier[edge] = identifier[psf] > identifier[psf] . identifier[max] ()* identifier[self] . identifier[cutoffval]
identifier[dd] = identifier[nd] . identifier[morphology] . identifier[distance_transform_edt] (~ identifier[edge] )
identifier[psf] = identifier[psf] * identifier[np] . identifier[exp] (- identifier[dd] ** literal[int] )
identifier[psf] /= identifier[psf] . identifier[sum] ()
keyword[if] identifier[getextent] :
identifier[size] = identifier[np] . identifier[array] ([
( identifier[vec] * identifier[edge] ). identifier[min] ( identifier[axis] =( literal[int] , literal[int] , literal[int] ))- literal[int] ,
( identifier[vec] * identifier[edge] ). identifier[max] ( identifier[axis] =( literal[int] , literal[int] , literal[int] ))+ literal[int] ,
]). identifier[T]
keyword[return] identifier[psf] , identifier[vec] , identifier[size]
keyword[return] identifier[psf] , identifier[vec]
keyword[if] identifier[self] . identifier[cutoffval] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[cutbyval] :
identifier[cutval] = identifier[self] . identifier[cutoffval] * identifier[psf] . identifier[max] ()
identifier[dd] =( identifier[psf] - identifier[cutval] )/ identifier[cutval]
identifier[dd] [ identifier[dd] > literal[int] ]= literal[int]
identifier[psf] = identifier[psf] * identifier[np] . identifier[exp] (-( identifier[dd] / identifier[self] . identifier[cutfallrate] )** literal[int] )
identifier[psf] /= identifier[psf] . identifier[sum] ()
identifier[edge] = identifier[psf] > identifier[cutval] * identifier[self] . identifier[cutedgeval]
keyword[if] identifier[getextent] :
identifier[size] = identifier[np] . identifier[array] ([
( identifier[vec] * identifier[edge] ). identifier[min] ( identifier[axis] =( literal[int] , literal[int] , literal[int] ))- literal[int] ,
( identifier[vec] * identifier[edge] ). identifier[max] ( identifier[axis] =( literal[int] , literal[int] , literal[int] ))+ literal[int] ,
]). identifier[T]
keyword[return] identifier[psf] , identifier[vec] , identifier[size]
keyword[return] identifier[psf] , identifier[vec]
keyword[return] identifier[psf] , identifier[vec] | def psf_slice(self, zint, size=11, zoffset=0.0, getextent=False):
"""
Calculates the 3D psf at a particular z pixel height
Parameters
----------
zint : float
z pixel height in image coordinates , converted to 1/k by the
function using the slab position as well
size : int, list, tuple
The size over which to calculate the psf, can be 1 or 3 elements
for the different axes in image pixel coordinates
zoffset : float
Offset in pixel units to use in the calculation of the psf
cutval : float
If not None, the psf will be cut along a curve corresponding to
p(r) == 0 with exponential damping exp(-d^4)
getextent : boolean
If True, also return the extent of the psf in pixels for example
to get the support size. Can only be used with cutval.
"""
# calculate the current pixel value in 1/k, making sure we are above the slab
zint = max(self._p2k(self._tz(zint)), 0)
offset = np.array([zoffset * (zint > 0), 0, 0])
scale = [self.param_dict[self.zscale], 1.0, 1.0]
# create the coordinate vectors for where to actually calculate the psf
tile = util.Tile(left=0, size=size, centered=True)
vecs = tile.coords(form='flat')
vecs = [self._p2k(s * i + o) for (i, s, o) in zip(vecs, scale, offset)]
psf = self.psffunc(*vecs[::-1], zint=zint, **self.pack_args()).T
vec = tile.coords(form='meshed')
# create a smoothly varying point spread function by cutting off the psf
# at a certain value and smoothly taking it to zero
if self.cutoffval is not None and (not self.cutbyval):
# find the edges of the PSF
edge = psf > psf.max() * self.cutoffval
dd = nd.morphology.distance_transform_edt(~edge)
# calculate the new PSF and normalize it to the new support
psf = psf * np.exp(-dd ** 4)
psf /= psf.sum()
if getextent:
# the size is determined by the edge plus a 2 pad for the
# exponential damping to zero at the edge
size = np.array([(vec * edge).min(axis=(1, 2, 3)) - 2, (vec * edge).max(axis=(1, 2, 3)) + 2]).T
return (psf, vec, size) # depends on [control=['if'], data=[]]
return (psf, vec) # depends on [control=['if'], data=[]]
# perform a cut by value instead
if self.cutoffval is not None and self.cutbyval:
cutval = self.cutoffval * psf.max()
dd = (psf - cutval) / cutval
dd[dd > 0] = 0.0
# calculate the new PSF and normalize it to the new support
psf = psf * np.exp(-(dd / self.cutfallrate) ** 4)
psf /= psf.sum()
# let the small values determine the edges
edge = psf > cutval * self.cutedgeval
if getextent:
# the size is determined by the edge plus a 2 pad for the
# exponential damping to zero at the edge
size = np.array([(vec * edge).min(axis=(1, 2, 3)) - 2, (vec * edge).max(axis=(1, 2, 3)) + 2]).T
return (psf, vec, size) # depends on [control=['if'], data=[]]
return (psf, vec) # depends on [control=['if'], data=[]]
return (psf, vec) |
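# A minimal sketch of the smooth PSF cutoff used in psf_slice above: values
# outside the support mask are damped by exp(-d**4), where d is the pixel
# distance to the support edge. Standalone 1-D example; assumes numpy and
# scipy are available.
import numpy as np
from scipy import ndimage as nd

psf = np.exp(-np.linspace(-4, 4, 81) ** 2)   # toy 1-D "psf"
edge = psf > psf.max() * 0.05                # support mask at 5% of the peak
dd = nd.distance_transform_edt(~edge)        # distance outside the support
psf_smooth = psf * np.exp(-dd ** 4)          # quartic-exponential damping
psf_smooth /= psf_smooth.sum()               # renormalize to unit mass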
def update_tool_tip(self):
"""
Updates the node tooltip.
:return: Method success.
:rtype: bool
"""
self.roles[Qt.ToolTipRole] = self.__tool_tip_text.format(self.component.name,
self.component.author,
self.component.category,
", ".join(self.component.require),
self.component.version,
self.component.description)
return True | def function[update_tool_tip, parameter[self]]:
constant[
Updates the node tooltip.
:return: Method success.
:rtype: bool
]
call[name[self].roles][name[Qt].ToolTipRole] assign[=] call[name[self].__tool_tip_text.format, parameter[name[self].component.name, name[self].component.author, name[self].component.category, call[constant[, ].join, parameter[name[self].component.require]], name[self].component.version, name[self].component.description]]
return[constant[True]] | keyword[def] identifier[update_tool_tip] ( identifier[self] ):
literal[string]
identifier[self] . identifier[roles] [ identifier[Qt] . identifier[ToolTipRole] ]= identifier[self] . identifier[__tool_tip_text] . identifier[format] ( identifier[self] . identifier[component] . identifier[name] ,
identifier[self] . identifier[component] . identifier[author] ,
identifier[self] . identifier[component] . identifier[category] ,
literal[string] . identifier[join] ( identifier[self] . identifier[component] . identifier[require] ),
identifier[self] . identifier[component] . identifier[version] ,
identifier[self] . identifier[component] . identifier[description] )
keyword[return] keyword[True] | def update_tool_tip(self):
"""
Updates the node tooltip.
:return: Method success.
:rtype: bool
"""
self.roles[Qt.ToolTipRole] = self.__tool_tip_text.format(self.component.name, self.component.author, self.component.category, ', '.join(self.component.require), self.component.version, self.component.description)
return True |
def workspace(show_values: bool = True, show_types: bool = True):
"""
Adds a list of the shared variables currently stored in the project
workspace.
:param show_values:
When true the values for each variable will be shown in addition to
their name.
:param show_types:
When true the data types for each shared variable will be shown in
addition to their name.
"""
r = _get_report()
data = {}
for key, value in r.project.shared.fetch(None).items():
if key.startswith('__cauldron_'):
continue
data[key] = value
r.append_body(render.status(data, values=show_values, types=show_types)) | def function[workspace, parameter[show_values, show_types]]:
constant[
Adds a list of the shared variables currently stored in the project
workspace.
:param show_values:
When true the values for each variable will be shown in addition to
their name.
:param show_types:
When true the data types for each shared variable will be shown in
addition to their name.
]
variable[r] assign[=] call[name[_get_report], parameter[]]
variable[data] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1b69210>, <ast.Name object at 0x7da1b1b69e40>]]] in starred[call[call[name[r].project.shared.fetch, parameter[constant[None]]].items, parameter[]]] begin[:]
if call[name[key].startswith, parameter[constant[__cauldron_]]] begin[:]
continue
call[name[data]][name[key]] assign[=] name[value]
call[name[r].append_body, parameter[call[name[render].status, parameter[name[data]]]]] | keyword[def] identifier[workspace] ( identifier[show_values] : identifier[bool] = keyword[True] , identifier[show_types] : identifier[bool] = keyword[True] ):
literal[string]
identifier[r] = identifier[_get_report] ()
identifier[data] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[r] . identifier[project] . identifier[shared] . identifier[fetch] ( keyword[None] ). identifier[items] ():
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[data] [ identifier[key] ]= identifier[value]
identifier[r] . identifier[append_body] ( identifier[render] . identifier[status] ( identifier[data] , identifier[values] = identifier[show_values] , identifier[types] = identifier[show_types] )) | def workspace(show_values: bool=True, show_types: bool=True):
"""
Adds a list of the shared variables currently stored in the project
workspace.
:param show_values:
When true the values for each variable will be shown in addition to
their name.
:param show_types:
When true the data types for each shared variable will be shown in
addition to their name.
"""
r = _get_report()
data = {}
for (key, value) in r.project.shared.fetch(None).items():
if key.startswith('__cauldron_'):
continue # depends on [control=['if'], data=[]]
data[key] = value # depends on [control=['for'], data=[]]
r.append_body(render.status(data, values=show_values, types=show_types)) |
def average(old_avg, current_value, count):
"""
Calculate the average. Count must start with 0
>>> average(None, 3.23, 0)
3.23
>>> average(0, 1, 0)
1.0
>>> average(2.5, 5, 4)
3.0
"""
if old_avg is None:
return current_value
return (float(old_avg) * count + current_value) / (count + 1) | def function[average, parameter[old_avg, current_value, count]]:
constant[
Calculate the average. Count must start with 0
>>> average(None, 3.23, 0)
3.23
>>> average(0, 1, 0)
1.0
>>> average(2.5, 5, 4)
3.0
]
if compare[name[old_avg] is constant[None]] begin[:]
return[name[current_value]]
return[binary_operation[binary_operation[binary_operation[call[name[float], parameter[name[old_avg]]] * name[count]] + name[current_value]] / binary_operation[name[count] + constant[1]]]] | keyword[def] identifier[average] ( identifier[old_avg] , identifier[current_value] , identifier[count] ):
literal[string]
keyword[if] identifier[old_avg] keyword[is] keyword[None] :
keyword[return] identifier[current_value]
keyword[return] ( identifier[float] ( identifier[old_avg] )* identifier[count] + identifier[current_value] )/( identifier[count] + literal[int] ) | def average(old_avg, current_value, count):
"""
Calculate the average. Count must start with 0
>>> average(None, 3.23, 0)
3.23
>>> average(0, 1, 0)
1.0
>>> average(2.5, 5, 4)
3.0
"""
if old_avg is None:
return current_value # depends on [control=['if'], data=[]]
return (float(old_avg) * count + current_value) / (count + 1) |
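# Usage sketch for average() above: folding a stream into a running mean.
# count is the number of values already folded, so it starts at 0.
values = [3.0, 5.0, 10.0]
avg = None
for count, x in enumerate(values):
    avg = average(avg, x, count)
assert avg == sum(values) / len(values)  # 6.0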
def parse_exposure(self, node):
"""
Parses <Exposure>
@param node: Node containing the <Exposure> element
@type node: xml.etree.Element
    @raise ParseError: Raised when the exposure is not
    defined in the context of a component type.
"""
if self.current_component_type == None:
self.raise_error('Exposures must be defined in a component type')
try:
name = node.lattrib['name']
except:
self.raise_error('<Exposure> must specify a name')
try:
dimension = node.lattrib['dimension']
except:
self.raise_error("Exposure '{0}' must specify a dimension",
name)
description = node.lattrib.get('description', '')
self.current_component_type.add_exposure(Exposure(name, dimension, description)) | def function[parse_exposure, parameter[self, node]]:
constant[
Parses <Exposure>
@param node: Node containing the <Exposure> element
@type node: xml.etree.Element
    @raise ParseError: Raised when the exposure is not
    defined in the context of a component type.
]
if compare[name[self].current_component_type equal[==] constant[None]] begin[:]
call[name[self].raise_error, parameter[constant[Exposures must be defined in a component type]]]
<ast.Try object at 0x7da1b24c5870>
<ast.Try object at 0x7da1b2381600>
variable[description] assign[=] call[name[node].lattrib.get, parameter[constant[description], constant[]]]
call[name[self].current_component_type.add_exposure, parameter[call[name[Exposure], parameter[name[name], name[dimension], name[description]]]]] | keyword[def] identifier[parse_exposure] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] identifier[self] . identifier[current_component_type] == keyword[None] :
identifier[self] . identifier[raise_error] ( literal[string] )
keyword[try] :
identifier[name] = identifier[node] . identifier[lattrib] [ literal[string] ]
keyword[except] :
identifier[self] . identifier[raise_error] ( literal[string] )
keyword[try] :
identifier[dimension] = identifier[node] . identifier[lattrib] [ literal[string] ]
keyword[except] :
identifier[self] . identifier[raise_error] ( literal[string] ,
identifier[name] )
identifier[description] = identifier[node] . identifier[lattrib] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[current_component_type] . identifier[add_exposure] ( identifier[Exposure] ( identifier[name] , identifier[dimension] , identifier[description] )) | def parse_exposure(self, node):
"""
Parses <Exposure>
@param node: Node containing the <Exposure> element
@type node: xml.etree.Element
    @raise ParseError: Raised when the exposure is not
    defined in the context of a component type.
"""
if self.current_component_type == None:
self.raise_error('Exposures must be defined in a component type') # depends on [control=['if'], data=[]]
try:
name = node.lattrib['name'] # depends on [control=['try'], data=[]]
except:
self.raise_error('<Exposure> must specify a name') # depends on [control=['except'], data=[]]
try:
dimension = node.lattrib['dimension'] # depends on [control=['try'], data=[]]
except:
self.raise_error("Exposure '{0}' must specify a dimension", name) # depends on [control=['except'], data=[]]
description = node.lattrib.get('description', '')
self.current_component_type.add_exposure(Exposure(name, dimension, description)) |
def dist(self, src, tar):
"""Return the NCD between two strings using LZMA compression.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Compression distance
Raises
------
ValueError
Install the PylibLZMA module in order to use LZMA
Examples
--------
>>> cmp = NCDlzma()
>>> cmp.dist('cat', 'hat')
0.08695652173913043
>>> cmp.dist('Niall', 'Neil')
0.16
>>> cmp.dist('aluminum', 'Catalan')
0.16
>>> cmp.dist('ATCG', 'TAGC')
0.08695652173913043
"""
if src == tar:
return 0.0
src = src.encode('utf-8')
tar = tar.encode('utf-8')
if lzma is not None:
src_comp = lzma.compress(src)[14:]
tar_comp = lzma.compress(tar)[14:]
concat_comp = lzma.compress(src + tar)[14:]
concat_comp2 = lzma.compress(tar + src)[14:]
else: # pragma: no cover
raise ValueError(
'Install the PylibLZMA module in order to use LZMA'
)
return (
min(len(concat_comp), len(concat_comp2))
- min(len(src_comp), len(tar_comp))
) / max(len(src_comp), len(tar_comp)) | def function[dist, parameter[self, src, tar]]:
constant[Return the NCD between two strings using LZMA compression.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Compression distance
Raises
------
ValueError
Install the PylibLZMA module in order to use LZMA
Examples
--------
>>> cmp = NCDlzma()
>>> cmp.dist('cat', 'hat')
0.08695652173913043
>>> cmp.dist('Niall', 'Neil')
0.16
>>> cmp.dist('aluminum', 'Catalan')
0.16
>>> cmp.dist('ATCG', 'TAGC')
0.08695652173913043
]
if compare[name[src] equal[==] name[tar]] begin[:]
return[constant[0.0]]
variable[src] assign[=] call[name[src].encode, parameter[constant[utf-8]]]
variable[tar] assign[=] call[name[tar].encode, parameter[constant[utf-8]]]
if compare[name[lzma] is_not constant[None]] begin[:]
variable[src_comp] assign[=] call[call[name[lzma].compress, parameter[name[src]]]][<ast.Slice object at 0x7da1b00d7a60>]
variable[tar_comp] assign[=] call[call[name[lzma].compress, parameter[name[tar]]]][<ast.Slice object at 0x7da1b00d62f0>]
variable[concat_comp] assign[=] call[call[name[lzma].compress, parameter[binary_operation[name[src] + name[tar]]]]][<ast.Slice object at 0x7da1b00d6f80>]
variable[concat_comp2] assign[=] call[call[name[lzma].compress, parameter[binary_operation[name[tar] + name[src]]]]][<ast.Slice object at 0x7da1b00d6ef0>]
return[binary_operation[binary_operation[call[name[min], parameter[call[name[len], parameter[name[concat_comp]]], call[name[len], parameter[name[concat_comp2]]]]] - call[name[min], parameter[call[name[len], parameter[name[src_comp]]], call[name[len], parameter[name[tar_comp]]]]]] / call[name[max], parameter[call[name[len], parameter[name[src_comp]]], call[name[len], parameter[name[tar_comp]]]]]]] | keyword[def] identifier[dist] ( identifier[self] , identifier[src] , identifier[tar] ):
literal[string]
keyword[if] identifier[src] == identifier[tar] :
keyword[return] literal[int]
identifier[src] = identifier[src] . identifier[encode] ( literal[string] )
identifier[tar] = identifier[tar] . identifier[encode] ( literal[string] )
keyword[if] identifier[lzma] keyword[is] keyword[not] keyword[None] :
identifier[src_comp] = identifier[lzma] . identifier[compress] ( identifier[src] )[ literal[int] :]
identifier[tar_comp] = identifier[lzma] . identifier[compress] ( identifier[tar] )[ literal[int] :]
identifier[concat_comp] = identifier[lzma] . identifier[compress] ( identifier[src] + identifier[tar] )[ literal[int] :]
identifier[concat_comp2] = identifier[lzma] . identifier[compress] ( identifier[tar] + identifier[src] )[ literal[int] :]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[return] (
identifier[min] ( identifier[len] ( identifier[concat_comp] ), identifier[len] ( identifier[concat_comp2] ))
- identifier[min] ( identifier[len] ( identifier[src_comp] ), identifier[len] ( identifier[tar_comp] ))
)/ identifier[max] ( identifier[len] ( identifier[src_comp] ), identifier[len] ( identifier[tar_comp] )) | def dist(self, src, tar):
"""Return the NCD between two strings using LZMA compression.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Compression distance
Raises
------
ValueError
Install the PylibLZMA module in order to use LZMA
Examples
--------
>>> cmp = NCDlzma()
>>> cmp.dist('cat', 'hat')
0.08695652173913043
>>> cmp.dist('Niall', 'Neil')
0.16
>>> cmp.dist('aluminum', 'Catalan')
0.16
>>> cmp.dist('ATCG', 'TAGC')
0.08695652173913043
"""
if src == tar:
return 0.0 # depends on [control=['if'], data=[]]
src = src.encode('utf-8')
tar = tar.encode('utf-8')
if lzma is not None:
src_comp = lzma.compress(src)[14:]
tar_comp = lzma.compress(tar)[14:]
concat_comp = lzma.compress(src + tar)[14:]
concat_comp2 = lzma.compress(tar + src)[14:] # depends on [control=['if'], data=['lzma']]
else: # pragma: no cover
raise ValueError('Install the PylibLZMA module in order to use LZMA')
return (min(len(concat_comp), len(concat_comp2)) - min(len(src_comp), len(tar_comp))) / max(len(src_comp), len(tar_comp)) |
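# A minimal sketch of the normalized compression distance computed in
# dist() above, written against the standard-library lzma module instead
# of PylibLZMA (so the 14-byte header strip is dropped and absolute values
# differ from the doctests): NCD = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)).
import lzma

def ncd_lzma(src, tar):
    if src == tar:
        return 0.0
    s, t = src.encode('utf-8'), tar.encode('utf-8')
    cs, ct = len(lzma.compress(s)), len(lzma.compress(t))
    cst = min(len(lzma.compress(s + t)), len(lzma.compress(t + s)))
    return (cst - min(cs, ct)) / max(cs, ct)

print(ncd_lzma('Niall', 'Neil'))  # a small positive value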
def backward_step(self):
"""Take a backward step for all active states in the state machine
"""
logger.debug("Executing backward step ...")
self.run_to_states = []
self.set_execution_mode(StateMachineExecutionStatus.BACKWARD) | def function[backward_step, parameter[self]]:
constant[Take a backward step for all active states in the state machine
]
call[name[logger].debug, parameter[constant[Executing backward step ...]]]
name[self].run_to_states assign[=] list[[]]
call[name[self].set_execution_mode, parameter[name[StateMachineExecutionStatus].BACKWARD]] | keyword[def] identifier[backward_step] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[run_to_states] =[]
identifier[self] . identifier[set_execution_mode] ( identifier[StateMachineExecutionStatus] . identifier[BACKWARD] ) | def backward_step(self):
"""Take a backward step for all active states in the state machine
"""
logger.debug('Executing backward step ...')
self.run_to_states = []
self.set_execution_mode(StateMachineExecutionStatus.BACKWARD) |
def filter_by_label(X, y, ref_label, reverse=False):
'''
Select items with label from dataset.
:param X: dataset
:param y: labels
:param ref_label: reference label
    :param bool reverse: if False, keep items labelled ref_label; otherwise drop them
'''
check_reference_label(y, ref_label)
return list(zip(*filter(lambda t: (not reverse) == (t[1] == ref_label),
zip(X, y)))) | def function[filter_by_label, parameter[X, y, ref_label, reverse]]:
constant[
Select items with label from dataset.
:param X: dataset
:param y: labels
:param ref_label: reference label
    :param bool reverse: if False, keep items labelled ref_label; otherwise drop them
]
call[name[check_reference_label], parameter[name[y], name[ref_label]]]
return[call[name[list], parameter[call[name[zip], parameter[<ast.Starred object at 0x7da18f721d20>]]]]] | keyword[def] identifier[filter_by_label] ( identifier[X] , identifier[y] , identifier[ref_label] , identifier[reverse] = keyword[False] ):
literal[string]
identifier[check_reference_label] ( identifier[y] , identifier[ref_label] )
keyword[return] identifier[list] ( identifier[zip] (* identifier[filter] ( keyword[lambda] identifier[t] :( keyword[not] identifier[reverse] )==( identifier[t] [ literal[int] ]== identifier[ref_label] ),
identifier[zip] ( identifier[X] , identifier[y] )))) | def filter_by_label(X, y, ref_label, reverse=False):
"""
Select items with label from dataset.
:param X: dataset
:param y: labels
:param ref_label: reference label
    :param bool reverse: if False, keep items labelled ref_label; otherwise drop them
"""
check_reference_label(y, ref_label)
return list(zip(*filter(lambda t: (not reverse) == (t[1] == ref_label), zip(X, y)))) |
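# Usage sketch for filter_by_label() above, on parallel feature/label lists.
# check_reference_label is the surrounding module's validator and is assumed
# to only check that ref_label occurs in y.
X = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
y = ['a', 'b', 'a']
X_a, y_a = filter_by_label(X, y, 'a')                      # keep label 'a'
X_rest, y_rest = filter_by_label(X, y, 'a', reverse=True)  # drop label 'a'
# X_a == ([0.1, 0.2], [0.5, 0.6]); y_a == ('a', 'a')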
def delete_io( hash ):
"""
Deletes records associated with a particular hash
:param str hash: The hash
    :return: The number of records deleted
    :rtype: int
"""
global CACHE_
load_cache(True)
record_used('cache', hash)
num_deleted = len(CACHE_['cache'].get(hash, []))
if hash in CACHE_['cache']:
del CACHE_['cache'][hash]
write_out()
return num_deleted | def function[delete_io, parameter[hash]]:
constant[
Deletes records associated with a particular hash
:param str hash: The hash
    :return: The number of records deleted
    :rtype: int
]
<ast.Global object at 0x7da1b0a49ba0>
call[name[load_cache], parameter[constant[True]]]
call[name[record_used], parameter[constant[cache], name[hash]]]
variable[num_deleted] assign[=] call[name[len], parameter[call[call[name[CACHE_]][constant[cache]].get, parameter[name[hash], list[[]]]]]]
if compare[name[hash] in call[name[CACHE_]][constant[cache]]] begin[:]
<ast.Delete object at 0x7da1b0a48df0>
call[name[write_out], parameter[]]
return[name[num_deleted]] | keyword[def] identifier[delete_io] ( identifier[hash] ):
literal[string]
keyword[global] identifier[CACHE_]
identifier[load_cache] ( keyword[True] )
identifier[record_used] ( literal[string] , identifier[hash] )
identifier[num_deleted] = identifier[len] ( identifier[CACHE_] [ literal[string] ]. identifier[get] ( identifier[hash] ,[]))
keyword[if] identifier[hash] keyword[in] identifier[CACHE_] [ literal[string] ]:
keyword[del] identifier[CACHE_] [ literal[string] ][ identifier[hash] ]
identifier[write_out] ()
keyword[return] identifier[num_deleted] | def delete_io(hash):
"""
Deletes records associated with a particular hash
:param str hash: The hash
:rtype int: The number of records deleted
"""
global CACHE_
load_cache(True)
record_used('cache', hash)
num_deleted = len(CACHE_['cache'].get(hash, []))
if hash in CACHE_['cache']:
del CACHE_['cache'][hash] # depends on [control=['if'], data=['hash']]
write_out()
return num_deleted |
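# Sketch of the cache shape delete_io() above appears to assume: a
# module-level dict with a 'cache' bucket mapping hash -> list of records.
# Only the delete-and-count step is mirrored here; load_cache, record_used
# and write_out are the module's own persistence helpers.
CACHE_ = {'cache': {'abc123': [{'io': 1}, {'io': 2}]}}

def delete_io_sketch(hash):
    num_deleted = len(CACHE_['cache'].get(hash, []))
    CACHE_['cache'].pop(hash, None)
    return num_deleted

assert delete_io_sketch('abc123') == 2
assert delete_io_sketch('missing') == 0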
def outputs(ctx, client, revision, paths):
r"""Show output files in the repository.
<PATHS> Files to show. If no files are given all output files are shown.
"""
graph = Graph(client)
filter = graph.build(paths=paths, revision=revision)
output_paths = graph.output_paths
click.echo('\n'.join(graph._format_path(path) for path in output_paths))
if paths:
if not output_paths:
ctx.exit(1)
from renku.models._datastructures import DirectoryTree
tree = DirectoryTree.from_list(item.path for item in filter)
for output in output_paths:
if tree.get(output) is None:
ctx.exit(1)
return | def function[outputs, parameter[ctx, client, revision, paths]]:
constant[Show output files in the repository.
<PATHS> Files to show. If no files are given all output files are shown.
]
variable[graph] assign[=] call[name[Graph], parameter[name[client]]]
variable[filter] assign[=] call[name[graph].build, parameter[]]
variable[output_paths] assign[=] name[graph].output_paths
call[name[click].echo, parameter[call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da204565e10>]]]]
if name[paths] begin[:]
if <ast.UnaryOp object at 0x7da1b04987c0> begin[:]
call[name[ctx].exit, parameter[constant[1]]]
from relative_module[renku.models._datastructures] import module[DirectoryTree]
variable[tree] assign[=] call[name[DirectoryTree].from_list, parameter[<ast.GeneratorExp object at 0x7da1b0498340>]]
for taget[name[output]] in starred[name[output_paths]] begin[:]
if compare[call[name[tree].get, parameter[name[output]]] is constant[None]] begin[:]
call[name[ctx].exit, parameter[constant[1]]]
return[None] | keyword[def] identifier[outputs] ( identifier[ctx] , identifier[client] , identifier[revision] , identifier[paths] ):
literal[string]
identifier[graph] = identifier[Graph] ( identifier[client] )
identifier[filter] = identifier[graph] . identifier[build] ( identifier[paths] = identifier[paths] , identifier[revision] = identifier[revision] )
identifier[output_paths] = identifier[graph] . identifier[output_paths]
identifier[click] . identifier[echo] ( literal[string] . identifier[join] ( identifier[graph] . identifier[_format_path] ( identifier[path] ) keyword[for] identifier[path] keyword[in] identifier[output_paths] ))
keyword[if] identifier[paths] :
keyword[if] keyword[not] identifier[output_paths] :
identifier[ctx] . identifier[exit] ( literal[int] )
keyword[from] identifier[renku] . identifier[models] . identifier[_datastructures] keyword[import] identifier[DirectoryTree]
identifier[tree] = identifier[DirectoryTree] . identifier[from_list] ( identifier[item] . identifier[path] keyword[for] identifier[item] keyword[in] identifier[filter] )
keyword[for] identifier[output] keyword[in] identifier[output_paths] :
keyword[if] identifier[tree] . identifier[get] ( identifier[output] ) keyword[is] keyword[None] :
identifier[ctx] . identifier[exit] ( literal[int] )
keyword[return] | def outputs(ctx, client, revision, paths):
"""Show output files in the repository.
<PATHS> Files to show. If no files are given all output files are shown.
"""
graph = Graph(client)
filter = graph.build(paths=paths, revision=revision)
output_paths = graph.output_paths
click.echo('\n'.join((graph._format_path(path) for path in output_paths)))
if paths:
if not output_paths:
ctx.exit(1) # depends on [control=['if'], data=[]]
from renku.models._datastructures import DirectoryTree
tree = DirectoryTree.from_list((item.path for item in filter))
for output in output_paths:
if tree.get(output) is None:
ctx.exit(1)
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['output']] # depends on [control=['if'], data=[]] |
def docpie(doc, argv=None, help=True, version=None,
stdopt=True, attachopt=True, attachvalue=True,
helpstyle='python',
auto2dashes=True, name=None, case_sensitive=False,
optionsfirst=False, appearedonly=False, namedoptions=False,
extra=None):
"""
Parse `argv` based on command-line interface described in `doc`.
`docpie` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object but None
If passed, the object will be printed if --version is in
`argv`.
    stdopt : bool (default: True)
        When it's True, long flags should only start with --
    attachopt: bool (default: True)
        write/pass several short flags as one, e.g. -abc can mean -a -b -c.
        This only works when stdopt=True
    attachvalue: bool (default: True)
        allow you to write a short flag and its value together,
        e.g. -abc can mean -a bc
    auto2dashes: bool (default: True)
        automatically handle -- (which marks the end of command-line flags)
    name: str (default: None)
        the "name" of your program. In each of your "usage" the "name" will be
        ignored. By default docpie will ignore the first element of your
        "usage".
    case_sensitive: bool (deprecated / default: False)
        specifies whether matching "Usage:" and "Options:" is
        case sensitive
    optionsfirst: bool (default: False)
        everything after the first positional argument will be interpreted as
        a positional argument
    appearedonly: bool (default: False)
        when set True, the options that never appear in argv will not
        be put in the result. Note this only affects options
    extra: dict
        customize pre-handled options. See
        http://docpie.comes.today/document/advanced-apis/
        for more information.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docpie import docpie
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docpie(doc, argv)
{
'--': False,
'-h': False,
'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* Full documentation is available in README.md as well as online
at http://docpie.comes.today/document/quick-start/
"""
if case_sensitive:
warnings.warn('`case_sensitive` is deprecated, `docpie` is always '
'case insensitive')
kwargs = locals()
argv = kwargs.pop('argv')
pie = Docpie(**kwargs)
pie.docpie(argv)
return pie | def function[docpie, parameter[doc, argv, help, version, stdopt, attachopt, attachvalue, helpstyle, auto2dashes, name, case_sensitive, optionsfirst, appearedonly, namedoptions, extra]]:
constant[
Parse `argv` based on command-line interface described in `doc`.
`docpie` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object but None
If passed, the object will be printed if --version is in
`argv`.
    stdopt : bool (default: True)
        When it's True, long flags should only start with --
    attachopt: bool (default: True)
        write/pass several short flags as one, e.g. -abc can mean -a -b -c.
        This only works when stdopt=True
    attachvalue: bool (default: True)
        allow you to write a short flag and its value together,
        e.g. -abc can mean -a bc
    auto2dashes: bool (default: True)
        automatically handle -- (which marks the end of command-line flags)
    name: str (default: None)
        the "name" of your program. In each of your "usage" the "name" will be
        ignored. By default docpie will ignore the first element of your
        "usage".
    case_sensitive: bool (deprecated / default: False)
        specifies whether matching "Usage:" and "Options:" is
        case sensitive
    optionsfirst: bool (default: False)
        everything after the first positional argument will be interpreted as
        a positional argument
    appearedonly: bool (default: False)
        when set True, the options that never appear in argv will not
        be put in the result. Note this only affects options
    extra: dict
        customize pre-handled options. See
        http://docpie.comes.today/document/advanced-apis/
        for more information.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docpie import docpie
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docpie(doc, argv)
{
'--': False,
'-h': False,
'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* Full documentation is available in README.md as well as online
at http://docpie.comes.today/document/quick-start/
]
if name[case_sensitive] begin[:]
call[name[warnings].warn, parameter[constant[`case_sensitive` is deprecated, `docpie` is always case insensitive]]]
variable[kwargs] assign[=] call[name[locals], parameter[]]
variable[argv] assign[=] call[name[kwargs].pop, parameter[constant[argv]]]
variable[pie] assign[=] call[name[Docpie], parameter[]]
call[name[pie].docpie, parameter[name[argv]]]
return[name[pie]] | keyword[def] identifier[docpie] ( identifier[doc] , identifier[argv] = keyword[None] , identifier[help] = keyword[True] , identifier[version] = keyword[None] ,
identifier[stdopt] = keyword[True] , identifier[attachopt] = keyword[True] , identifier[attachvalue] = keyword[True] ,
identifier[helpstyle] = literal[string] ,
identifier[auto2dashes] = keyword[True] , identifier[name] = keyword[None] , identifier[case_sensitive] = keyword[False] ,
identifier[optionsfirst] = keyword[False] , identifier[appearedonly] = keyword[False] , identifier[namedoptions] = keyword[False] ,
identifier[extra] = keyword[None] ):
literal[string]
keyword[if] identifier[case_sensitive] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] )
identifier[kwargs] = identifier[locals] ()
identifier[argv] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[pie] = identifier[Docpie] (** identifier[kwargs] )
identifier[pie] . identifier[docpie] ( identifier[argv] )
keyword[return] identifier[pie] | def docpie(doc, argv=None, help=True, version=None, stdopt=True, attachopt=True, attachvalue=True, helpstyle='python', auto2dashes=True, name=None, case_sensitive=False, optionsfirst=False, appearedonly=False, namedoptions=False, extra=None):
"""
Parse `argv` based on command-line interface described in `doc`.
`docpie` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object but None
If passed, the object will be printed if --version is in
`argv`.
    stdopt : bool (default: True)
        When it's True, long flags should only start with --
    attachopt: bool (default: True)
        write/pass several short flags as one, e.g. -abc can mean -a -b -c.
        This only works when stdopt=True
    attachvalue: bool (default: True)
        allow you to write a short flag and its value together,
        e.g. -abc can mean -a bc
    auto2dashes: bool (default: True)
        automatically handle -- (which marks the end of command-line flags)
    name: str (default: None)
        the "name" of your program. In each of your "usage" the "name" will be
        ignored. By default docpie will ignore the first element of your
        "usage".
    case_sensitive: bool (deprecated / default: False)
        specifies whether matching "Usage:" and "Options:" is
        case sensitive
    optionsfirst: bool (default: False)
        everything after the first positional argument will be interpreted as
        a positional argument
    appearedonly: bool (default: False)
        when set True, the options that never appear in argv will not
        be put in the result. Note this only affects options
    extra: dict
        customize pre-handled options. See
        http://docpie.comes.today/document/advanced-apis/
        for more information.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docpie import docpie
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docpie(doc, argv)
{
'--': False,
'-h': False,
'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* Full documentation is available in README.md as well as online
at http://docpie.comes.today/document/quick-start/
"""
if case_sensitive:
warnings.warn('`case_sensitive` is deprecated, `docpie` is always case insensitive') # depends on [control=['if'], data=[]]
kwargs = locals()
argv = kwargs.pop('argv')
pie = Docpie(**kwargs)
pie.docpie(argv)
return pie |
def describe_all(self, refresh=True):
""" Describe all tables in the connected region """
tables = self.connection.list_tables()
descs = []
for tablename in tables:
descs.append(self.describe(tablename, refresh))
return descs | def function[describe_all, parameter[self, refresh]]:
constant[ Describe all tables in the connected region ]
variable[tables] assign[=] call[name[self].connection.list_tables, parameter[]]
variable[descs] assign[=] list[[]]
for taget[name[tablename]] in starred[name[tables]] begin[:]
call[name[descs].append, parameter[call[name[self].describe, parameter[name[tablename], name[refresh]]]]]
return[name[descs]] | keyword[def] identifier[describe_all] ( identifier[self] , identifier[refresh] = keyword[True] ):
literal[string]
identifier[tables] = identifier[self] . identifier[connection] . identifier[list_tables] ()
identifier[descs] =[]
keyword[for] identifier[tablename] keyword[in] identifier[tables] :
identifier[descs] . identifier[append] ( identifier[self] . identifier[describe] ( identifier[tablename] , identifier[refresh] ))
keyword[return] identifier[descs] | def describe_all(self, refresh=True):
""" Describe all tables in the connected region """
tables = self.connection.list_tables()
descs = []
for tablename in tables:
descs.append(self.describe(tablename, refresh)) # depends on [control=['for'], data=['tablename']]
return descs |
def _series_lsm(self):
"""Return main and thumbnail series in LSM file."""
lsmi = self.lsm_metadata
axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi['ScanType']]
if self.pages[0].photometric == 2: # RGB; more than one channel
axes = axes.replace('C', '').replace('XY', 'XYC')
if lsmi.get('DimensionP', 0) > 1:
axes += 'P'
if lsmi.get('DimensionM', 0) > 1:
axes += 'M'
axes = axes[::-1]
shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes)
name = lsmi.get('Name', '')
pages = self.pages._getlist(slice(0, None, 2), validate=False)
dtype = pages[0].dtype
series = [TiffPageSeries(pages, shape, dtype, axes, name=name,
kind='LSM')]
if self.pages[1].is_reduced:
pages = self.pages._getlist(slice(1, None, 2), validate=False)
dtype = pages[0].dtype
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(TiffPageSeries(pages, shape, dtype, axes, name=name,
kind='LSMreduced'))
self.is_uniform = False
return series | def function[_series_lsm, parameter[self]]:
constant[Return main and thumbnail series in LSM file.]
variable[lsmi] assign[=] name[self].lsm_metadata
variable[axes] assign[=] call[name[TIFF].CZ_LSMINFO_SCANTYPE][call[name[lsmi]][constant[ScanType]]]
if compare[call[name[self].pages][constant[0]].photometric equal[==] constant[2]] begin[:]
variable[axes] assign[=] call[call[name[axes].replace, parameter[constant[C], constant[]]].replace, parameter[constant[XY], constant[XYC]]]
if compare[call[name[lsmi].get, parameter[constant[DimensionP], constant[0]]] greater[>] constant[1]] begin[:]
<ast.AugAssign object at 0x7da1b185c460>
if compare[call[name[lsmi].get, parameter[constant[DimensionM], constant[0]]] greater[>] constant[1]] begin[:]
<ast.AugAssign object at 0x7da1b185c670>
variable[axes] assign[=] call[name[axes]][<ast.Slice object at 0x7da1b185c7c0>]
variable[shape] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b185c910>]]
variable[name] assign[=] call[name[lsmi].get, parameter[constant[Name], constant[]]]
variable[pages] assign[=] call[name[self].pages._getlist, parameter[call[name[slice], parameter[constant[0], constant[None], constant[2]]]]]
variable[dtype] assign[=] call[name[pages]][constant[0]].dtype
variable[series] assign[=] list[[<ast.Call object at 0x7da1b18e51b0>]]
if call[name[self].pages][constant[1]].is_reduced begin[:]
variable[pages] assign[=] call[name[self].pages._getlist, parameter[call[name[slice], parameter[constant[1], constant[None], constant[2]]]]]
variable[dtype] assign[=] call[name[pages]][constant[0]].dtype
variable[cp] assign[=] constant[1]
variable[i] assign[=] constant[0]
while <ast.BoolOp object at 0x7da1b18e5990> begin[:]
<ast.AugAssign object at 0x7da1b18e6500>
<ast.AugAssign object at 0x7da1b18e65f0>
variable[shape] assign[=] binary_operation[call[name[shape]][<ast.Slice object at 0x7da1b18e6770>] + call[name[pages]][constant[0]].shape]
variable[axes] assign[=] binary_operation[call[name[axes]][<ast.Slice object at 0x7da1b18e6980>] + constant[CYX]]
call[name[series].append, parameter[call[name[TiffPageSeries], parameter[name[pages], name[shape], name[dtype], name[axes]]]]]
name[self].is_uniform assign[=] constant[False]
return[name[series]] | keyword[def] identifier[_series_lsm] ( identifier[self] ):
literal[string]
identifier[lsmi] = identifier[self] . identifier[lsm_metadata]
identifier[axes] = identifier[TIFF] . identifier[CZ_LSMINFO_SCANTYPE] [ identifier[lsmi] [ literal[string] ]]
keyword[if] identifier[self] . identifier[pages] [ literal[int] ]. identifier[photometric] == literal[int] :
identifier[axes] = identifier[axes] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[lsmi] . identifier[get] ( literal[string] , literal[int] )> literal[int] :
identifier[axes] += literal[string]
keyword[if] identifier[lsmi] . identifier[get] ( literal[string] , literal[int] )> literal[int] :
identifier[axes] += literal[string]
identifier[axes] = identifier[axes] [::- literal[int] ]
identifier[shape] = identifier[tuple] ( identifier[int] ( identifier[lsmi] [ identifier[TIFF] . identifier[CZ_LSMINFO_DIMENSIONS] [ identifier[i] ]]) keyword[for] identifier[i] keyword[in] identifier[axes] )
identifier[name] = identifier[lsmi] . identifier[get] ( literal[string] , literal[string] )
identifier[pages] = identifier[self] . identifier[pages] . identifier[_getlist] ( identifier[slice] ( literal[int] , keyword[None] , literal[int] ), identifier[validate] = keyword[False] )
identifier[dtype] = identifier[pages] [ literal[int] ]. identifier[dtype]
identifier[series] =[ identifier[TiffPageSeries] ( identifier[pages] , identifier[shape] , identifier[dtype] , identifier[axes] , identifier[name] = identifier[name] ,
identifier[kind] = literal[string] )]
keyword[if] identifier[self] . identifier[pages] [ literal[int] ]. identifier[is_reduced] :
identifier[pages] = identifier[self] . identifier[pages] . identifier[_getlist] ( identifier[slice] ( literal[int] , keyword[None] , literal[int] ), identifier[validate] = keyword[False] )
identifier[dtype] = identifier[pages] [ literal[int] ]. identifier[dtype]
identifier[cp] = literal[int]
identifier[i] = literal[int]
keyword[while] identifier[cp] < identifier[len] ( identifier[pages] ) keyword[and] identifier[i] < identifier[len] ( identifier[shape] )- literal[int] :
identifier[cp] *= identifier[shape] [ identifier[i] ]
identifier[i] += literal[int]
identifier[shape] = identifier[shape] [: identifier[i] ]+ identifier[pages] [ literal[int] ]. identifier[shape]
identifier[axes] = identifier[axes] [: identifier[i] ]+ literal[string]
identifier[series] . identifier[append] ( identifier[TiffPageSeries] ( identifier[pages] , identifier[shape] , identifier[dtype] , identifier[axes] , identifier[name] = identifier[name] ,
identifier[kind] = literal[string] ))
identifier[self] . identifier[is_uniform] = keyword[False]
keyword[return] identifier[series] | def _series_lsm(self):
"""Return main and thumbnail series in LSM file."""
lsmi = self.lsm_metadata
axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi['ScanType']]
if self.pages[0].photometric == 2: # RGB; more than one channel
axes = axes.replace('C', '').replace('XY', 'XYC') # depends on [control=['if'], data=[]]
if lsmi.get('DimensionP', 0) > 1:
axes += 'P' # depends on [control=['if'], data=[]]
if lsmi.get('DimensionM', 0) > 1:
axes += 'M' # depends on [control=['if'], data=[]]
axes = axes[::-1]
shape = tuple((int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes))
name = lsmi.get('Name', '')
pages = self.pages._getlist(slice(0, None, 2), validate=False)
dtype = pages[0].dtype
series = [TiffPageSeries(pages, shape, dtype, axes, name=name, kind='LSM')]
if self.pages[1].is_reduced:
pages = self.pages._getlist(slice(1, None, 2), validate=False)
dtype = pages[0].dtype
cp = 1
i = 0
while cp < len(pages) and i < len(shape) - 2:
cp *= shape[i]
i += 1 # depends on [control=['while'], data=[]]
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(TiffPageSeries(pages, shape, dtype, axes, name=name, kind='LSMreduced')) # depends on [control=['if'], data=[]]
self.is_uniform = False
return series |
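# The page layout _series_lsm() above relies on: LSM files interleave
# full-resolution pages (even indices) with reduced thumbnails (odd
# indices), so slice(0, None, 2) selects the main series and
# slice(1, None, 2) the thumbnails. Plain-list illustration:
pages = ['main0', 'thumb0', 'main1', 'thumb1', 'main2', 'thumb2']
main = pages[0::2]       # ['main0', 'main1', 'main2']
reduced = pages[1::2]    # ['thumb0', 'thumb1', 'thumb2']
assert len(main) == len(reduced) == 3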
def stream_filesystem_node(path,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming either files or directories.
Returns a buffered generator which encodes the file or directory at the
given path as :mimetype:`multipart/form-data` with the corresponding
headers.
Parameters
----------
path : str
The filepath of the directory or file to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
if recursive or is_dir:
return stream_directory(path, recursive, patterns, chunk_size)
else:
return stream_files(path, chunk_size) | def function[stream_filesystem_node, parameter[path, recursive, patterns, chunk_size]]:
constant[Gets a buffered generator for streaming either files or directories.
Returns a buffered generator which encodes the file or directory at the
given path as :mimetype:`multipart/form-data` with the corresponding
headers.
Parameters
----------
path : str
The filepath of the directory or file to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
]
variable[is_dir] assign[=] <ast.BoolOp object at 0x7da207f9b4f0>
if <ast.BoolOp object at 0x7da207f9add0> begin[:]
return[call[name[stream_directory], parameter[name[path], name[recursive], name[patterns], name[chunk_size]]]] | keyword[def] identifier[stream_filesystem_node] ( identifier[path] ,
identifier[recursive] = keyword[False] ,
identifier[patterns] = literal[string] ,
identifier[chunk_size] = identifier[default_chunk_size] ):
literal[string]
identifier[is_dir] = identifier[isinstance] ( identifier[path] , identifier[six] . identifier[string_types] ) keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] )
keyword[if] identifier[recursive] keyword[or] identifier[is_dir] :
keyword[return] identifier[stream_directory] ( identifier[path] , identifier[recursive] , identifier[patterns] , identifier[chunk_size] )
keyword[else] :
keyword[return] identifier[stream_files] ( identifier[path] , identifier[chunk_size] ) | def stream_filesystem_node(path, recursive=False, patterns='**', chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming either files or directories.
Returns a buffered generator which encodes the file or directory at the
given path as :mimetype:`multipart/form-data` with the corresponding
headers.
Parameters
----------
path : str
The filepath of the directory or file to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
if recursive or is_dir:
return stream_directory(path, recursive, patterns, chunk_size) # depends on [control=['if'], data=[]]
else:
return stream_files(path, chunk_size) |
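# The chunk_size parameter above bounds each yielded piece. A standalone
# sketch of that chunking, without the multipart/form-data encoding the
# real stream_files/stream_directory helpers wrap around it:
def iter_chunks(path, chunk_size=4096):
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            yield chunk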
def start_element (self, tag, attrs):
"""Search for <title> tag."""
if tag == 'title':
data = self.parser.peek(MAX_TITLELEN)
data = data.decode(self.parser.encoding, "ignore")
self.title = linkname.title_name(data)
raise StopParse("found <title> tag")
elif tag == 'body':
raise StopParse("found <body> tag") | def function[start_element, parameter[self, tag, attrs]]:
constant[Search for <title> tag.]
if compare[name[tag] equal[==] constant[title]] begin[:]
variable[data] assign[=] call[name[self].parser.peek, parameter[name[MAX_TITLELEN]]]
variable[data] assign[=] call[name[data].decode, parameter[name[self].parser.encoding, constant[ignore]]]
name[self].title assign[=] call[name[linkname].title_name, parameter[name[data]]]
<ast.Raise object at 0x7da18fe90790> | keyword[def] identifier[start_element] ( identifier[self] , identifier[tag] , identifier[attrs] ):
literal[string]
keyword[if] identifier[tag] == literal[string] :
identifier[data] = identifier[self] . identifier[parser] . identifier[peek] ( identifier[MAX_TITLELEN] )
identifier[data] = identifier[data] . identifier[decode] ( identifier[self] . identifier[parser] . identifier[encoding] , literal[string] )
identifier[self] . identifier[title] = identifier[linkname] . identifier[title_name] ( identifier[data] )
keyword[raise] identifier[StopParse] ( literal[string] )
keyword[elif] identifier[tag] == literal[string] :
keyword[raise] identifier[StopParse] ( literal[string] ) | def start_element(self, tag, attrs):
"""Search for <title> tag."""
if tag == 'title':
data = self.parser.peek(MAX_TITLELEN)
data = data.decode(self.parser.encoding, 'ignore')
self.title = linkname.title_name(data)
raise StopParse('found <title> tag') # depends on [control=['if'], data=[]]
elif tag == 'body':
raise StopParse('found <body> tag') # depends on [control=['if'], data=[]] |
def load(f):
"""Load audio metadata from filepath or file-like object.
Parameters:
f (str, os.PathLike, or file-like object):
A filepath, path-like object or file-like object of an audio file.
Returns:
Format: An audio format object.
Raises:
UnsupportedFormat: If file is not of a supported format.
ValueError: If filepath/file-like object is not valid or readable.
"""
if isinstance(f, (os.PathLike, str)):
fileobj = open(f, 'rb')
else:
try:
f.read(0)
except AttributeError:
raise ValueError("Not a valid file-like object.")
except Exception:
raise ValueError("Can't read from file-like object.")
fileobj = f
parser_cls = determine_format(fileobj, os.path.splitext(fileobj.name)[1])
if parser_cls is None:
raise UnsupportedFormat("Supported format signature not found.")
else:
fileobj.seek(0, os.SEEK_SET)
return parser_cls.load(fileobj) | def function[load, parameter[f]]:
constant[Load audio metadata from filepath or file-like object.
Parameters:
f (str, os.PathLike, or file-like object):
A filepath, path-like object or file-like object of an audio file.
Returns:
Format: An audio format object.
Raises:
UnsupportedFormat: If file is not of a supported format.
ValueError: If filepath/file-like object is not valid or readable.
]
if call[name[isinstance], parameter[name[f], tuple[[<ast.Attribute object at 0x7da1b2507b50>, <ast.Name object at 0x7da1b2505450>]]]] begin[:]
variable[fileobj] assign[=] call[name[open], parameter[name[f], constant[rb]]]
variable[parser_cls] assign[=] call[name[determine_format], parameter[name[fileobj], call[call[name[os].path.splitext, parameter[name[fileobj].name]]][constant[1]]]]
if compare[name[parser_cls] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2507d90>
return[call[name[parser_cls].load, parameter[name[fileobj]]]] | keyword[def] identifier[load] ( identifier[f] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[f] ,( identifier[os] . identifier[PathLike] , identifier[str] )):
identifier[fileobj] = identifier[open] ( identifier[f] , literal[string] )
keyword[else] :
keyword[try] :
identifier[f] . identifier[read] ( literal[int] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[fileobj] = identifier[f]
identifier[parser_cls] = identifier[determine_format] ( identifier[fileobj] , identifier[os] . identifier[path] . identifier[splitext] ( identifier[fileobj] . identifier[name] )[ literal[int] ])
keyword[if] identifier[parser_cls] keyword[is] keyword[None] :
keyword[raise] identifier[UnsupportedFormat] ( literal[string] )
keyword[else] :
identifier[fileobj] . identifier[seek] ( literal[int] , identifier[os] . identifier[SEEK_SET] )
keyword[return] identifier[parser_cls] . identifier[load] ( identifier[fileobj] ) | def load(f):
"""Load audio metadata from filepath or file-like object.
Parameters:
f (str, os.PathLike, or file-like object):
A filepath, path-like object or file-like object of an audio file.
Returns:
Format: An audio format object.
Raises:
UnsupportedFormat: If file is not of a supported format.
ValueError: If filepath/file-like object is not valid or readable.
"""
if isinstance(f, (os.PathLike, str)):
fileobj = open(f, 'rb') # depends on [control=['if'], data=[]]
else:
try:
f.read(0) # depends on [control=['try'], data=[]]
except AttributeError:
raise ValueError('Not a valid file-like object.') # depends on [control=['except'], data=[]]
except Exception:
raise ValueError("Can't read from file-like object.") # depends on [control=['except'], data=[]]
fileobj = f
parser_cls = determine_format(fileobj, os.path.splitext(fileobj.name)[1])
if parser_cls is None:
raise UnsupportedFormat('Supported format signature not found.') # depends on [control=['if'], data=[]]
else:
fileobj.seek(0, os.SEEK_SET)
return parser_cls.load(fileobj) |
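# Usage sketch for load() above; 'example.flac' is a hypothetical file of
# any format with a registered parser. Both paths and already-open binary
# file objects are accepted.
metadata = load('example.flac')
with open('example.flac', 'rb') as fh:
    metadata_again = load(fh)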
def parse_http_header(header_line):
"""Parse an HTTP header from a string, and return an ``HttpHeader``.
``header_line`` should only contain one line.
``BadHttpHeaderError`` is raised if the string is an invalid header line.
"""
header_line = header_line.decode().strip()
col_idx = header_line.find(':')
if col_idx < 1:
raise BadHttpHeaderError('Bad header: {}'.format(repr(header_line)))
key = header_line[0:col_idx].strip()
value = header_line[(col_idx+1):].strip()
return HttpHeader(key=key, value=value) | def function[parse_http_header, parameter[header_line]]:
constant[Parse an HTTP header from a string, and return an ``HttpHeader``.
``header_line`` should only contain one line.
``BadHttpHeaderError`` is raised if the string is an invalid header line.
]
variable[header_line] assign[=] call[call[name[header_line].decode, parameter[]].strip, parameter[]]
variable[col_idx] assign[=] call[name[header_line].find, parameter[constant[:]]]
if compare[name[col_idx] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b09eb100>
variable[key] assign[=] call[call[name[header_line]][<ast.Slice object at 0x7da1b09eaf80>].strip, parameter[]]
variable[value] assign[=] call[call[name[header_line]][<ast.Slice object at 0x7da1b09eb2b0>].strip, parameter[]]
return[call[name[HttpHeader], parameter[]]] | keyword[def] identifier[parse_http_header] ( identifier[header_line] ):
literal[string]
identifier[header_line] = identifier[header_line] . identifier[decode] (). identifier[strip] ()
identifier[col_idx] = identifier[header_line] . identifier[find] ( literal[string] )
keyword[if] identifier[col_idx] < literal[int] :
keyword[raise] identifier[BadHttpHeaderError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[header_line] )))
identifier[key] = identifier[header_line] [ literal[int] : identifier[col_idx] ]. identifier[strip] ()
identifier[value] = identifier[header_line] [( identifier[col_idx] + literal[int] ):]. identifier[strip] ()
keyword[return] identifier[HttpHeader] ( identifier[key] = identifier[key] , identifier[value] = identifier[value] ) | def parse_http_header(header_line):
"""Parse an HTTP header from a string, and return an ``HttpHeader``.
``header_line`` should only contain one line.
``BadHttpHeaderError`` is raised if the string is an invalid header line.
"""
header_line = header_line.decode().strip()
col_idx = header_line.find(':')
if col_idx < 1:
raise BadHttpHeaderError('Bad header: {}'.format(repr(header_line))) # depends on [control=['if'], data=[]]
key = header_line[0:col_idx].strip()
value = header_line[col_idx + 1:].strip()
return HttpHeader(key=key, value=value) |
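# Usage sketch for parse_http_header() above. The input is bytes (the
# function calls .decode() first) and the result is the module's own
# HttpHeader key/value container.
hdr = parse_http_header(b'Content-Type: text/html; charset=utf-8')
assert hdr.key == 'Content-Type'
assert hdr.value == 'text/html; charset=utf-8'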
def select_params_from_section_schema(section_schema, param_class=Param,
deep=False):
"""Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:return: Generator of params
"""
# pylint: disable=invalid-name
for name, value in inspect.getmembers(section_schema):
if name.startswith("__") or value is None:
continue # pragma: no cover
elif inspect.isclass(value) and deep:
# -- CASE: class => SELF-CALL (recursively).
# pylint: disable= bad-continuation
cls = value
for name, value in select_params_from_section_schema(cls,
param_class=param_class, deep=True):
yield (name, value)
elif isinstance(value, param_class):
yield (name, value) | def function[select_params_from_section_schema, parameter[section_schema, param_class, deep]]:
constant[Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:return: Generator of params
]
for taget[tuple[[<ast.Name object at 0x7da18f58ee30>, <ast.Name object at 0x7da18f58f490>]]] in starred[call[name[inspect].getmembers, parameter[name[section_schema]]]] begin[:]
if <ast.BoolOp object at 0x7da207f996f0> begin[:]
continue | keyword[def] identifier[select_params_from_section_schema] ( identifier[section_schema] , identifier[param_class] = identifier[Param] ,
identifier[deep] = keyword[False] ):
literal[string]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[inspect] . identifier[getmembers] ( identifier[section_schema] ):
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ) keyword[or] identifier[value] keyword[is] keyword[None] :
keyword[continue]
keyword[elif] identifier[inspect] . identifier[isclass] ( identifier[value] ) keyword[and] identifier[deep] :
identifier[cls] = identifier[value]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[select_params_from_section_schema] ( identifier[cls] ,
identifier[param_class] = identifier[param_class] , identifier[deep] = keyword[True] ):
keyword[yield] ( identifier[name] , identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[param_class] ):
keyword[yield] ( identifier[name] , identifier[value] ) | def select_params_from_section_schema(section_schema, param_class=Param, deep=False):
"""Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:return: Generator of params
"""
# pylint: disable=invalid-name
for (name, value) in inspect.getmembers(section_schema):
if name.startswith('__') or value is None:
continue # pragma: no cover # depends on [control=['if'], data=[]]
elif inspect.isclass(value) and deep:
# -- CASE: class => SELF-CALL (recursively).
# pylint: disable= bad-continuation
cls = value
for (name, value) in select_params_from_section_schema(cls, param_class=param_class, deep=True):
yield (name, value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(value, param_class):
yield (name, value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
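
A self-contained sketch of how the generator walks a section schema; the toy Param class below is a stand-in for the real one.

import inspect

class Param(object):                     # stand-in for the real Param class
    pass

class ServerSchema(object):
    host = Param()
    port = Param()
    class Advanced(object):              # nested schema, reached with deep=True
        timeout = Param()

params = dict(select_params_from_section_schema(ServerSchema,
                                                param_class=Param, deep=True))
print(sorted(params))                    # ['host', 'port', 'timeout']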
def event_return(events):
'''
    Return events to a MongoDB server
'''
conn, mdb = _get_conn(ret=None)
if isinstance(events, list):
events = events[0]
if isinstance(events, dict):
log.debug(events)
if PYMONGO_VERSION > _LooseVersion('2.3'):
mdb.events.insert_one(events.copy())
else:
mdb.events.insert(events.copy()) | def function[event_return, parameter[events]]:
constant[
Return events to Mongodb server
]
<ast.Tuple object at 0x7da18f810190> assign[=] call[name[_get_conn], parameter[]]
if call[name[isinstance], parameter[name[events], name[list]]] begin[:]
variable[events] assign[=] call[name[events]][constant[0]]
if call[name[isinstance], parameter[name[events], name[dict]]] begin[:]
call[name[log].debug, parameter[name[events]]]
if compare[name[PYMONGO_VERSION] greater[>] call[name[_LooseVersion], parameter[constant[2.3]]]] begin[:]
call[name[mdb].events.insert_one, parameter[call[name[events].copy, parameter[]]]] | keyword[def] identifier[event_return] ( identifier[events] ):
literal[string]
identifier[conn] , identifier[mdb] = identifier[_get_conn] ( identifier[ret] = keyword[None] )
keyword[if] identifier[isinstance] ( identifier[events] , identifier[list] ):
identifier[events] = identifier[events] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[events] , identifier[dict] ):
identifier[log] . identifier[debug] ( identifier[events] )
keyword[if] identifier[PYMONGO_VERSION] > identifier[_LooseVersion] ( literal[string] ):
identifier[mdb] . identifier[events] . identifier[insert_one] ( identifier[events] . identifier[copy] ())
keyword[else] :
identifier[mdb] . identifier[events] . identifier[insert] ( identifier[events] . identifier[copy] ()) | def event_return(events):
"""
Return events to Mongodb server
"""
(conn, mdb) = _get_conn(ret=None)
if isinstance(events, list):
events = events[0] # depends on [control=['if'], data=[]]
if isinstance(events, dict):
log.debug(events)
if PYMONGO_VERSION > _LooseVersion('2.3'):
mdb.events.insert_one(events.copy()) # depends on [control=['if'], data=[]]
else:
mdb.events.insert(events.copy()) # depends on [control=['if'], data=[]] |
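
event_return is written as a Salt returner: _get_conn is expected to hand back a pymongo database handle, and each event dict lands in an events collection. The same insert pattern with plain pymongo, as a hedged sketch (URI and database name are placeholders):

import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')   # placeholder URI
mdb = client['salt']
event = {'tag': 'salt/job/20190101/ret/minion1', 'data': {'success': True}}
mdb.events.insert_one(event.copy())     # insert_one() exists on pymongo >= 3.0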
def insert_before(self, text):
"""
Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync.
"""
selection_state = self.selection
if selection_state:
selection_state = SelectionState(
original_cursor_position=selection_state.original_cursor_position + len(text),
type=selection_state.type)
return Document(
text=text + self.text,
cursor_position=self.cursor_position + len(text),
selection=selection_state) | def function[insert_before, parameter[self, text]]:
constant[
Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync.
]
variable[selection_state] assign[=] name[self].selection
if name[selection_state] begin[:]
variable[selection_state] assign[=] call[name[SelectionState], parameter[]]
return[call[name[Document], parameter[]]] | keyword[def] identifier[insert_before] ( identifier[self] , identifier[text] ):
literal[string]
identifier[selection_state] = identifier[self] . identifier[selection]
keyword[if] identifier[selection_state] :
identifier[selection_state] = identifier[SelectionState] (
identifier[original_cursor_position] = identifier[selection_state] . identifier[original_cursor_position] + identifier[len] ( identifier[text] ),
identifier[type] = identifier[selection_state] . identifier[type] )
keyword[return] identifier[Document] (
identifier[text] = identifier[text] + identifier[self] . identifier[text] ,
identifier[cursor_position] = identifier[self] . identifier[cursor_position] + identifier[len] ( identifier[text] ),
identifier[selection] = identifier[selection_state] ) | def insert_before(self, text):
"""
Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync.
"""
selection_state = self.selection
if selection_state:
selection_state = SelectionState(original_cursor_position=selection_state.original_cursor_position + len(text), type=selection_state.type) # depends on [control=['if'], data=[]]
return Document(text=text + self.text, cursor_position=self.cursor_position + len(text), selection=selection_state) |
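
This matches the Document.insert_before API known from prompt_toolkit; a short usage sketch, assuming the usual Document(text, cursor_position) constructor:

from prompt_toolkit.document import Document

doc = Document(text='world', cursor_position=5)
new_doc = doc.insert_before('hello ')
print(new_doc.text)              # 'hello world'
print(new_doc.cursor_position)   # 11 -- shifted right by len('hello ')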
def get_file_hexdigest(filename, blocksize=1024*1024*10):
'''Get a hex digest of a file.'''
if hashlib.__name__ == 'hashlib':
m = hashlib.md5() # new - 'hashlib' module
else:
m = hashlib.new() # old - 'md5' module - remove once py2.4 gone
fd = open(filename, 'r')
while True:
data = fd.read(blocksize)
if len(data) == 0:
break
m.update(data)
fd.close()
return m.hexdigest() | def function[get_file_hexdigest, parameter[filename, blocksize]]:
constant[Get a hex digest of a file.]
if compare[name[hashlib].__name__ equal[==] constant[hashlib]] begin[:]
variable[m] assign[=] call[name[hashlib].md5, parameter[]]
variable[fd] assign[=] call[name[open], parameter[name[filename], constant[r]]]
while constant[True] begin[:]
variable[data] assign[=] call[name[fd].read, parameter[name[blocksize]]]
if compare[call[name[len], parameter[name[data]]] equal[==] constant[0]] begin[:]
break
call[name[m].update, parameter[name[data]]]
call[name[fd].close, parameter[]]
return[call[name[m].hexdigest, parameter[]]] | keyword[def] identifier[get_file_hexdigest] ( identifier[filename] , identifier[blocksize] = literal[int] * literal[int] * literal[int] ):
literal[string]
keyword[if] identifier[hashlib] . identifier[__name__] == literal[string] :
identifier[m] = identifier[hashlib] . identifier[md5] ()
keyword[else] :
identifier[m] = identifier[hashlib] . identifier[new] ()
identifier[fd] = identifier[open] ( identifier[filename] , literal[string] )
keyword[while] keyword[True] :
identifier[data] = identifier[fd] . identifier[read] ( identifier[blocksize] )
keyword[if] identifier[len] ( identifier[data] )== literal[int] :
keyword[break]
identifier[m] . identifier[update] ( identifier[data] )
identifier[fd] . identifier[close] ()
keyword[return] identifier[m] . identifier[hexdigest] () | def get_file_hexdigest(filename, blocksize=1024 * 1024 * 10):
"""Get a hex digest of a file."""
if hashlib.__name__ == 'hashlib':
m = hashlib.md5() # new - 'hashlib' module # depends on [control=['if'], data=[]]
else:
m = hashlib.new() # old - 'md5' module - remove once py2.4 gone
fd = open(filename, 'r')
while True:
data = fd.read(blocksize)
if len(data) == 0:
break # depends on [control=['if'], data=[]]
m.update(data) # depends on [control=['while'], data=[]]
fd.close()
return m.hexdigest() |
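
The function above is Python 2-era code (note the md5-module fallback and the text-mode open). A Python 3 equivalent for comparison must read in binary mode so that update() receives bytes:

import hashlib

def get_file_hexdigest_py3(filename, blocksize=1024 * 1024 * 10):
    '''Python 3 rewrite: hashlib only, binary-mode read.'''
    m = hashlib.md5()
    with open(filename, 'rb') as fd:     # 'rb' -- hash raw bytes, not text
        while True:
            data = fd.read(blocksize)
            if not data:
                break
            m.update(data)
    return m.hexdigest()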
def frequencyRedefinition(CellChannelDescription_presence=0):
"""Frequency redefinition Section 9.1.13"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0x14) # 00010100
c = ChannelDescription()
d = MobileAllocation()
e = StartingTime()
packet = a / b / c / d / e
if CellChannelDescription_presence is 1:
f = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0)
packet = packet / f
return packet | def function[frequencyRedefinition, parameter[CellChannelDescription_presence]]:
constant[Frequency redefinition Section 9.1.13]
variable[a] assign[=] call[name[TpPd], parameter[]]
variable[b] assign[=] call[name[MessageType], parameter[]]
variable[c] assign[=] call[name[ChannelDescription], parameter[]]
variable[d] assign[=] call[name[MobileAllocation], parameter[]]
variable[e] assign[=] call[name[StartingTime], parameter[]]
variable[packet] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[a] / name[b]] / name[c]] / name[d]] / name[e]]
if compare[name[CellChannelDescription_presence] is constant[1]] begin[:]
variable[f] assign[=] call[name[CellChannelDescriptionHdr], parameter[]]
variable[packet] assign[=] binary_operation[name[packet] / name[f]]
return[name[packet]] | keyword[def] identifier[frequencyRedefinition] ( identifier[CellChannelDescription_presence] = literal[int] ):
literal[string]
identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] )
identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] )
identifier[c] = identifier[ChannelDescription] ()
identifier[d] = identifier[MobileAllocation] ()
identifier[e] = identifier[StartingTime] ()
identifier[packet] = identifier[a] / identifier[b] / identifier[c] / identifier[d] / identifier[e]
keyword[if] identifier[CellChannelDescription_presence] keyword[is] literal[int] :
identifier[f] = identifier[CellChannelDescriptionHdr] ( identifier[ieiCCD] = literal[int] , identifier[eightBitCCD] = literal[int] )
identifier[packet] = identifier[packet] / identifier[f]
keyword[return] identifier[packet] | def frequencyRedefinition(CellChannelDescription_presence=0):
"""Frequency redefinition Section 9.1.13"""
a = TpPd(pd=6)
b = MessageType(mesType=20) # 00010100
c = ChannelDescription()
d = MobileAllocation()
e = StartingTime()
packet = a / b / c / d / e
if CellChannelDescription_presence is 1:
f = CellChannelDescriptionHdr(ieiCCD=98, eightBitCCD=0)
packet = packet / f # depends on [control=['if'], data=[]]
return packet |
def genKw(w,msk,z):
"""
Generates key Kw using key-selector @w, master secret key @msk, and
table value @z.
@returns Kw as a BigInt.
"""
# Hash inputs into a string of bytes
b = hmac(msk, z + w, tag="TAG_PYTHIA_KW")
# Convert the string into a long value (no larger than the order of Gt),
# then return a BigInt value.
return BigInt(longFromString(b) % long(orderGt())) | def function[genKw, parameter[w, msk, z]]:
constant[
Generates key Kw using key-selector @w, master secret key @msk, and
table value @z.
@returns Kw as a BigInt.
]
variable[b] assign[=] call[name[hmac], parameter[name[msk], binary_operation[name[z] + name[w]]]]
return[call[name[BigInt], parameter[binary_operation[call[name[longFromString], parameter[name[b]]] <ast.Mod object at 0x7da2590d6920> call[name[long], parameter[call[name[orderGt], parameter[]]]]]]]] | keyword[def] identifier[genKw] ( identifier[w] , identifier[msk] , identifier[z] ):
literal[string]
identifier[b] = identifier[hmac] ( identifier[msk] , identifier[z] + identifier[w] , identifier[tag] = literal[string] )
keyword[return] identifier[BigInt] ( identifier[longFromString] ( identifier[b] )% identifier[long] ( identifier[orderGt] ())) | def genKw(w, msk, z):
"""
Generates key Kw using key-selector @w, master secret key @msk, and
table value @z.
@returns Kw as a BigInt.
"""
# Hash inputs into a string of bytes
b = hmac(msk, z + w, tag='TAG_PYTHIA_KW')
# Convert the string into a long value (no larger than the order of Gt),
# then return a BigInt value.
return BigInt(longFromString(b) % long(orderGt())) |
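
BigInt, longFromString, the tagged hmac helper and orderGt all come from the surrounding Pythia codebase, so the exact calls are library-specific. The same derive-then-reduce pattern with only the standard library, as a hedged sketch (the tag handling and group order are placeholders, not Pythia's real values):

import hashlib
import hmac as std_hmac

ORDER_GT = 2 ** 255 - 19    # placeholder group order

def gen_kw_sketch(w, msk, z):
    # Bind the key selector and table value to the master secret ...
    digest = std_hmac.new(msk, b'TAG_PYTHIA_KW' + z + w, hashlib.sha256).digest()
    # ... then reduce the digest into the scalar range of the group.
    return int.from_bytes(digest, 'big') % ORDER_GT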
def list_tables(self, libref, results: str = 'list'):
"""
        This method returns a list of (MEMNAME, MEMTYPE) tuples for the members of memtype DATA or VIEW in the library.
        If you would like a Pandas dataframe returned instead of a list, specify results='pandas'.
"""
if not self.nosub:
ll = self.submit("%put LIBREF_EXISTS=%sysfunc(libref("+libref+"));")
exists = ll['LOG'].rpartition('LIBREF_EXISTS=')[2].split('\n')[0]
if exists != '0':
print('Libref provided is not assigned')
return None
code = """
proc datasets dd=librefx nodetails nolist noprint;
contents memtype=(data view) nodetails
dir out=work._saspy_lib_list(keep=memname memtype) data=_all_ noprint;
run;
proc sql;
create table work._saspy_lib_list as select distinct * from work._saspy_lib_list;
quit;
""".replace('librefx', libref)
if self.nosub:
print(code)
return None
else:
ll = self.submit(code, results='text')
if results != 'list':
res = self.sd2df('_saspy_lib_list', 'work')
return res
code = """
data _null_;
set work._saspy_lib_list end=last curobs=first;
if first EQ 1 then
put 'MEMSTART';
put 'MEMNAME=' memname;
put 'MEMTYPE=' memtype;
if last then
put 'MEMEND';
run;
"""
ll = self.submit(code, results='text')
res = []
log = ll['LOG'].rpartition('MEMEND')[0].rpartition('MEMSTART')
for i in range(log[2].count('MEMNAME')):
log = log[2].partition('MEMNAME=')[2].partition('\n')
key = log[0]
log = log[2].partition('MEMTYPE=')[2].partition('\n')
val = log[0]
res.append(tuple((key, val)))
return res | def function[list_tables, parameter[self, libref, results]]:
constant[
This method returns a list of tuples containing MEMNAME, MEMTYPE of members in the library of memtype data or view
If you would like a Pandas dataframe returned instead of a list, specify results='pandas'
]
if <ast.UnaryOp object at 0x7da18f812200> begin[:]
variable[ll] assign[=] call[name[self].submit, parameter[binary_operation[binary_operation[constant[%put LIBREF_EXISTS=%sysfunc(libref(] + name[libref]] + constant[));]]]]
variable[exists] assign[=] call[call[call[call[call[name[ll]][constant[LOG]].rpartition, parameter[constant[LIBREF_EXISTS=]]]][constant[2]].split, parameter[constant[
]]]][constant[0]]
if compare[name[exists] not_equal[!=] constant[0]] begin[:]
call[name[print], parameter[constant[Libref provided is not assigned]]]
return[constant[None]]
variable[code] assign[=] call[constant[
proc datasets dd=librefx nodetails nolist noprint;
contents memtype=(data view) nodetails
dir out=work._saspy_lib_list(keep=memname memtype) data=_all_ noprint;
run;
proc sql;
create table work._saspy_lib_list as select distinct * from work._saspy_lib_list;
quit;
].replace, parameter[constant[librefx], name[libref]]]
if name[self].nosub begin[:]
call[name[print], parameter[name[code]]]
return[constant[None]]
if compare[name[results] not_equal[!=] constant[list]] begin[:]
variable[res] assign[=] call[name[self].sd2df, parameter[constant[_saspy_lib_list], constant[work]]]
return[name[res]]
variable[code] assign[=] constant[
data _null_;
set work._saspy_lib_list end=last curobs=first;
if first EQ 1 then
put 'MEMSTART';
put 'MEMNAME=' memname;
put 'MEMTYPE=' memtype;
if last then
put 'MEMEND';
run;
]
variable[ll] assign[=] call[name[self].submit, parameter[name[code]]]
variable[res] assign[=] list[[]]
variable[log] assign[=] call[call[call[call[name[ll]][constant[LOG]].rpartition, parameter[constant[MEMEND]]]][constant[0]].rpartition, parameter[constant[MEMSTART]]]
for taget[name[i]] in starred[call[name[range], parameter[call[call[name[log]][constant[2]].count, parameter[constant[MEMNAME]]]]]] begin[:]
variable[log] assign[=] call[call[call[call[name[log]][constant[2]].partition, parameter[constant[MEMNAME=]]]][constant[2]].partition, parameter[constant[
]]]
variable[key] assign[=] call[name[log]][constant[0]]
variable[log] assign[=] call[call[call[call[name[log]][constant[2]].partition, parameter[constant[MEMTYPE=]]]][constant[2]].partition, parameter[constant[
]]]
variable[val] assign[=] call[name[log]][constant[0]]
call[name[res].append, parameter[call[name[tuple], parameter[tuple[[<ast.Name object at 0x7da18f810700>, <ast.Name object at 0x7da18f812710>]]]]]]
return[name[res]] | keyword[def] identifier[list_tables] ( identifier[self] , identifier[libref] , identifier[results] : identifier[str] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[nosub] :
identifier[ll] = identifier[self] . identifier[submit] ( literal[string] + identifier[libref] + literal[string] )
identifier[exists] = identifier[ll] [ literal[string] ]. identifier[rpartition] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[exists] != literal[string] :
identifier[print] ( literal[string] )
keyword[return] keyword[None]
identifier[code] = literal[string] . identifier[replace] ( literal[string] , identifier[libref] )
keyword[if] identifier[self] . identifier[nosub] :
identifier[print] ( identifier[code] )
keyword[return] keyword[None]
keyword[else] :
identifier[ll] = identifier[self] . identifier[submit] ( identifier[code] , identifier[results] = literal[string] )
keyword[if] identifier[results] != literal[string] :
identifier[res] = identifier[self] . identifier[sd2df] ( literal[string] , literal[string] )
keyword[return] identifier[res]
identifier[code] = literal[string]
identifier[ll] = identifier[self] . identifier[submit] ( identifier[code] , identifier[results] = literal[string] )
identifier[res] =[]
identifier[log] = identifier[ll] [ literal[string] ]. identifier[rpartition] ( literal[string] )[ literal[int] ]. identifier[rpartition] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[log] [ literal[int] ]. identifier[count] ( literal[string] )):
identifier[log] = identifier[log] [ literal[int] ]. identifier[partition] ( literal[string] )[ literal[int] ]. identifier[partition] ( literal[string] )
identifier[key] = identifier[log] [ literal[int] ]
identifier[log] = identifier[log] [ literal[int] ]. identifier[partition] ( literal[string] )[ literal[int] ]. identifier[partition] ( literal[string] )
identifier[val] = identifier[log] [ literal[int] ]
identifier[res] . identifier[append] ( identifier[tuple] (( identifier[key] , identifier[val] )))
keyword[return] identifier[res] | def list_tables(self, libref, results: str='list'):
"""
    This method returns a list of (MEMNAME, MEMTYPE) tuples for the members of memtype DATA or VIEW in the library.
    If you would like a Pandas dataframe returned instead of a list, specify results='pandas'.
"""
if not self.nosub:
ll = self.submit('%put LIBREF_EXISTS=%sysfunc(libref(' + libref + '));')
exists = ll['LOG'].rpartition('LIBREF_EXISTS=')[2].split('\n')[0]
if exists != '0':
print('Libref provided is not assigned')
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
code = '\n proc datasets dd=librefx nodetails nolist noprint;\n contents memtype=(data view) nodetails \n dir out=work._saspy_lib_list(keep=memname memtype) data=_all_ noprint;\n run;\n \n proc sql;\n create table work._saspy_lib_list as select distinct * from work._saspy_lib_list;\n quit;\n '.replace('librefx', libref)
if self.nosub:
print(code)
return None # depends on [control=['if'], data=[]]
else:
ll = self.submit(code, results='text')
if results != 'list':
res = self.sd2df('_saspy_lib_list', 'work')
return res # depends on [control=['if'], data=[]]
code = "\n data _null_;\n set work._saspy_lib_list end=last curobs=first;\n if first EQ 1 then\n put 'MEMSTART';\n put 'MEMNAME=' memname;\n put 'MEMTYPE=' memtype;\n if last then\n put 'MEMEND';\n run;\n "
ll = self.submit(code, results='text')
res = []
log = ll['LOG'].rpartition('MEMEND')[0].rpartition('MEMSTART')
for i in range(log[2].count('MEMNAME')):
log = log[2].partition('MEMNAME=')[2].partition('\n')
key = log[0]
log = log[2].partition('MEMTYPE=')[2].partition('\n')
val = log[0]
res.append(tuple((key, val))) # depends on [control=['for'], data=[]]
return res |
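
Hedged usage sketch against a live saspy session (session configuration and the libref are assumptions):

import saspy

sas = saspy.SASsession()                  # picks up settings from sascfg
tables = sas.list_tables('sashelp')       # default: list of (memname, memtype)
# e.g. [('CARS', 'DATA'), ('CLASS', 'DATA'), ('VCOLUMN', 'VIEW'), ...]
df = sas.list_tables('sashelp', results='pandas')   # same data as a DataFrame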
def load_plugins(self, plugin_path):
"""
        Loads plugins from modules in plugin_path. Looks for a config_name property
        on each object that is found; if one is present, the object is added to the
        dictionary under that config_name as the key. config_name should be unique across plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
else:
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing
self.logger.debug('Done loading plugins')
return plugins | def function[load_plugins, parameter[self, plugin_path]]:
constant[
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
]
call[name[self].logger.debug, parameter[call[constant[Loading plugins from {0}].format, parameter[name[plugin_path]]]]]
variable[plugins] assign[=] dictionary[[], []]
variable[plugin_dir] assign[=] call[name[os].path.realpath, parameter[name[plugin_path]]]
call[name[sys].path.append, parameter[name[plugin_dir]]]
for taget[name[f]] in starred[call[name[os].listdir, parameter[name[plugin_dir]]]] begin[:]
if call[name[f].endswith, parameter[constant[.py]]] begin[:]
variable[name] assign[=] call[name[f]][<ast.Slice object at 0x7da1b162b670>]
<ast.Try object at 0x7da1b162b340>
call[name[self].logger.debug, parameter[constant[Done loading plugins]]]
return[name[plugins]] | keyword[def] identifier[load_plugins] ( identifier[self] , identifier[plugin_path] ):
literal[string]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[plugin_path] ))
identifier[plugins] ={}
identifier[plugin_dir] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[plugin_path] )
identifier[sys] . identifier[path] . identifier[append] ( identifier[plugin_dir] )
keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[plugin_dir] ):
keyword[if] identifier[f] . identifier[endswith] ( literal[string] ):
identifier[name] = identifier[f] [:- literal[int] ]
keyword[elif] identifier[f] . identifier[endswith] ( literal[string] ):
identifier[name] = identifier[f] [:- literal[int] ]
keyword[else] :
keyword[continue]
keyword[try] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[f] ))
identifier[mod] = identifier[__import__] ( identifier[name] , identifier[globals] (), identifier[locals] (),[], literal[int] )
keyword[for] identifier[plugin_class] keyword[in] identifier[inspect] . identifier[getmembers] ( identifier[mod] ):
keyword[if] identifier[plugin_class] [ literal[int] ][ literal[int] : literal[int] ]== literal[string] :
keyword[continue]
keyword[if] identifier[hasattr] ( identifier[plugin_class] [ literal[int] ], literal[string] ):
keyword[if] identifier[plugin_class] [ literal[int] ]. identifier[config_name] keyword[is] keyword[not] keyword[None] :
identifier[plugins] [ identifier[plugin_class] [ literal[int] ]. identifier[config_name] ]= identifier[plugin_class] [ literal[int] ]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[plugin_class] [ literal[int] ]. identifier[config_name] ))
keyword[except] identifier[ImportError] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[error] ( identifier[e] )
keyword[pass]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[plugins] | def load_plugins(self, plugin_path):
"""
Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name
"""
self.logger.debug('Loading plugins from {0}'.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith('.py'):
name = f[:-3] # depends on [control=['if'], data=[]]
elif f.endswith('.pyc'):
name = f[:-4] # depends on [control=['if'], data=[]]
else:
# Possible support for plugins inside directories - worth doing?
# elif os.path.isdir(os.path.join(plugin_dir, f)):
# name = f
continue
try:
self.logger.debug('Adding plugin from: {0}'.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
if plugin_class[0][0:2] == '__': # Skip dunder members - builtins, etc
continue # depends on [control=['if'], data=[]]
if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
# Skip plugins where config_name is None, like the base classes
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['plugin_class']] # depends on [control=['try'], data=[]]
# Todo: Add error checking here. If a plugin with that name already exists,
# log an error. Quit or continue?
except ImportError as e:
self.logger.error(e)
pass # problem importing # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['f']]
self.logger.debug('Done loading plugins')
return plugins |
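
A runnable sketch of the discovery rule alone: any module member that carries a non-None config_name attribute is registered under that name. A plugin module is faked here with types.ModuleType instead of a file on disk:

import inspect
import types

mod = types.ModuleType('my_plugin')       # stands in for an imported plugin file

class BasePlugin(object):
    config_name = None                    # base classes opt out with None

class EchoPlugin(BasePlugin):
    config_name = 'echo'                  # discovered under this key

mod.BasePlugin, mod.EchoPlugin = BasePlugin, EchoPlugin

plugins = {}
for name, member in inspect.getmembers(mod):
    if name.startswith('__'):
        continue
    if getattr(member, 'config_name', None) is not None:
        plugins[member.config_name] = member
print(plugins)                            # {'echo': <class '...EchoPlugin'>}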
def write_contents(self, root_target, reduced_dependencies, chroot):
"""Write contents of the target."""
def write_target_source(target, src):
chroot.copy(os.path.join(get_buildroot(), target.target_base, src),
os.path.join(self.SOURCE_ROOT, src))
# check parent __init__.pys to see if they also need to be copied. this is to allow
# us to determine if they belong to regular packages or namespace packages.
while True:
src = os.path.dirname(src)
if not src:
# Do not allow the repository root to leak (i.e. '.' should not be a package in setup.py)
break
if os.path.exists(os.path.join(target.target_base, src, '__init__.py')):
chroot.copy(os.path.join(target.target_base, src, '__init__.py'),
os.path.join(self.SOURCE_ROOT, src, '__init__.py'))
def write_target(target):
# We want to operate on the final sources target owns, so we potentially replace it with
# the target derived from it (by a codegen task).
subject = self.derived_by_original.get(target, target)
for rel_source in subject.sources_relative_to_buildroot():
abs_source_path = os.path.join(get_buildroot(), rel_source)
abs_source_root_path = os.path.join(get_buildroot(), subject.target_base)
source_root_relative_path = os.path.relpath(abs_source_path, abs_source_root_path)
write_target_source(subject, source_root_relative_path)
write_target(root_target)
for dependency in reduced_dependencies:
if self.is_python_target(dependency) and not dependency.provides:
write_target(dependency)
elif self.is_resources_target(dependency):
write_target(dependency) | def function[write_contents, parameter[self, root_target, reduced_dependencies, chroot]]:
constant[Write contents of the target.]
def function[write_target_source, parameter[target, src]]:
call[name[chroot].copy, parameter[call[name[os].path.join, parameter[call[name[get_buildroot], parameter[]], name[target].target_base, name[src]]], call[name[os].path.join, parameter[name[self].SOURCE_ROOT, name[src]]]]]
while constant[True] begin[:]
variable[src] assign[=] call[name[os].path.dirname, parameter[name[src]]]
if <ast.UnaryOp object at 0x7da1b22a65c0> begin[:]
break
if call[name[os].path.exists, parameter[call[name[os].path.join, parameter[name[target].target_base, name[src], constant[__init__.py]]]]] begin[:]
call[name[chroot].copy, parameter[call[name[os].path.join, parameter[name[target].target_base, name[src], constant[__init__.py]]], call[name[os].path.join, parameter[name[self].SOURCE_ROOT, name[src], constant[__init__.py]]]]]
def function[write_target, parameter[target]]:
variable[subject] assign[=] call[name[self].derived_by_original.get, parameter[name[target], name[target]]]
for taget[name[rel_source]] in starred[call[name[subject].sources_relative_to_buildroot, parameter[]]] begin[:]
variable[abs_source_path] assign[=] call[name[os].path.join, parameter[call[name[get_buildroot], parameter[]], name[rel_source]]]
variable[abs_source_root_path] assign[=] call[name[os].path.join, parameter[call[name[get_buildroot], parameter[]], name[subject].target_base]]
variable[source_root_relative_path] assign[=] call[name[os].path.relpath, parameter[name[abs_source_path], name[abs_source_root_path]]]
call[name[write_target_source], parameter[name[subject], name[source_root_relative_path]]]
call[name[write_target], parameter[name[root_target]]]
for taget[name[dependency]] in starred[name[reduced_dependencies]] begin[:]
if <ast.BoolOp object at 0x7da1b1e8ebf0> begin[:]
call[name[write_target], parameter[name[dependency]]] | keyword[def] identifier[write_contents] ( identifier[self] , identifier[root_target] , identifier[reduced_dependencies] , identifier[chroot] ):
literal[string]
keyword[def] identifier[write_target_source] ( identifier[target] , identifier[src] ):
identifier[chroot] . identifier[copy] ( identifier[os] . identifier[path] . identifier[join] ( identifier[get_buildroot] (), identifier[target] . identifier[target_base] , identifier[src] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[SOURCE_ROOT] , identifier[src] ))
keyword[while] keyword[True] :
identifier[src] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[src] )
keyword[if] keyword[not] identifier[src] :
keyword[break]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[target] . identifier[target_base] , identifier[src] , literal[string] )):
identifier[chroot] . identifier[copy] ( identifier[os] . identifier[path] . identifier[join] ( identifier[target] . identifier[target_base] , identifier[src] , literal[string] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[SOURCE_ROOT] , identifier[src] , literal[string] ))
keyword[def] identifier[write_target] ( identifier[target] ):
identifier[subject] = identifier[self] . identifier[derived_by_original] . identifier[get] ( identifier[target] , identifier[target] )
keyword[for] identifier[rel_source] keyword[in] identifier[subject] . identifier[sources_relative_to_buildroot] ():
identifier[abs_source_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[get_buildroot] (), identifier[rel_source] )
identifier[abs_source_root_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[get_buildroot] (), identifier[subject] . identifier[target_base] )
identifier[source_root_relative_path] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[abs_source_path] , identifier[abs_source_root_path] )
identifier[write_target_source] ( identifier[subject] , identifier[source_root_relative_path] )
identifier[write_target] ( identifier[root_target] )
keyword[for] identifier[dependency] keyword[in] identifier[reduced_dependencies] :
keyword[if] identifier[self] . identifier[is_python_target] ( identifier[dependency] ) keyword[and] keyword[not] identifier[dependency] . identifier[provides] :
identifier[write_target] ( identifier[dependency] )
keyword[elif] identifier[self] . identifier[is_resources_target] ( identifier[dependency] ):
identifier[write_target] ( identifier[dependency] ) | def write_contents(self, root_target, reduced_dependencies, chroot):
"""Write contents of the target."""
def write_target_source(target, src):
chroot.copy(os.path.join(get_buildroot(), target.target_base, src), os.path.join(self.SOURCE_ROOT, src))
# check parent __init__.pys to see if they also need to be copied. this is to allow
# us to determine if they belong to regular packages or namespace packages.
while True:
src = os.path.dirname(src)
if not src:
# Do not allow the repository root to leak (i.e. '.' should not be a package in setup.py)
break # depends on [control=['if'], data=[]]
if os.path.exists(os.path.join(target.target_base, src, '__init__.py')):
chroot.copy(os.path.join(target.target_base, src, '__init__.py'), os.path.join(self.SOURCE_ROOT, src, '__init__.py')) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
def write_target(target):
# We want to operate on the final sources target owns, so we potentially replace it with
# the target derived from it (by a codegen task).
subject = self.derived_by_original.get(target, target)
for rel_source in subject.sources_relative_to_buildroot():
abs_source_path = os.path.join(get_buildroot(), rel_source)
abs_source_root_path = os.path.join(get_buildroot(), subject.target_base)
source_root_relative_path = os.path.relpath(abs_source_path, abs_source_root_path)
write_target_source(subject, source_root_relative_path) # depends on [control=['for'], data=['rel_source']]
write_target(root_target)
for dependency in reduced_dependencies:
if self.is_python_target(dependency) and (not dependency.provides):
write_target(dependency) # depends on [control=['if'], data=[]]
elif self.is_resources_target(dependency):
write_target(dependency) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dependency']] |
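
The notable detail above is the upward walk that also copies every parent __init__.py, so the chroot can tell regular packages from namespace packages. That walk in isolation, as a stdlib-only sketch:

import os

def parent_init_files(src):
    '''Yield the __init__.py paths above src, innermost first.'''
    while True:
        src = os.path.dirname(src)
        if not src:           # never treat the source root itself as a package
            break
        yield os.path.join(src, '__init__.py')

print(list(parent_init_files('pkg/sub/mod.py')))
# ['pkg/sub/__init__.py', 'pkg/__init__.py']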
def fill_parentidid2obj_r1(self, id2obj_user, child_obj):
"""Fill id2obj_user with all parent/relationship key item IDs and their objects."""
for higher_obj in self._getobjs_higher(child_obj):
if higher_obj.item_id not in id2obj_user:
id2obj_user[higher_obj.item_id] = higher_obj
self.fill_parentidid2obj_r1(id2obj_user, higher_obj) | def function[fill_parentidid2obj_r1, parameter[self, id2obj_user, child_obj]]:
constant[Fill id2obj_user with all parent/relationship key item IDs and their objects.]
for taget[name[higher_obj]] in starred[call[name[self]._getobjs_higher, parameter[name[child_obj]]]] begin[:]
if compare[name[higher_obj].item_id <ast.NotIn object at 0x7da2590d7190> name[id2obj_user]] begin[:]
call[name[id2obj_user]][name[higher_obj].item_id] assign[=] name[higher_obj]
call[name[self].fill_parentidid2obj_r1, parameter[name[id2obj_user], name[higher_obj]]] | keyword[def] identifier[fill_parentidid2obj_r1] ( identifier[self] , identifier[id2obj_user] , identifier[child_obj] ):
literal[string]
keyword[for] identifier[higher_obj] keyword[in] identifier[self] . identifier[_getobjs_higher] ( identifier[child_obj] ):
keyword[if] identifier[higher_obj] . identifier[item_id] keyword[not] keyword[in] identifier[id2obj_user] :
identifier[id2obj_user] [ identifier[higher_obj] . identifier[item_id] ]= identifier[higher_obj]
identifier[self] . identifier[fill_parentidid2obj_r1] ( identifier[id2obj_user] , identifier[higher_obj] ) | def fill_parentidid2obj_r1(self, id2obj_user, child_obj):
"""Fill id2obj_user with all parent/relationship key item IDs and their objects."""
for higher_obj in self._getobjs_higher(child_obj):
if higher_obj.item_id not in id2obj_user:
id2obj_user[higher_obj.item_id] = higher_obj
self.fill_parentidid2obj_r1(id2obj_user, higher_obj) # depends on [control=['if'], data=['id2obj_user']] # depends on [control=['for'], data=['higher_obj']] |
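
A sketch of the recursion with the parent lookup stubbed as a plain attribute (the real _getobjs_higher resolves parent and relationship links on DAG terms; the GO identifiers here are just example strings):

class Term(object):
    def __init__(self, item_id, parents=()):
        self.item_id = item_id
        self.parents = list(parents)

root = Term('GO:0008150')
mid = Term('GO:0009987', parents=[root])
leaf = Term('GO:0007049', parents=[mid])

def fill_parents(id2obj, child):
    '''Stand-alone version of the method, using the stub's parents list.'''
    for higher in child.parents:
        if higher.item_id not in id2obj:
            id2obj[higher.item_id] = higher
            fill_parents(id2obj, higher)

id2obj = {}
fill_parents(id2obj, leaf)
print(sorted(id2obj))   # ['GO:0008150', 'GO:0009987']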
def _compute_errors(self):
"""
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
covariance = coeffs[1]
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual_rms = np.std(self.sample.values[2] - model)
errors = np.diagonal(covariance) * residual_rms
eps = self.sample.geometry.eps
pa = self.sample.geometry.pa
# parameter errors result from direct projection of
            #  coefficient errors. These were shown to be the error estimators
# that best convey the errors measured in Monte Carlo
# experiments (see Busko 1996; ASPC 101, 139).
ea = abs(errors[2] / self.grad)
eb = abs(errors[1] * (1. - eps) / self.grad)
self.x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2)
self.y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2)
self.ellip_err = (abs(2. * errors[4] * (1. - eps) / self.sma /
self.grad))
if (abs(eps) > np.finfo(float).resolution):
self.pa_err = (abs(2. * errors[3] * (1. - eps) / self.sma /
self.grad / (1. - (1. - eps)**2)))
else:
self.pa_err = 0.
except Exception: # we want to catch everything
self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0. | def function[_compute_errors, parameter[self]]:
constant[
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
]
<ast.Try object at 0x7da20c7c8430> | keyword[def] identifier[_compute_errors] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[coeffs] = identifier[fit_first_and_second_harmonics] ( identifier[self] . identifier[sample] . identifier[values] [ literal[int] ],
identifier[self] . identifier[sample] . identifier[values] [ literal[int] ])
identifier[covariance] = identifier[coeffs] [ literal[int] ]
identifier[coeffs] = identifier[coeffs] [ literal[int] ]
identifier[model] = identifier[first_and_second_harmonic_function] ( identifier[self] . identifier[sample] . identifier[values] [ literal[int] ],
identifier[coeffs] )
identifier[residual_rms] = identifier[np] . identifier[std] ( identifier[self] . identifier[sample] . identifier[values] [ literal[int] ]- identifier[model] )
identifier[errors] = identifier[np] . identifier[diagonal] ( identifier[covariance] )* identifier[residual_rms]
identifier[eps] = identifier[self] . identifier[sample] . identifier[geometry] . identifier[eps]
identifier[pa] = identifier[self] . identifier[sample] . identifier[geometry] . identifier[pa]
identifier[ea] = identifier[abs] ( identifier[errors] [ literal[int] ]/ identifier[self] . identifier[grad] )
identifier[eb] = identifier[abs] ( identifier[errors] [ literal[int] ]*( literal[int] - identifier[eps] )/ identifier[self] . identifier[grad] )
identifier[self] . identifier[x0_err] = identifier[np] . identifier[sqrt] (( identifier[ea] * identifier[np] . identifier[cos] ( identifier[pa] ))** literal[int] +( identifier[eb] * identifier[np] . identifier[sin] ( identifier[pa] ))** literal[int] )
identifier[self] . identifier[y0_err] = identifier[np] . identifier[sqrt] (( identifier[ea] * identifier[np] . identifier[sin] ( identifier[pa] ))** literal[int] +( identifier[eb] * identifier[np] . identifier[cos] ( identifier[pa] ))** literal[int] )
identifier[self] . identifier[ellip_err] =( identifier[abs] ( literal[int] * identifier[errors] [ literal[int] ]*( literal[int] - identifier[eps] )/ identifier[self] . identifier[sma] /
identifier[self] . identifier[grad] ))
keyword[if] ( identifier[abs] ( identifier[eps] )> identifier[np] . identifier[finfo] ( identifier[float] ). identifier[resolution] ):
identifier[self] . identifier[pa_err] =( identifier[abs] ( literal[int] * identifier[errors] [ literal[int] ]*( literal[int] - identifier[eps] )/ identifier[self] . identifier[sma] /
identifier[self] . identifier[grad] /( literal[int] -( literal[int] - identifier[eps] )** literal[int] )))
keyword[else] :
identifier[self] . identifier[pa_err] = literal[int]
keyword[except] identifier[Exception] :
identifier[self] . identifier[x0_err] = identifier[self] . identifier[y0_err] = identifier[self] . identifier[pa_err] = identifier[self] . identifier[ellip_err] = literal[int] | def _compute_errors(self):
"""
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0], self.sample.values[2])
covariance = coeffs[1]
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0], coeffs)
residual_rms = np.std(self.sample.values[2] - model)
errors = np.diagonal(covariance) * residual_rms
eps = self.sample.geometry.eps
pa = self.sample.geometry.pa
# parameter errors result from direct projection of
            # coefficient errors. These were shown to be the error estimators
# that best convey the errors measured in Monte Carlo
# experiments (see Busko 1996; ASPC 101, 139).
ea = abs(errors[2] / self.grad)
eb = abs(errors[1] * (1.0 - eps) / self.grad)
self.x0_err = np.sqrt((ea * np.cos(pa)) ** 2 + (eb * np.sin(pa)) ** 2)
self.y0_err = np.sqrt((ea * np.sin(pa)) ** 2 + (eb * np.cos(pa)) ** 2)
self.ellip_err = abs(2.0 * errors[4] * (1.0 - eps) / self.sma / self.grad)
if abs(eps) > np.finfo(float).resolution:
self.pa_err = abs(2.0 * errors[3] * (1.0 - eps) / self.sma / self.grad / (1.0 - (1.0 - eps) ** 2)) # depends on [control=['if'], data=[]]
else:
self.pa_err = 0.0 # depends on [control=['try'], data=[]]
except Exception: # we want to catch everything
self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0.0 # depends on [control=['except'], data=[]] |
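
The propagation step in isolation: given the diagonal errors of the four harmonic coefficients, the centre and shape errors follow by direct projection. A numeric sketch with fabricated inputs (all values illustrative only):

import numpy as np

errors = np.array([0.0, 0.02, 0.03, 0.01, 0.015])   # fabricated coefficient errors
grad, sma, eps, pa = -0.5, 10.0, 0.2, np.radians(30.0)

ea = abs(errors[2] / grad)
eb = abs(errors[1] * (1.0 - eps) / grad)
x0_err = np.sqrt((ea * np.cos(pa)) ** 2 + (eb * np.sin(pa)) ** 2)
y0_err = np.sqrt((ea * np.sin(pa)) ** 2 + (eb * np.cos(pa)) ** 2)
ellip_err = abs(2.0 * errors[4] * (1.0 - eps) / sma / grad)
pa_err = abs(2.0 * errors[3] * (1.0 - eps) / sma / grad /
             (1.0 - (1.0 - eps) ** 2))
print(x0_err, y0_err, ellip_err, pa_err)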
def __get_all_lowpoints(dfs_data):
"""Calculates the lowpoints for each node in a graph."""
lowpoint_1_lookup = {}
lowpoint_2_lookup = {}
ordering = dfs_data['ordering']
for node in ordering:
low_1, low_2 = __get_lowpoints(node, dfs_data)
lowpoint_1_lookup[node] = low_1
lowpoint_2_lookup[node] = low_2
return lowpoint_1_lookup, lowpoint_2_lookup | def function[__get_all_lowpoints, parameter[dfs_data]]:
constant[Calculates the lowpoints for each node in a graph.]
variable[lowpoint_1_lookup] assign[=] dictionary[[], []]
variable[lowpoint_2_lookup] assign[=] dictionary[[], []]
variable[ordering] assign[=] call[name[dfs_data]][constant[ordering]]
for taget[name[node]] in starred[name[ordering]] begin[:]
<ast.Tuple object at 0x7da1b2853790> assign[=] call[name[__get_lowpoints], parameter[name[node], name[dfs_data]]]
call[name[lowpoint_1_lookup]][name[node]] assign[=] name[low_1]
call[name[lowpoint_2_lookup]][name[node]] assign[=] name[low_2]
return[tuple[[<ast.Name object at 0x7da1b2853340>, <ast.Name object at 0x7da1b28533d0>]]] | keyword[def] identifier[__get_all_lowpoints] ( identifier[dfs_data] ):
literal[string]
identifier[lowpoint_1_lookup] ={}
identifier[lowpoint_2_lookup] ={}
identifier[ordering] = identifier[dfs_data] [ literal[string] ]
keyword[for] identifier[node] keyword[in] identifier[ordering] :
identifier[low_1] , identifier[low_2] = identifier[__get_lowpoints] ( identifier[node] , identifier[dfs_data] )
identifier[lowpoint_1_lookup] [ identifier[node] ]= identifier[low_1]
identifier[lowpoint_2_lookup] [ identifier[node] ]= identifier[low_2]
keyword[return] identifier[lowpoint_1_lookup] , identifier[lowpoint_2_lookup] | def __get_all_lowpoints(dfs_data):
"""Calculates the lowpoints for each node in a graph."""
lowpoint_1_lookup = {}
lowpoint_2_lookup = {}
ordering = dfs_data['ordering']
for node in ordering:
(low_1, low_2) = __get_lowpoints(node, dfs_data)
lowpoint_1_lookup[node] = low_1
lowpoint_2_lookup[node] = low_2 # depends on [control=['for'], data=['node']]
return (lowpoint_1_lookup, lowpoint_2_lookup) |
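
Shape of the input this expects, with __get_lowpoints stubbed out (real lowpoints come from back-edge analysis during the DFS; the values below are fabricated):

dfs_data = {'ordering': ['a', 'b', 'c']}
stub = {'a': (1, 1), 'b': (1, 2), 'c': (2, 3)}       # fabricated lowpoints

low1, low2 = {}, {}
for node in dfs_data['ordering']:
    l1, l2 = stub[node]         # stands in for __get_lowpoints(node, dfs_data)
    low1[node], low2[node] = l1, l2
print(low1, low2)   # {'a': 1, 'b': 1, 'c': 2} {'a': 1, 'b': 2, 'c': 3}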
def new(self, **kwargs):
'''Return a new ``Message`` instance. The arguments are
passed to the ``marrow.mailer.Message`` constructor.'''
app = self.app or current_app
mailer = app.extensions['marrowmailer']
msg = mailer.new(**kwargs)
msg.__class__ = Message
return msg | def function[new, parameter[self]]:
constant[Return a new ``Message`` instance. The arguments are
passed to the ``marrow.mailer.Message`` constructor.]
variable[app] assign[=] <ast.BoolOp object at 0x7da1b2351ab0>
variable[mailer] assign[=] call[name[app].extensions][constant[marrowmailer]]
variable[msg] assign[=] call[name[mailer].new, parameter[]]
name[msg].__class__ assign[=] name[Message]
return[name[msg]] | keyword[def] identifier[new] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[app] = identifier[self] . identifier[app] keyword[or] identifier[current_app]
identifier[mailer] = identifier[app] . identifier[extensions] [ literal[string] ]
identifier[msg] = identifier[mailer] . identifier[new] (** identifier[kwargs] )
identifier[msg] . identifier[__class__] = identifier[Message]
keyword[return] identifier[msg] | def new(self, **kwargs):
"""Return a new ``Message`` instance. The arguments are
passed to the ``marrow.mailer.Message`` constructor."""
app = self.app or current_app
mailer = app.extensions['marrowmailer']
msg = mailer.new(**kwargs)
msg.__class__ = Message
return msg |
def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding):
"""Helper function for dot_product_unmasked_self_attention_relative_2d."""
if heads_share_relative_embedding:
ret = tf.einsum("bhxyd,md->bhxym", x, y)
else:
ret = tf.einsum("bhxyd,hmd->bhxym", x, y)
return ret | def function[_matmul_with_relative_keys_2d, parameter[x, y, heads_share_relative_embedding]]:
constant[Helper function for dot_product_unmasked_self_attention_relative_2d.]
if name[heads_share_relative_embedding] begin[:]
variable[ret] assign[=] call[name[tf].einsum, parameter[constant[bhxyd,md->bhxym], name[x], name[y]]]
return[name[ret]] | keyword[def] identifier[_matmul_with_relative_keys_2d] ( identifier[x] , identifier[y] , identifier[heads_share_relative_embedding] ):
literal[string]
keyword[if] identifier[heads_share_relative_embedding] :
identifier[ret] = identifier[tf] . identifier[einsum] ( literal[string] , identifier[x] , identifier[y] )
keyword[else] :
identifier[ret] = identifier[tf] . identifier[einsum] ( literal[string] , identifier[x] , identifier[y] )
keyword[return] identifier[ret] | def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding):
"""Helper function for dot_product_unmasked_self_attention_relative_2d."""
if heads_share_relative_embedding:
ret = tf.einsum('bhxyd,md->bhxym', x, y) # depends on [control=['if'], data=[]]
else:
ret = tf.einsum('bhxyd,hmd->bhxym', x, y)
return ret |
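
The two einsum contractions differ only in whether the relative embedding carries a head axis; the output shape is the same either way. Shapes checked with numpy instead of tensorflow:

import numpy as np

b, h, X, Y, d, m = 2, 4, 5, 5, 8, 9
x = np.zeros((b, h, X, Y, d))
shared = np.zeros((m, d))           # one embedding table shared by all heads
per_head = np.zeros((h, m, d))      # a separate table per head

print(np.einsum('bhxyd,md->bhxym', x, shared).shape)     # (2, 4, 5, 5, 9)
print(np.einsum('bhxyd,hmd->bhxym', x, per_head).shape)  # (2, 4, 5, 5, 9)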
def assemble(self, header_json, metadata_json, content_json):
''' Create a Message instance assembled from json fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
message
'''
header = json_decode(header_json)
if 'msgtype' not in header:
log.error("Bad header with no msgtype was: %r", header)
raise ProtocolError("No 'msgtype' in header")
return self._messages[header['msgtype']].assemble(
header_json, metadata_json, content_json
) | def function[assemble, parameter[self, header_json, metadata_json, content_json]]:
constant[ Create a Message instance assembled from json fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
message
]
variable[header] assign[=] call[name[json_decode], parameter[name[header_json]]]
if compare[constant[msgtype] <ast.NotIn object at 0x7da2590d7190> name[header]] begin[:]
call[name[log].error, parameter[constant[Bad header with no msgtype was: %r], name[header]]]
<ast.Raise object at 0x7da20c76c760>
return[call[call[name[self]._messages][call[name[header]][constant[msgtype]]].assemble, parameter[name[header_json], name[metadata_json], name[content_json]]]] | keyword[def] identifier[assemble] ( identifier[self] , identifier[header_json] , identifier[metadata_json] , identifier[content_json] ):
literal[string]
identifier[header] = identifier[json_decode] ( identifier[header_json] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[header] :
identifier[log] . identifier[error] ( literal[string] , identifier[header] )
keyword[raise] identifier[ProtocolError] ( literal[string] )
keyword[return] identifier[self] . identifier[_messages] [ identifier[header] [ literal[string] ]]. identifier[assemble] (
identifier[header_json] , identifier[metadata_json] , identifier[content_json]
) | def assemble(self, header_json, metadata_json, content_json):
""" Create a Message instance assembled from json fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
message
"""
header = json_decode(header_json)
if 'msgtype' not in header:
log.error('Bad header with no msgtype was: %r', header)
raise ProtocolError("No 'msgtype' in header") # depends on [control=['if'], data=['header']]
return self._messages[header['msgtype']].assemble(header_json, metadata_json, content_json) |
def _merge(self, value):
""" Returns a list based on `value`:
* missing required value is converted to an empty list;
* missing required items are never created;
* nested items are merged recursively.
"""
if not value:
return []
if value is not None and not isinstance(value, list):
# bogus value; will not pass validation but should be preserved
return value
item_spec = self._nested_validator
return [x if x is None else item_spec.get_default_for(x) for x in value] | def function[_merge, parameter[self, value]]:
constant[ Returns a list based on `value`:
* missing required value is converted to an empty list;
* missing required items are never created;
* nested items are merged recursively.
]
if <ast.UnaryOp object at 0x7da1b2427790> begin[:]
return[list[[]]]
if <ast.BoolOp object at 0x7da1b2424850> begin[:]
return[name[value]]
variable[item_spec] assign[=] name[self]._nested_validator
return[<ast.ListComp object at 0x7da1b2426170>] | keyword[def] identifier[_merge] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return] []
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[return] identifier[value]
identifier[item_spec] = identifier[self] . identifier[_nested_validator]
keyword[return] [ identifier[x] keyword[if] identifier[x] keyword[is] keyword[None] keyword[else] identifier[item_spec] . identifier[get_default_for] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[value] ] | def _merge(self, value):
""" Returns a list based on `value`:
* missing required value is converted to an empty list;
* missing required items are never created;
* nested items are merged recursively.
"""
if not value:
return [] # depends on [control=['if'], data=[]]
if value is not None and (not isinstance(value, list)):
# bogus value; will not pass validation but should be preserved
return value # depends on [control=['if'], data=[]]
item_spec = self._nested_validator
return [x if x is None else item_spec.get_default_for(x) for x in value] |
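
Behaviour of the three branches, shown with a trivial stand-in for the nested validator (the same method body is repeated so the snippet runs on its own):

class DictSpec(object):                       # stand-in nested validator
    def get_default_for(self, item):
        merged = {'status': 'new'}            # default for a required key
        merged.update(item)
        return merged

class ListField(object):
    _nested_validator = DictSpec()
    def _merge(self, value):                  # same body as the record above
        if not value:
            return []
        if value is not None and not isinstance(value, list):
            return value
        item_spec = self._nested_validator
        return [x if x is None else item_spec.get_default_for(x) for x in value]

f = ListField()
print(f._merge(None))               # []  -- missing value becomes an empty list
print(f._merge('oops'))             # 'oops' -- bogus value kept for validation
print(f._merge([{'a': 1}, None]))   # [{'status': 'new', 'a': 1}, None]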
def valid_backbone_bond_lengths(self, atol=0.1):
"""True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
        Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths.
"""
bond_lengths = self.backbone_bond_lengths
a1 = numpy.allclose(bond_lengths['n_ca'],
[ideal_backbone_bond_lengths['n_ca']] * len(self),
atol=atol)
a2 = numpy.allclose(bond_lengths['ca_c'],
[ideal_backbone_bond_lengths['ca_c']] * len(self),
atol=atol)
a3 = numpy.allclose(bond_lengths['c_o'],
[ideal_backbone_bond_lengths['c_o']] * len(self),
atol=atol)
a4 = numpy.allclose(bond_lengths['c_n'],
[ideal_backbone_bond_lengths['c_n']] *
(len(self) - 1),
atol=atol)
return all([a1, a2, a3, a4]) | def function[valid_backbone_bond_lengths, parameter[self, atol]]:
constant[True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
        Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths.
]
variable[bond_lengths] assign[=] name[self].backbone_bond_lengths
variable[a1] assign[=] call[name[numpy].allclose, parameter[call[name[bond_lengths]][constant[n_ca]], binary_operation[list[[<ast.Subscript object at 0x7da1b261f550>]] * call[name[len], parameter[name[self]]]]]]
variable[a2] assign[=] call[name[numpy].allclose, parameter[call[name[bond_lengths]][constant[ca_c]], binary_operation[list[[<ast.Subscript object at 0x7da1b2657610>]] * call[name[len], parameter[name[self]]]]]]
variable[a3] assign[=] call[name[numpy].allclose, parameter[call[name[bond_lengths]][constant[c_o]], binary_operation[list[[<ast.Subscript object at 0x7da1b2657940>]] * call[name[len], parameter[name[self]]]]]]
variable[a4] assign[=] call[name[numpy].allclose, parameter[call[name[bond_lengths]][constant[c_n]], binary_operation[list[[<ast.Subscript object at 0x7da1b26575b0>]] * binary_operation[call[name[len], parameter[name[self]]] - constant[1]]]]]
return[call[name[all], parameter[list[[<ast.Name object at 0x7da1b26573d0>, <ast.Name object at 0x7da1b2657250>, <ast.Name object at 0x7da1b26570a0>, <ast.Name object at 0x7da1b2657ac0>]]]]] | keyword[def] identifier[valid_backbone_bond_lengths] ( identifier[self] , identifier[atol] = literal[int] ):
literal[string]
identifier[bond_lengths] = identifier[self] . identifier[backbone_bond_lengths]
identifier[a1] = identifier[numpy] . identifier[allclose] ( identifier[bond_lengths] [ literal[string] ],
[ identifier[ideal_backbone_bond_lengths] [ literal[string] ]]* identifier[len] ( identifier[self] ),
identifier[atol] = identifier[atol] )
identifier[a2] = identifier[numpy] . identifier[allclose] ( identifier[bond_lengths] [ literal[string] ],
[ identifier[ideal_backbone_bond_lengths] [ literal[string] ]]* identifier[len] ( identifier[self] ),
identifier[atol] = identifier[atol] )
identifier[a3] = identifier[numpy] . identifier[allclose] ( identifier[bond_lengths] [ literal[string] ],
[ identifier[ideal_backbone_bond_lengths] [ literal[string] ]]* identifier[len] ( identifier[self] ),
identifier[atol] = identifier[atol] )
identifier[a4] = identifier[numpy] . identifier[allclose] ( identifier[bond_lengths] [ literal[string] ],
[ identifier[ideal_backbone_bond_lengths] [ literal[string] ]]*
( identifier[len] ( identifier[self] )- literal[int] ),
identifier[atol] = identifier[atol] )
keyword[return] identifier[all] ([ identifier[a1] , identifier[a2] , identifier[a3] , identifier[a4] ]) | def valid_backbone_bond_lengths(self, atol=0.1):
"""True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
        Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths.
"""
bond_lengths = self.backbone_bond_lengths
a1 = numpy.allclose(bond_lengths['n_ca'], [ideal_backbone_bond_lengths['n_ca']] * len(self), atol=atol)
a2 = numpy.allclose(bond_lengths['ca_c'], [ideal_backbone_bond_lengths['ca_c']] * len(self), atol=atol)
a3 = numpy.allclose(bond_lengths['c_o'], [ideal_backbone_bond_lengths['c_o']] * len(self), atol=atol)
a4 = numpy.allclose(bond_lengths['c_n'], [ideal_backbone_bond_lengths['c_n']] * (len(self) - 1), atol=atol)
return all([a1, a2, a3, a4]) |
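
Each check reduces to numpy.allclose against a constant ideal bond length; a toy run with fabricated values (real ideals come from the ideal_backbone_bond_lengths table):

import numpy as np

ideal_n_ca = 1.47                             # placeholder ideal length, Angstroms
measured = np.array([1.46, 1.48, 1.52])
print(np.allclose(measured, [ideal_n_ca] * 3, atol=0.1))    # True
print(np.allclose(measured, [ideal_n_ca] * 3, atol=0.01))   # False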
def escape(identifier, ansi_quotes, should_quote):
"""
Escape identifiers.
    ANSI SQL uses double quotes for identifiers, but many databases use back quotes.
"""
if not should_quote(identifier):
return identifier
quote = '"' if ansi_quotes else '`'
identifier = identifier.replace(quote, 2*quote)
return '{0}{1}{2}'.format(quote, identifier, quote) | def function[escape, parameter[identifier, ansi_quotes, should_quote]]:
constant[
Escape identifiers.
    ANSI SQL uses double quotes for identifiers, but many databases use back quotes.
]
if <ast.UnaryOp object at 0x7da1b19edc60> begin[:]
return[name[identifier]]
variable[quote] assign[=] <ast.IfExp object at 0x7da1b19edf00>
variable[identifier] assign[=] call[name[identifier].replace, parameter[name[quote], binary_operation[constant[2] * name[quote]]]]
return[call[constant[{0}{1}{2}].format, parameter[name[quote], name[identifier], name[quote]]]] | keyword[def] identifier[escape] ( identifier[identifier] , identifier[ansi_quotes] , identifier[should_quote] ):
literal[string]
keyword[if] keyword[not] identifier[should_quote] ( identifier[identifier] ):
keyword[return] identifier[identifier]
identifier[quote] = literal[string] keyword[if] identifier[ansi_quotes] keyword[else] literal[string]
identifier[identifier] = identifier[identifier] . identifier[replace] ( identifier[quote] , literal[int] * identifier[quote] )
keyword[return] literal[string] . identifier[format] ( identifier[quote] , identifier[identifier] , identifier[quote] ) | def escape(identifier, ansi_quotes, should_quote):
"""
Escape identifiers.
ANSI uses double quotes, but many databases use back quotes.
"""
if not should_quote(identifier):
return identifier # depends on [control=['if'], data=[]]
quote = '"' if ansi_quotes else '`'
identifier = identifier.replace(quote, 2 * quote)
return '{0}{1}{2}'.format(quote, identifier, quote) |
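A usage sketch for escape(); the predicate below is an assumption chosen for illustration (quote anything that is not a plain Python identifier):

# Doubling of embedded quote characters follows from the
# replace(quote, 2*quote) call above.
should_quote = lambda ident: not ident.isidentifier()

print(escape('plain_column', True, should_quote))  # plain_column
print(escape('weird "name"', True, should_quote))  # "weird ""name"""
print(escape('weird name', False, should_quote))   # `weird name`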
def _new_message_properties(self, content_type=None, content_encoding=None,
headers=None, delivery_mode=None, priority=None,
correlation_id=None, reply_to=None,
expiration=None, message_id=None,
timestamp=None, message_type=None, user_id=None,
app_id=None):
"""Create a BasicProperties object, with the properties specified
:param str content_type: MIME content type
:param str content_encoding: MIME content encoding
:param dict headers: Message header field table
:param int delivery_mode: Non-persistent (1) or persistent (2)
:param int priority: Message priority, 0 to 9
:param str correlation_id: Application correlation identifier
:param str reply_to: Address to reply to
:param str expiration: Message expiration specification
:param str message_id: Application message identifier
:param int timestamp: Message timestamp
:param str message_type: Message type name
:param str user_id: Creating user id
:param str app_id: Creating application id
:rtype: pika.BasicProperties
"""
return pika.BasicProperties(content_type, content_encoding, headers,
delivery_mode, priority, correlation_id,
reply_to, expiration, message_id, timestamp,
message_type, user_id, app_id) | def function[_new_message_properties, parameter[self, content_type, content_encoding, headers, delivery_mode, priority, correlation_id, reply_to, expiration, message_id, timestamp, message_type, user_id, app_id]]:
constant[Create a BasicProperties object, with the properties specified
:param str content_type: MIME content type
:param str content_encoding: MIME content encoding
:param dict headers: Message header field table
:param int delivery_mode: Non-persistent (1) or persistent (2)
:param int priority: Message priority, 0 to 9
:param str correlation_id: Application correlation identifier
:param str reply_to: Address to reply to
:param str expiration: Message expiration specification
:param str message_id: Application message identifier
:param int timestamp: Message timestamp
:param str message_type: Message type name
:param str user_id: Creating user id
:param str app_id: Creating application id
:rtype: pika.BasicProperties
]
return[call[name[pika].BasicProperties, parameter[name[content_type], name[content_encoding], name[headers], name[delivery_mode], name[priority], name[correlation_id], name[reply_to], name[expiration], name[message_id], name[timestamp], name[message_type], name[user_id], name[app_id]]]] | keyword[def] identifier[_new_message_properties] ( identifier[self] , identifier[content_type] = keyword[None] , identifier[content_encoding] = keyword[None] ,
identifier[headers] = keyword[None] , identifier[delivery_mode] = keyword[None] , identifier[priority] = keyword[None] ,
identifier[correlation_id] = keyword[None] , identifier[reply_to] = keyword[None] ,
identifier[expiration] = keyword[None] , identifier[message_id] = keyword[None] ,
identifier[timestamp] = keyword[None] , identifier[message_type] = keyword[None] , identifier[user_id] = keyword[None] ,
identifier[app_id] = keyword[None] ):
literal[string]
keyword[return] identifier[pika] . identifier[BasicProperties] ( identifier[content_type] , identifier[content_encoding] , identifier[headers] ,
identifier[delivery_mode] , identifier[priority] , identifier[correlation_id] ,
identifier[reply_to] , identifier[expiration] , identifier[message_id] , identifier[timestamp] ,
identifier[message_type] , identifier[user_id] , identifier[app_id] ) | def _new_message_properties(self, content_type=None, content_encoding=None, headers=None, delivery_mode=None, priority=None, correlation_id=None, reply_to=None, expiration=None, message_id=None, timestamp=None, message_type=None, user_id=None, app_id=None):
"""Create a BasicProperties object, with the properties specified
:param str content_type: MIME content type
:param str content_encoding: MIME content encoding
:param dict headers: Message header field table
:param int delivery_mode: Non-persistent (1) or persistent (2)
:param int priority: Message priority, 0 to 9
:param str correlation_id: Application correlation identifier
:param str reply_to: Address to reply to
:param str expiration: Message expiration specification
:param str message_id: Application message identifier
:param int timestamp: Message timestamp
:param str message_type: Message type name
:param str user_id: Creating user id
:param str app_id: Creating application id
:rtype: pika.BasicProperties
"""
return pika.BasicProperties(content_type, content_encoding, headers, delivery_mode, priority, correlation_id, reply_to, expiration, message_id, timestamp, message_type, user_id, app_id) |
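Since the helper is a thin wrapper, the equivalent direct call is a one-liner; a minimal sketch, assuming pika is installed and using only keyword arguments that pika.BasicProperties accepts:

import pika

# Arguments omitted here simply default to None, as in the wrapper above.
props = pika.BasicProperties(content_type='application/json',
                             delivery_mode=2,  # 2 = persistent
                             priority=0,
                             app_id='example-app')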
def is_in_this_week(self):
"""Checks if date is in this week (from sunday to sunday)
:return: True iff date is in this week (from sunday to sunday)
"""
return self.is_date_in_between(
Weekday.get_last(self.week_end, including_today=True),
Weekday.get_next(self.week_end),
include_end=False
) | def function[is_in_this_week, parameter[self]]:
constant[Checks if the date is in this week (from Sunday to Sunday)
:return: True iff the date is in this week (from Sunday to Sunday)
]
return[call[name[self].is_date_in_between, parameter[call[name[Weekday].get_last, parameter[name[self].week_end]], call[name[Weekday].get_next, parameter[name[self].week_end]]]]] | keyword[def] identifier[is_in_this_week] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[is_date_in_between] (
identifier[Weekday] . identifier[get_last] ( identifier[self] . identifier[week_end] , identifier[including_today] = keyword[True] ),
identifier[Weekday] . identifier[get_next] ( identifier[self] . identifier[week_end] ),
identifier[include_end] = keyword[False]
) | def is_in_this_week(self):
"""Checks if date is in this week (from sunday to sunday)
:return: True iff date is in this week (from sunday to sunday)
"""
return self.is_date_in_between(Weekday.get_last(self.week_end, including_today=True), Weekday.get_next(self.week_end), include_end=False) |
def get_decomposition_type_property(value, is_bytes=False):
"""Get `DECOMPOSITION TYPE` property."""
obj = unidata.ascii_decomposition_type if is_bytes else unidata.unicode_decomposition_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['decompositiontype'].get(negated, negated)
else:
value = unidata.unicode_alias['decompositiontype'].get(value, value)
return obj[value] | def function[get_decomposition_type_property, parameter[value, is_bytes]]:
constant[Get `DECOMPOSITION TYPE` property.]
variable[obj] assign[=] <ast.IfExp object at 0x7da1b032f220>
if call[name[value].startswith, parameter[constant[^]]] begin[:]
variable[negated] assign[=] call[name[value]][<ast.Slice object at 0x7da1b032ee00>]
variable[value] assign[=] binary_operation[constant[^] + call[call[name[unidata].unicode_alias][constant[decompositiontype]].get, parameter[name[negated], name[negated]]]]
return[call[name[obj]][name[value]]] | keyword[def] identifier[get_decomposition_type_property] ( identifier[value] , identifier[is_bytes] = keyword[False] ):
literal[string]
identifier[obj] = identifier[unidata] . identifier[ascii_decomposition_type] keyword[if] identifier[is_bytes] keyword[else] identifier[unidata] . identifier[unicode_decomposition_type]
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ):
identifier[negated] = identifier[value] [ literal[int] :]
identifier[value] = literal[string] + identifier[unidata] . identifier[unicode_alias] [ literal[string] ]. identifier[get] ( identifier[negated] , identifier[negated] )
keyword[else] :
identifier[value] = identifier[unidata] . identifier[unicode_alias] [ literal[string] ]. identifier[get] ( identifier[value] , identifier[value] )
keyword[return] identifier[obj] [ identifier[value] ] | def get_decomposition_type_property(value, is_bytes=False):
"""Get `DECOMPOSITION TYPE` property."""
obj = unidata.ascii_decomposition_type if is_bytes else unidata.unicode_decomposition_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['decompositiontype'].get(negated, negated) # depends on [control=['if'], data=[]]
else:
value = unidata.unicode_alias['decompositiontype'].get(value, value)
return obj[value] |
def bench(client, n):
""" Benchmark n requests """
items = list(range(n))
# Time client publish operations
# ------------------------------
started = time.time()
for i in items:
client.publish('test', i)
duration = time.time() - started
print('Publisher client stats:')
util.print_stats(n, duration) | def function[bench, parameter[client, n]]:
constant[ Benchmark n requests ]
variable[items] assign[=] call[name[list], parameter[call[name[range], parameter[name[n]]]]]
variable[started] assign[=] call[name[time].time, parameter[]]
for taget[name[i]] in starred[name[items]] begin[:]
call[name[client].publish, parameter[constant[test], name[i]]]
variable[duration] assign[=] binary_operation[call[name[time].time, parameter[]] - name[started]]
call[name[print], parameter[constant[Publisher client stats:]]]
call[name[util].print_stats, parameter[name[n], name[duration]]] | keyword[def] identifier[bench] ( identifier[client] , identifier[n] ):
literal[string]
identifier[items] = identifier[list] ( identifier[range] ( identifier[n] ))
identifier[started] = identifier[time] . identifier[time] ()
keyword[for] identifier[i] keyword[in] identifier[items] :
identifier[client] . identifier[publish] ( literal[string] , identifier[i] )
identifier[duration] = identifier[time] . identifier[time] ()- identifier[started]
identifier[print] ( literal[string] )
identifier[util] . identifier[print_stats] ( identifier[n] , identifier[duration] ) | def bench(client, n):
""" Benchmark n requests """
items = list(range(n))
# Time client publish operations
# ------------------------------
started = time.time()
for i in items:
client.publish('test', i) # depends on [control=['for'], data=['i']]
duration = time.time() - started
print('Publisher client stats:')
util.print_stats(n, duration) |
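A runnable sketch of bench() with a stub client; any object exposing publish(topic, payload) will do, and the time and util names are assumed to come from the surrounding module's imports:

class StubClient(object):
    """Stand-in for a real publisher client."""
    def publish(self, topic, payload):
        pass  # a real client would send the message over the wire here

bench(StubClient(), 1000)  # prints publish throughput for 1000 calls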
def flat_map(self, flatmap_function):
"""Return a new Streamlet by applying map_function to each element of this Streamlet
and flattening the result
"""
from heronpy.streamlet.impl.flatmapbolt import FlatMapStreamlet
fm_streamlet = FlatMapStreamlet(flatmap_function, self)
self._add_child(fm_streamlet)
return fm_streamlet | def function[flat_map, parameter[self, flatmap_function]]:
constant[Return a new Streamlet by applying flatmap_function to each element of this Streamlet
and flattening the result
]
from relative_module[heronpy.streamlet.impl.flatmapbolt] import module[FlatMapStreamlet]
variable[fm_streamlet] assign[=] call[name[FlatMapStreamlet], parameter[name[flatmap_function], name[self]]]
call[name[self]._add_child, parameter[name[fm_streamlet]]]
return[name[fm_streamlet]] | keyword[def] identifier[flat_map] ( identifier[self] , identifier[flatmap_function] ):
literal[string]
keyword[from] identifier[heronpy] . identifier[streamlet] . identifier[impl] . identifier[flatmapbolt] keyword[import] identifier[FlatMapStreamlet]
identifier[fm_streamlet] = identifier[FlatMapStreamlet] ( identifier[flatmap_function] , identifier[self] )
identifier[self] . identifier[_add_child] ( identifier[fm_streamlet] )
keyword[return] identifier[fm_streamlet] | def flat_map(self, flatmap_function):
"""Return a new Streamlet by applying map_function to each element of this Streamlet
and flattening the result
"""
from heronpy.streamlet.impl.flatmapbolt import FlatMapStreamlet
fm_streamlet = FlatMapStreamlet(flatmap_function, self)
self._add_child(fm_streamlet)
return fm_streamlet |
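For intuition, the flat-map semantics in plain Python: apply the function to every element, then flatten one level of the returned iterables.

# Plain-Python analogue of flat-map: map, then flatten one level.
sentences = ['hello world', 'flat map']
flattened = [word for s in sentences for word in s.split()]
print(flattened)  # ['hello', 'world', 'flat', 'map']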
def create(self):
"""
Generate the data and figs for the report and fill the LaTeX templates with them
to generate a PDF file with the report.
:return:
"""
logger.info("Generating the report from %s to %s", self.start, self.end)
self.create_data_figs()
self.create_pdf()
logger.info("Report completed") | def function[create, parameter[self]]:
constant[
Generate the data and figs for the report and fill the LaTeX templates with them
to generate a PDF file with the report.
:return:
]
call[name[logger].info, parameter[constant[Generating the report from %s to %s], name[self].start, name[self].end]]
call[name[self].create_data_figs, parameter[]]
call[name[self].create_pdf, parameter[]]
call[name[logger].info, parameter[constant[Report completed]]] | keyword[def] identifier[create] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[start] , identifier[self] . identifier[end] )
identifier[self] . identifier[create_data_figs] ()
identifier[self] . identifier[create_pdf] ()
identifier[logger] . identifier[info] ( literal[string] ) | def create(self):
"""
Generate the data and figs for the report and fill the LaTeX templates with them
to generate a PDF file with the report.
:return:
"""
logger.info('Generating the report from %s to %s', self.start, self.end)
self.create_data_figs()
self.create_pdf()
logger.info('Report completed') |
def create(self):
"""
Create the SSH Key
"""
input_params = {
"name": self.name,
"public_key": self.public_key,
}
data = self.get_data("account/keys/", type=POST, params=input_params)
if data:
self.id = data['ssh_key']['id'] | def function[create, parameter[self]]:
constant[
Create the SSH Key
]
variable[input_params] assign[=] dictionary[[<ast.Constant object at 0x7da1b016c760>, <ast.Constant object at 0x7da1b016d690>], [<ast.Attribute object at 0x7da1b016c640>, <ast.Attribute object at 0x7da1b016c220>]]
variable[data] assign[=] call[name[self].get_data, parameter[constant[account/keys/]]]
if name[data] begin[:]
name[self].id assign[=] call[call[name[data]][constant[ssh_key]]][constant[id]] | keyword[def] identifier[create] ( identifier[self] ):
literal[string]
identifier[input_params] ={
literal[string] : identifier[self] . identifier[name] ,
literal[string] : identifier[self] . identifier[public_key] ,
}
identifier[data] = identifier[self] . identifier[get_data] ( literal[string] , identifier[type] = identifier[POST] , identifier[params] = identifier[input_params] )
keyword[if] identifier[data] :
identifier[self] . identifier[id] = identifier[data] [ literal[string] ][ literal[string] ] | def create(self):
"""
Create the SSH Key
"""
input_params = {'name': self.name, 'public_key': self.public_key}
data = self.get_data('account/keys/', type=POST, params=input_params)
if data:
self.id = data['ssh_key']['id'] # depends on [control=['if'], data=[]] |
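A usage sketch, assuming the python-digitalocean package; the token and key material are placeholders, not working values:

import digitalocean  # assumes the python-digitalocean package

key = digitalocean.SSHKey(token='YOUR_DO_TOKEN',  # placeholder
                          name='laptop-key',
                          public_key='ssh-rsa AAAA... user@host')
key.create()   # POSTs to account/keys/ and stores the new id on the object
print(key.id)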
def assert_optimal(model, message='optimization failed'):
"""Assert model solver status is optimal.
Do nothing if model solver status is optimal, otherwise throw
appropriate exception depending on the status.
Parameters
----------
model : cobra.Model
The model to check the solver status for.
message : str (optional)
Message for the exception if the solver status was not optimal.
"""
status = model.solver.status
if status != OPTIMAL:
exception_cls = OPTLANG_TO_EXCEPTIONS_DICT.get(
status, OptimizationError)
raise exception_cls("{} ({})".format(message, status)) | def function[assert_optimal, parameter[model, message]]:
constant[Assert model solver status is optimal.
Do nothing if model solver status is optimal, otherwise throw
appropriate exception depending on the status.
Parameters
----------
model : cobra.Model
The model to check the solver status for.
message : str (optional)
Message for the exception if the solver status was not optimal.
]
variable[status] assign[=] name[model].solver.status
if compare[name[status] not_equal[!=] name[OPTIMAL]] begin[:]
variable[exception_cls] assign[=] call[name[OPTLANG_TO_EXCEPTIONS_DICT].get, parameter[name[status], name[OptimizationError]]]
<ast.Raise object at 0x7da1b01b9870> | keyword[def] identifier[assert_optimal] ( identifier[model] , identifier[message] = literal[string] ):
literal[string]
identifier[status] = identifier[model] . identifier[solver] . identifier[status]
keyword[if] identifier[status] != identifier[OPTIMAL] :
identifier[exception_cls] = identifier[OPTLANG_TO_EXCEPTIONS_DICT] . identifier[get] (
identifier[status] , identifier[OptimizationError] )
keyword[raise] identifier[exception_cls] ( literal[string] . identifier[format] ( identifier[message] , identifier[status] )) | def assert_optimal(model, message='optimization failed'):
"""Assert model solver status is optimal.
Do nothing if model solver status is optimal, otherwise throw
appropriate exception depending on the status.
Parameters
----------
model : cobra.Model
The model to check the solver status for.
message : str (optional)
Message for the exception if the solver status was not optimal.
"""
status = model.solver.status
if status != OPTIMAL:
exception_cls = OPTLANG_TO_EXCEPTIONS_DICT.get(status, OptimizationError)
raise exception_cls('{} ({})'.format(message, status)) # depends on [control=['if'], data=['status']] |
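A minimal usage sketch, assuming model is an already-built cobra.Model:

# model.optimize() runs the solver and sets model.solver.status;
# assert_optimal is then a no-op on success and raises otherwise.
model.optimize()
assert_optimal(model, message='FBA failed')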
def _check_distributed_corpora_file(self):
"""Check '~/cltk_data/distributed_corpora.yaml' for any custom,
distributed corpora that the user wants to load locally.
TODO: write check or try if `cltk_data` dir is not present
"""
if self.testing:
distributed_corpora_fp = os.path.expanduser('~/cltk_data/test_distributed_corpora.yaml')
else:
distributed_corpora_fp = os.path.expanduser('~/cltk_data/distributed_corpora.yaml')
try:
with open(distributed_corpora_fp) as file_open:
corpora_dict = yaml.safe_load(file_open)
except FileNotFoundError:
logger.info('`~/cltk_data/distributed_corpora.yaml` file not found.')
return []
except yaml.parser.ParserError as parse_err:
logger.debug('Yaml parsing error: %s' % parse_err)
return []
user_defined_corpora = []
for corpus_name in corpora_dict:
about = corpora_dict[corpus_name]
if about['language'].lower() == self.language:
user_defined_corpus = dict()
# user_defined_corpus['git_remote'] = about['git_remote']
user_defined_corpus['origin'] = about['origin']
user_defined_corpus['type'] = about['type']
user_defined_corpus['name'] = corpus_name
user_defined_corpora.append(user_defined_corpus)
return user_defined_corpora | def function[_check_distributed_corpora_file, parameter[self]]:
constant[Check '~/cltk_data/distributed_corpora.yaml' for any custom,
distributed corpora that the user wants to load locally.
TODO: write check or try if `cltk_data` dir is not present
]
if name[self].testing begin[:]
variable[distributed_corpora_fp] assign[=] call[name[os].path.expanduser, parameter[constant[~/cltk_data/test_distributed_corpora.yaml]]]
<ast.Try object at 0x7da18eb55750>
variable[user_defined_corpora] assign[=] list[[]]
for taget[name[corpus_name]] in starred[name[corpora_dict]] begin[:]
variable[about] assign[=] call[name[corpora_dict]][name[corpus_name]]
if compare[call[call[name[about]][constant[language]].lower, parameter[]] equal[==] name[self].language] begin[:]
variable[user_defined_corpus] assign[=] call[name[dict], parameter[]]
call[name[user_defined_corpus]][constant[origin]] assign[=] call[name[about]][constant[origin]]
call[name[user_defined_corpus]][constant[type]] assign[=] call[name[about]][constant[type]]
call[name[user_defined_corpus]][constant[name]] assign[=] name[corpus_name]
call[name[user_defined_corpora].append, parameter[name[user_defined_corpus]]]
return[name[user_defined_corpora]] | keyword[def] identifier[_check_distributed_corpora_file] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[testing] :
identifier[distributed_corpora_fp] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[else] :
identifier[distributed_corpora_fp] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[try] :
keyword[with] identifier[open] ( identifier[distributed_corpora_fp] ) keyword[as] identifier[file_open] :
identifier[corpora_dict] = identifier[yaml] . identifier[safe_load] ( identifier[file_open] )
keyword[except] identifier[FileNotFoundError] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] []
keyword[except] identifier[yaml] . identifier[parser] . identifier[ParserError] keyword[as] identifier[parse_err] :
identifier[logger] . identifier[debug] ( literal[string] % identifier[parse_err] )
keyword[return] []
identifier[user_defined_corpora] =[]
keyword[for] identifier[corpus_name] keyword[in] identifier[corpora_dict] :
identifier[about] = identifier[corpora_dict] [ identifier[corpus_name] ]
keyword[if] identifier[about] [ literal[string] ]. identifier[lower] ()== identifier[self] . identifier[language] :
identifier[user_defined_corpus] = identifier[dict] ()
identifier[user_defined_corpus] [ literal[string] ]= identifier[about] [ literal[string] ]
identifier[user_defined_corpus] [ literal[string] ]= identifier[about] [ literal[string] ]
identifier[user_defined_corpus] [ literal[string] ]= identifier[corpus_name]
identifier[user_defined_corpora] . identifier[append] ( identifier[user_defined_corpus] )
keyword[return] identifier[user_defined_corpora] | def _check_distributed_corpora_file(self):
"""Check '~/cltk_data/distributed_corpora.yaml' for any custom,
distributed corpora that the user wants to load locally.
TODO: write check or try if `cltk_data` dir is not present
"""
if self.testing:
distributed_corpora_fp = os.path.expanduser('~/cltk_data/test_distributed_corpora.yaml') # depends on [control=['if'], data=[]]
else:
distributed_corpora_fp = os.path.expanduser('~/cltk_data/distributed_corpora.yaml')
try:
with open(distributed_corpora_fp) as file_open:
corpora_dict = yaml.safe_load(file_open) # depends on [control=['with'], data=['file_open']] # depends on [control=['try'], data=[]]
except FileNotFoundError:
logger.info('`~/cltk_data/distributed_corpora.yaml` file not found.')
return [] # depends on [control=['except'], data=[]]
except yaml.parser.ParserError as parse_err:
logger.debug('Yaml parsing error: %s' % parse_err)
return [] # depends on [control=['except'], data=['parse_err']]
user_defined_corpora = []
for corpus_name in corpora_dict:
about = corpora_dict[corpus_name]
if about['language'].lower() == self.language:
user_defined_corpus = dict()
# user_defined_corpus['git_remote'] = about['git_remote']
user_defined_corpus['origin'] = about['origin']
user_defined_corpus['type'] = about['type']
user_defined_corpus['name'] = corpus_name
user_defined_corpora.append(user_defined_corpus) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['corpus_name']]
return user_defined_corpora |
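The YAML file this parser reads maps corpus names to entries carrying at least the keys used above (origin, language, type). A hypothetical entry, runnable through yaml.safe_load:

import yaml

# Key names match what the parser reads above; the values are invented.
example = """
example_latin_corpus:
  origin: https://github.com/example/latin_corpus.git
  language: latin
  type: text
"""
print(yaml.safe_load(example))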
def has_duplicates(self):
"""
Returns ``True`` if the dict contains keys with duplicates.
Recurses through all keys whose value is a ``VDFDict``.
"""
for n in getattr(self.__kcount, _iter_values)():
if n != 1:
return True
def dict_recurse(obj):
for v in getattr(obj, _iter_values)():
if isinstance(v, VDFDict) and v.has_duplicates():
return True
elif isinstance(v, dict):
return dict_recurse(v)
return False
return dict_recurse(self) | def function[has_duplicates, parameter[self]]:
constant[
Returns ``True`` if the dict contains keys with duplicates.
Recurses through all keys whose value is a ``VDFDict``.
]
for taget[name[n]] in starred[call[call[name[getattr], parameter[name[self].__kcount, name[_iter_values]]], parameter[]]] begin[:]
if compare[name[n] not_equal[!=] constant[1]] begin[:]
return[constant[True]]
def function[dict_recurse, parameter[obj]]:
for taget[name[v]] in starred[call[call[name[getattr], parameter[name[obj], name[_iter_values]]], parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da2041d8ac0> begin[:]
return[constant[True]]
return[constant[False]]
return[call[name[dict_recurse], parameter[name[self]]]] | keyword[def] identifier[has_duplicates] ( identifier[self] ):
literal[string]
keyword[for] identifier[n] keyword[in] identifier[getattr] ( identifier[self] . identifier[__kcount] , identifier[_iter_values] )():
keyword[if] identifier[n] != literal[int] :
keyword[return] keyword[True]
keyword[def] identifier[dict_recurse] ( identifier[obj] ):
keyword[for] identifier[v] keyword[in] identifier[getattr] ( identifier[obj] , identifier[_iter_values] )():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[VDFDict] ) keyword[and] identifier[v] . identifier[has_duplicates] ():
keyword[return] keyword[True]
keyword[elif] identifier[isinstance] ( identifier[v] , identifier[dict] ):
keyword[return] identifier[dict_recurse] ( identifier[v] )
keyword[return] keyword[False]
keyword[return] identifier[dict_recurse] ( identifier[self] ) | def has_duplicates(self):
"""
Returns ``True`` if the dict contains keys with duplicates.
Recurses through all keys whose value is a ``VDFDict``.
"""
for n in getattr(self.__kcount, _iter_values)():
if n != 1:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
def dict_recurse(obj):
for v in getattr(obj, _iter_values)():
if isinstance(v, VDFDict) and v.has_duplicates():
return True # depends on [control=['if'], data=[]]
elif isinstance(v, dict):
return dict_recurse(v) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
return False
return dict_recurse(self) |
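A usage sketch, assuming the vdf package's VDFDict, which can be built from a list of key/value pairs and may therefore hold duplicate keys:

from vdf import VDFDict  # assumes the `vdf` package is installed

d = VDFDict([('key', '1'), ('key', '2')])  # two entries under one key
print(d.has_duplicates())                  # True
print(VDFDict([('only', '1')]).has_duplicates())  # False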
def parse_report_file(input_, nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False, parallel=False):
"""Parses a DMARC aggregate or forensic file at the given path, a
file-like object, or bytes
Args:
input_: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC report
"""
if type(input_) == str:
file_object = open(input_, "rb")
elif type(input_) == bytes:
file_object = BytesIO(input_)
else:
file_object = input_
content = file_object.read()
try:
report = parse_aggregate_report_file(content, nameservers=nameservers,
dns_timeout=dns_timeout,
parallel=parallel)
results = OrderedDict([("report_type", "aggregate"),
("report", report)])
except InvalidAggregateReport:
try:
sa = strip_attachment_payloads
results = parse_report_email(content,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa,
parallel=parallel)
except InvalidDMARCReport:
raise InvalidDMARCReport("Not a valid aggregate or forensic "
"report")
return results | def function[parse_report_file, parameter[input_, nameservers, dns_timeout, strip_attachment_payloads, parallel]]:
constant[Parses a DMARC aggregate or forensic file at the given path, a
file-like object, or bytes
Args:
input_: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC report
]
if compare[call[name[type], parameter[name[input_]]] equal[==] name[str]] begin[:]
variable[file_object] assign[=] call[name[open], parameter[name[input_], constant[rb]]]
variable[content] assign[=] call[name[file_object].read, parameter[]]
<ast.Try object at 0x7da18f09efe0>
return[name[results]] | keyword[def] identifier[parse_report_file] ( identifier[input_] , identifier[nameservers] = keyword[None] , identifier[dns_timeout] = literal[int] ,
identifier[strip_attachment_payloads] = keyword[False] , identifier[parallel] = keyword[False] ):
literal[string]
keyword[if] identifier[type] ( identifier[input_] )== identifier[str] :
identifier[file_object] = identifier[open] ( identifier[input_] , literal[string] )
keyword[elif] identifier[type] ( identifier[input_] )== identifier[bytes] :
identifier[file_object] = identifier[BytesIO] ( identifier[input_] )
keyword[else] :
identifier[file_object] = identifier[input_]
identifier[content] = identifier[file_object] . identifier[read] ()
keyword[try] :
identifier[report] = identifier[parse_aggregate_report_file] ( identifier[content] , identifier[nameservers] = identifier[nameservers] ,
identifier[dns_timeout] = identifier[dns_timeout] ,
identifier[parallel] = identifier[parallel] )
identifier[results] = identifier[OrderedDict] ([( literal[string] , literal[string] ),
( literal[string] , identifier[report] )])
keyword[except] identifier[InvalidAggregateReport] :
keyword[try] :
identifier[sa] = identifier[strip_attachment_payloads]
identifier[results] = identifier[parse_report_email] ( identifier[content] ,
identifier[nameservers] = identifier[nameservers] ,
identifier[dns_timeout] = identifier[dns_timeout] ,
identifier[strip_attachment_payloads] = identifier[sa] ,
identifier[parallel] = identifier[parallel] )
keyword[except] identifier[InvalidDMARCReport] :
keyword[raise] identifier[InvalidDMARCReport] ( literal[string]
literal[string] )
keyword[return] identifier[results] | def parse_report_file(input_, nameservers=None, dns_timeout=2.0, strip_attachment_payloads=False, parallel=False):
"""Parses a DMARC aggregate or forensic file at the given path, a
file-like object, or bytes
Args:
input_: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC report
"""
if type(input_) == str:
file_object = open(input_, 'rb') # depends on [control=['if'], data=[]]
elif type(input_) == bytes:
file_object = BytesIO(input_) # depends on [control=['if'], data=[]]
else:
file_object = input_
content = file_object.read()
try:
report = parse_aggregate_report_file(content, nameservers=nameservers, dns_timeout=dns_timeout, parallel=parallel)
results = OrderedDict([('report_type', 'aggregate'), ('report', report)]) # depends on [control=['try'], data=[]]
except InvalidAggregateReport:
try:
sa = strip_attachment_payloads
results = parse_report_email(content, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=sa, parallel=parallel) # depends on [control=['try'], data=[]]
except InvalidDMARCReport:
raise InvalidDMARCReport('Not a valid aggregate or forensic report') # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
return results |
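A usage sketch; the file name below is a placeholder, and as described above the function also accepts a file object or raw bytes:

# Parse a saved DMARC report from disk.
results = parse_report_file('aggregate_report.xml.zip')
print(results['report_type'])  # 'aggregate' or 'forensic'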
def createGroup(self, message, user_ids):
"""
Creates a group with the given ids
:param message: The initial message
:param user_ids: A list of users to create the group with.
:return: ID of the new group
:raises: FBchatException if request failed
"""
data = self._getSendData(message=self._oldMessage(message))
if len(user_ids) < 2:
raise FBchatUserError("Error when creating group: Not enough participants")
for i, user_id in enumerate(user_ids + [self._uid]):
data["specific_to_list[{}]".format(i)] = "fbid:{}".format(user_id)
message_id, thread_id = self._doSendRequest(data, get_thread_id=True)
if not thread_id:
raise FBchatException(
"Error when creating group: No thread_id could be found"
)
return thread_id | def function[createGroup, parameter[self, message, user_ids]]:
constant[
Creates a group with the given ids
:param message: The initial message
:param user_ids: A list of users to create the group with.
:return: ID of the new group
:raises: FBchatException if request failed
]
variable[data] assign[=] call[name[self]._getSendData, parameter[]]
if compare[call[name[len], parameter[name[user_ids]]] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da1b19dbb80>
for taget[tuple[[<ast.Name object at 0x7da1b19da1a0>, <ast.Name object at 0x7da1b19da050>]]] in starred[call[name[enumerate], parameter[binary_operation[name[user_ids] + list[[<ast.Attribute object at 0x7da1b19d9e10>]]]]]] begin[:]
call[name[data]][call[constant[specific_to_list[{}]].format, parameter[name[i]]]] assign[=] call[constant[fbid:{}].format, parameter[name[user_id]]]
<ast.Tuple object at 0x7da1b19dae30> assign[=] call[name[self]._doSendRequest, parameter[name[data]]]
if <ast.UnaryOp object at 0x7da1b18dca30> begin[:]
<ast.Raise object at 0x7da1b18dcfd0>
return[name[thread_id]] | keyword[def] identifier[createGroup] ( identifier[self] , identifier[message] , identifier[user_ids] ):
literal[string]
identifier[data] = identifier[self] . identifier[_getSendData] ( identifier[message] = identifier[self] . identifier[_oldMessage] ( identifier[message] ))
keyword[if] identifier[len] ( identifier[user_ids] )< literal[int] :
keyword[raise] identifier[FBchatUserError] ( literal[string] )
keyword[for] identifier[i] , identifier[user_id] keyword[in] identifier[enumerate] ( identifier[user_ids] +[ identifier[self] . identifier[_uid] ]):
identifier[data] [ literal[string] . identifier[format] ( identifier[i] )]= literal[string] . identifier[format] ( identifier[user_id] )
identifier[message_id] , identifier[thread_id] = identifier[self] . identifier[_doSendRequest] ( identifier[data] , identifier[get_thread_id] = keyword[True] )
keyword[if] keyword[not] identifier[thread_id] :
keyword[raise] identifier[FBchatException] (
literal[string]
)
keyword[return] identifier[thread_id] | def createGroup(self, message, user_ids):
"""
Creates a group with the given ids
:param message: The initial message
:param user_ids: A list of users to create the group with.
:return: ID of the new group
:raises: FBchatException if request failed
"""
data = self._getSendData(message=self._oldMessage(message))
if len(user_ids) < 2:
raise FBchatUserError('Error when creating group: Not enough participants') # depends on [control=['if'], data=[]]
for (i, user_id) in enumerate(user_ids + [self._uid]):
data['specific_to_list[{}]'.format(i)] = 'fbid:{}'.format(user_id) # depends on [control=['for'], data=[]]
(message_id, thread_id) = self._doSendRequest(data, get_thread_id=True)
if not thread_id:
raise FBchatException('Error when creating group: No thread_id could be found') # depends on [control=['if'], data=[]]
return thread_id |
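A usage sketch, assuming the fbchat Client this method belongs to; the credentials and user ids are placeholders:

from fbchat import Client  # assumes the fbchat package

client = Client('user@example.com', 'password')  # placeholder credentials
thread_id = client.createGroup('Welcome!', ['100000000000001',
                                            '100000000000002'])
print('created group thread', thread_id)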
def lookup(name, min_similarity_ratio=.75):
"""
Look up a Stan function with similar functionality to a Python
function (or even an R function, see examples). If the function is
not present in the lookup table, this attempts to find a similar one
and prints the results. This function requires the package `pandas`.
Parameters
-----------
name : str
Name of the function one wants to look for.
min_similarity_ratio : float
In case no exact match is found in the lookup table, the
function will attempt to find similar names using
`difflib.SequenceMatcher.ratio()`, and then results with
calculated ratio below `min_similarity_ratio` will be discarded.
Examples
---------
#Look up a Stan function similar to scipy.stats.skewnorm
lookup("scipy.stats.skewnorm")
#Look up a Stan function similar to R dnorm
lookup("R.dnorm")
#Look up a Stan function similar to numpy.hstack
lookup("numpy.hstack")
#List Stan log probability mass functions
lookup("lpmfs")
#List Stan log cumulative density functions
lookup("lcdfs")
Returns
---------
A pandas.core.frame.DataFrame if exact or at least one similar
result is found, None otherwise.
"""
if lookuptable is None:
build()
if name not in lookuptable.keys():
from difflib import SequenceMatcher
from operator import itemgetter
print("No match for " + name + " in the lookup table.")
lkt_keys = list(lookuptable.keys())
mapfunction = lambda x: SequenceMatcher(a=name, b=x).ratio()
similars = list(map(mapfunction, lkt_keys))
similars = zip(range(len(similars)), similars)
similars = list(filter(lambda x: x[1] >= min_similarity_ratio,
similars))
similars = sorted(similars, key=itemgetter(1))
if (len(similars)):
print("But the following similar entries were found: ")
for i in range(len(similars)):
print(lkt_keys[similars[i][0]] + " ===> with similary "
"ratio of " + str(round(similars[i][1], 3)) + "")
print("Will return results for entry"
" " + lkt_keys[similars[i][0]] + " "
"(which is the most similar entry found).")
return lookup(lkt_keys[similars[i][0]])
else:
print("And no similar entry found. You may try to decrease"
"the min_similarity_ratio parameter.")
return
entries = stanftable[lookuptable[name]]
if not len(entries):
return "Found no equivalent Stan function available for " + name
try:
import pandas as pd
except ImportError:
raise ImportError('Package pandas is required to use this '
'function.')
return pd.DataFrame(entries) | def function[lookup, parameter[name, min_similarity_ratio]]:
constant[
Look up a Stan function with similar functionality to a Python
function (or even an R function, see examples). If the function is
not present in the lookup table, this attempts to find a similar one
and prints the results. This function requires the package `pandas`.
Parameters
-----------
name : str
Name of the function one wants to look for.
min_similarity_ratio : float
In case no exact match is found in the lookup table, the
function will attempt to find similar names using
`difflib.SequenceMatcher.ratio()`, and then results with
calculated ratio below `min_similarity_ratio` will be discarded.
Examples
---------
#Look up a Stan function similar to scipy.stats.skewnorm
lookup("scipy.stats.skewnorm")
#Look up a Stan function similar to R dnorm
lookup("R.dnorm")
#Look up a Stan function similar to numpy.hstack
lookup("numpy.hstack")
#List Stan log probability mass functions
lookup("lpmfs")
#List Stan log cumulative density functions
lookup("lcdfs")
Returns
---------
A pandas.core.frame.DataFrame if exact or at least one similar
result is found, None otherwise.
]
if compare[name[lookuptable] is constant[None]] begin[:]
call[name[build], parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> call[name[lookuptable].keys, parameter[]]] begin[:]
from relative_module[difflib] import module[SequenceMatcher]
from relative_module[operator] import module[itemgetter]
call[name[print], parameter[binary_operation[binary_operation[constant[No match for ] + name[name]] + constant[ in the lookup table.]]]]
variable[lkt_keys] assign[=] call[name[list], parameter[call[name[lookuptable].keys, parameter[]]]]
variable[mapfunction] assign[=] <ast.Lambda object at 0x7da1b1e01810>
variable[similars] assign[=] call[name[list], parameter[call[name[map], parameter[name[mapfunction], name[lkt_keys]]]]]
variable[similars] assign[=] call[name[zip], parameter[call[name[range], parameter[call[name[len], parameter[name[similars]]]]], name[similars]]]
variable[similars] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b1e00970>, name[similars]]]]]
variable[similars] assign[=] call[name[sorted], parameter[name[similars]]]
if call[name[len], parameter[name[similars]]] begin[:]
call[name[print], parameter[constant[But the following similar entries were found: ]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[similars]]]]]] begin[:]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[call[name[lkt_keys]][call[call[name[similars]][name[i]]][constant[0]]] + constant[ ===> with similarity ratio of ]] + call[name[str], parameter[call[name[round], parameter[call[call[name[similars]][name[i]]][constant[1]], constant[3]]]]]] + constant[]]]]
call[name[print], parameter[binary_operation[binary_operation[constant[Will return results for entry ] + call[name[lkt_keys]][call[call[name[similars]][name[i]]][constant[0]]]] + constant[ (which is the most similar entry found).]]]]
return[call[name[lookup], parameter[call[name[lkt_keys]][call[call[name[similars]][name[i]]][constant[0]]]]]]
return[None]
variable[entries] assign[=] call[name[stanftable]][call[name[lookuptable]][name[name]]]
if <ast.UnaryOp object at 0x7da2041d91e0> begin[:]
return[binary_operation[constant[Found no equivalent Stan function available for ] + name[name]]]
<ast.Try object at 0x7da2041d9720>
return[call[name[pd].DataFrame, parameter[name[entries]]]] | keyword[def] identifier[lookup] ( identifier[name] , identifier[min_similarity_ratio] = literal[int] ):
literal[string]
keyword[if] identifier[lookuptable] keyword[is] keyword[None] :
identifier[build] ()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[lookuptable] . identifier[keys] ():
keyword[from] identifier[difflib] keyword[import] identifier[SequenceMatcher]
keyword[from] identifier[operator] keyword[import] identifier[itemgetter]
identifier[print] ( literal[string] + identifier[name] + literal[string] )
identifier[lkt_keys] = identifier[list] ( identifier[lookuptable] . identifier[keys] ())
identifier[mapfunction] = keyword[lambda] identifier[x] : identifier[SequenceMatcher] ( identifier[a] = identifier[name] , identifier[b] = identifier[x] ). identifier[ratio] ()
identifier[similars] = identifier[list] ( identifier[map] ( identifier[mapfunction] , identifier[lkt_keys] ))
identifier[similars] = identifier[zip] ( identifier[range] ( identifier[len] ( identifier[similars] )), identifier[similars] )
identifier[similars] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]>= identifier[min_similarity_ratio] ,
identifier[similars] ))
identifier[similars] = identifier[sorted] ( identifier[similars] , identifier[key] = identifier[itemgetter] ( literal[int] ))
keyword[if] ( identifier[len] ( identifier[similars] )):
identifier[print] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[similars] )):
identifier[print] ( identifier[lkt_keys] [ identifier[similars] [ identifier[i] ][ literal[int] ]]+ literal[string]
literal[string] + identifier[str] ( identifier[round] ( identifier[similars] [ identifier[i] ][ literal[int] ], literal[int] ))+ literal[string] )
identifier[print] ( literal[string]
literal[string] + identifier[lkt_keys] [ identifier[similars] [ identifier[i] ][ literal[int] ]]+ literal[string]
literal[string] )
keyword[return] identifier[lookup] ( identifier[lkt_keys] [ identifier[similars] [ identifier[i] ][ literal[int] ]])
keyword[else] :
identifier[print] ( literal[string]
literal[string] )
keyword[return]
identifier[entries] = identifier[stanftable] [ identifier[lookuptable] [ identifier[name] ]]
keyword[if] keyword[not] identifier[len] ( identifier[entries] ):
keyword[return] literal[string] + identifier[name]
keyword[try] :
keyword[import] identifier[pandas] keyword[as] identifier[pd]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[ImportError] ( literal[string]
literal[string] )
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[entries] ) | def lookup(name, min_similarity_ratio=0.75):
"""
Look up a Stan function with similar functionality to a Python
function (or even an R function, see examples). If the function is
not present in the lookup table, this attempts to find a similar one
and prints the results. This function requires the package `pandas`.
Parameters
-----------
name : str
Name of the function one wants to look for.
min_similarity_ratio : float
In case no exact match is found in the lookup table, the
function will attempt to find similar names using
`difflib.SequenceMatcher.ratio()`, and then results with
calculated ratio below `min_similarity_ratio` will be discarded.
Examples
---------
#Look up a Stan function similar to scipy.stats.skewnorm
lookup("scipy.stats.skewnorm")
#Look up a Stan function similar to R dnorm
lookup("R.dnorm")
#Look up a Stan function similar to numpy.hstack
lookup("numpy.hstack")
#List Stan log probability mass functions
lookup("lpmfs")
#List Stan log cumulative density functions
lookup("lcdfs")
Returns
---------
A pandas.core.frame.DataFrame if exact or at least one similar
result is found, None otherwise.
"""
if lookuptable is None:
build() # depends on [control=['if'], data=[]]
if name not in lookuptable.keys():
from difflib import SequenceMatcher
from operator import itemgetter
print('No match for ' + name + ' in the lookup table.')
lkt_keys = list(lookuptable.keys())
mapfunction = lambda x: SequenceMatcher(a=name, b=x).ratio()
similars = list(map(mapfunction, lkt_keys))
similars = zip(range(len(similars)), similars)
similars = list(filter(lambda x: x[1] >= min_similarity_ratio, similars))
similars = sorted(similars, key=itemgetter(1))
if len(similars):
print('But the following similar entries were found: ')
for i in range(len(similars)):
print(lkt_keys[similars[i][0]] + ' ===> with similarity ratio of ' + str(round(similars[i][1], 3)) + '') # depends on [control=['for'], data=['i']]
print('Will return results for entry ' + lkt_keys[similars[i][0]] + ' (which is the most similar entry found).')
return lookup(lkt_keys[similars[i][0]]) # depends on [control=['if'], data=[]]
else:
print('And no similar entry found. You may try to decrease the min_similarity_ratio parameter.')
return # depends on [control=['if'], data=['name']]
entries = stanftable[lookuptable[name]]
if not len(entries):
return 'Found no equivalent Stan function available for ' + name # depends on [control=['if'], data=[]]
try:
import pandas as pd # depends on [control=['try'], data=[]]
except ImportError:
raise ImportError('Package pandas is required to use this function.') # depends on [control=['except'], data=[]]
return pd.DataFrame(entries) |
def _get_metadata(self):
'''Since the user needs a job id and other parameters, save these
for them.
'''
metadata = {'SREGISTRY_GITLAB_FOLDER': self.artifacts,
'api_base': self.api_base,
'SREGISTRY_GITLAB_BASE': self.base,
'SREGISTRY_GITLAB_JOB': self.job }
return metadata | def function[_get_metadata, parameter[self]]:
constant[Since the user needs a job id and other parameters, save these
for them.
]
variable[metadata] assign[=] dictionary[[<ast.Constant object at 0x7da1b03fad70>, <ast.Constant object at 0x7da1b03f8730>, <ast.Constant object at 0x7da1b03fa3e0>, <ast.Constant object at 0x7da1b03faa40>], [<ast.Attribute object at 0x7da1b03fa140>, <ast.Attribute object at 0x7da1b03fb070>, <ast.Attribute object at 0x7da1b03faf50>, <ast.Attribute object at 0x7da1b03f8f70>]]
return[name[metadata]] | keyword[def] identifier[_get_metadata] ( identifier[self] ):
literal[string]
identifier[metadata] ={ literal[string] : identifier[self] . identifier[artifacts] ,
literal[string] : identifier[self] . identifier[api_base] ,
literal[string] : identifier[self] . identifier[base] ,
literal[string] : identifier[self] . identifier[job] }
keyword[return] identifier[metadata] | def _get_metadata(self):
"""since the user needs a job id and other parameters, save this
for them.
"""
metadata = {'SREGISTRY_GITLAB_FOLDER': self.artifacts, 'api_base': self.api_base, 'SREGISTRY_GITLAB_BASE': self.base, 'SREGISTRY_GITLAB_JOB': self.job}
return metadata |
def _recurse_find_trace(self, structure, item, trace=[]):
"""
Given a nested structure from _parse_repr, find the trace route to get to item.
"""
try:
i = structure.index(item)
except ValueError:
for j,substructure in enumerate(structure):
if isinstance(substructure, list):
return self._recurse_find_trace(substructure, item, trace+[j])
else:
return trace+[i] | def function[_recurse_find_trace, parameter[self, structure, item, trace]]:
constant[
given a nested structure from _parse_repr and find the trace route to get to item
]
<ast.Try object at 0x7da18eb56fb0> | keyword[def] identifier[_recurse_find_trace] ( identifier[self] , identifier[structure] , identifier[item] , identifier[trace] =[]):
literal[string]
keyword[try] :
identifier[i] = identifier[structure] . identifier[index] ( identifier[item] )
keyword[except] identifier[ValueError] :
keyword[for] identifier[j] , identifier[substructure] keyword[in] identifier[enumerate] ( identifier[structure] ):
keyword[if] identifier[isinstance] ( identifier[substructure] , identifier[list] ):
keyword[return] identifier[self] . identifier[_recurse_find_trace] ( identifier[substructure] , identifier[item] , identifier[trace] +[ identifier[j] ])
keyword[else] :
keyword[return] identifier[trace] +[ identifier[i] ] | def _recurse_find_trace(self, structure, item, trace=[]):
"""
Given a nested structure from _parse_repr, find the trace route to get to item.
"""
try:
i = structure.index(item) # depends on [control=['try'], data=[]]
except ValueError:
for (j, substructure) in enumerate(structure):
if isinstance(substructure, list):
return self._recurse_find_trace(substructure, item, trace + [j]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['except'], data=[]]
else:
return trace + [i] |
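A standalone variant of the same idea, for illustration. Note one assumption: unlike the method above, which returns after recursing into the first sublist it meets, this version keeps searching sibling sublists until the item is found:

def find_trace(structure, item, trace=()):
    """Return the list of indices leading to item, or None if absent."""
    try:
        return list(trace) + [structure.index(item)]
    except ValueError:
        for j, sub in enumerate(structure):
            if isinstance(sub, list):
                found = find_trace(sub, item, trace + (j,))
                if found is not None:
                    return found
        return None

print(find_trace([['a', ['b']], 'c'], 'b'))  # [0, 1, 0]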
def read(self, size=None): # pylint: disable=invalid-name
"""Reads from the buffer."""
if size is None or size < 0:
raise exceptions.NotYetImplementedError(
'Illegal read of size %s requested on BufferedStream. '
'Wrapped stream %s is at position %s-%s, '
'%s bytes remaining.' %
(size, self.__stream, self.__start_pos, self.__end_pos,
self._bytes_remaining))
data = ''
if self._bytes_remaining:
size = min(size, self._bytes_remaining)
data = self.__buffered_data[
self.__buffer_pos:self.__buffer_pos + size]
self.__buffer_pos += size
return data | def function[read, parameter[self, size]]:
constant[Reads from the buffer.]
if <ast.BoolOp object at 0x7da1b08454e0> begin[:]
<ast.Raise object at 0x7da1b0847d60>
variable[data] assign[=] constant[]
if name[self]._bytes_remaining begin[:]
variable[size] assign[=] call[name[min], parameter[name[size], name[self]._bytes_remaining]]
variable[data] assign[=] call[name[self].__buffered_data][<ast.Slice object at 0x7da1b07ba8f0>]
<ast.AugAssign object at 0x7da1b07bbbb0>
return[name[data]] | keyword[def] identifier[read] ( identifier[self] , identifier[size] = keyword[None] ):
literal[string]
keyword[if] identifier[size] keyword[is] keyword[None] keyword[or] identifier[size] < literal[int] :
keyword[raise] identifier[exceptions] . identifier[NotYetImplementedError] (
literal[string]
literal[string]
literal[string] %
( identifier[size] , identifier[self] . identifier[__stream] , identifier[self] . identifier[__start_pos] , identifier[self] . identifier[__end_pos] ,
identifier[self] . identifier[_bytes_remaining] ))
identifier[data] = literal[string]
keyword[if] identifier[self] . identifier[_bytes_remaining] :
identifier[size] = identifier[min] ( identifier[size] , identifier[self] . identifier[_bytes_remaining] )
identifier[data] = identifier[self] . identifier[__buffered_data] [
identifier[self] . identifier[__buffer_pos] : identifier[self] . identifier[__buffer_pos] + identifier[size] ]
identifier[self] . identifier[__buffer_pos] += identifier[size]
keyword[return] identifier[data] | def read(self, size=None): # pylint: disable=invalid-name
'Reads from the buffer.'
if size is None or size < 0:
raise exceptions.NotYetImplementedError('Illegal read of size %s requested on BufferedStream. Wrapped stream %s is at position %s-%s, %s bytes remaining.' % (size, self.__stream, self.__start_pos, self.__end_pos, self._bytes_remaining)) # depends on [control=['if'], data=[]]
data = ''
if self._bytes_remaining:
size = min(size, self._bytes_remaining)
data = self.__buffered_data[self.__buffer_pos:self.__buffer_pos + size]
self.__buffer_pos += size # depends on [control=['if'], data=[]]
return data |
def remove_range(self, start, end, callback=None):
'''Remove a range by rank.
This is equivalent to performing::
del l[start:end]
on a Python list.
It returns the number of elements removed.
'''
N = len(self)
if start < 0:
start = max(N + start, 0)
if start >= N:
return 0
if end is None:
end = N
elif end < 0:
end = max(N + end, 0)
else:
end = min(end, N)
if start >= end:
return 0
node = self._head
index = 0
chain = [None] * self._level
for i in range(self._level-1, -1, -1):
while node.next[i] and (index + node.width[i]) <= start:
index += node.width[i]
node = node.next[i]
chain[i] = node
node = node.next[0]
initial = self._size
while node and index < end:
next = node.next[0]
self._remove_node(node, chain)
index += 1
if callback:
callback(node.score, node.value)
node = next
return initial - self._size | def function[remove_range, parameter[self, start, end, callback]]:
constant[Remove a range by rank.
This is equivalent to performing::
del l[start:end]
on a Python list.
It returns the number of elements removed.
]
variable[N] assign[=] call[name[len], parameter[name[self]]]
if compare[name[start] less[<] constant[0]] begin[:]
variable[start] assign[=] call[name[max], parameter[binary_operation[name[N] + name[start]], constant[0]]]
if compare[name[start] greater_or_equal[>=] name[N]] begin[:]
return[constant[0]]
if compare[name[end] is constant[None]] begin[:]
variable[end] assign[=] name[N]
if compare[name[start] greater_or_equal[>=] name[end]] begin[:]
return[constant[0]]
variable[node] assign[=] name[self]._head
variable[index] assign[=] constant[0]
variable[chain] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20c6a9720>]] * name[self]._level]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[self]._level - constant[1]], <ast.UnaryOp object at 0x7da20c6aacb0>, <ast.UnaryOp object at 0x7da20c6abbe0>]]] begin[:]
while <ast.BoolOp object at 0x7da20c6a9c60> begin[:]
<ast.AugAssign object at 0x7da20c6a82b0>
variable[node] assign[=] call[name[node].next][name[i]]
call[name[chain]][name[i]] assign[=] name[node]
variable[node] assign[=] call[name[node].next][constant[0]]
variable[initial] assign[=] name[self]._size
while <ast.BoolOp object at 0x7da20e9b33d0> begin[:]
variable[next] assign[=] call[name[node].next][constant[0]]
call[name[self]._remove_node, parameter[name[node], name[chain]]]
<ast.AugAssign object at 0x7da20e9b22f0>
if name[callback] begin[:]
call[name[callback], parameter[name[node].score, name[node].value]]
variable[node] assign[=] name[next]
return[binary_operation[name[initial] - name[self]._size]] | keyword[def] identifier[remove_range] ( identifier[self] , identifier[start] , identifier[end] , identifier[callback] = keyword[None] ):
literal[string]
identifier[N] = identifier[len] ( identifier[self] )
keyword[if] identifier[start] < literal[int] :
identifier[start] = identifier[max] ( identifier[N] + identifier[start] , literal[int] )
keyword[if] identifier[start] >= identifier[N] :
keyword[return] literal[int]
keyword[if] identifier[end] keyword[is] keyword[None] :
identifier[end] = identifier[N]
keyword[elif] identifier[end] < literal[int] :
identifier[end] = identifier[max] ( identifier[N] + identifier[end] , literal[int] )
keyword[else] :
identifier[end] = identifier[min] ( identifier[end] , identifier[N] )
keyword[if] identifier[start] >= identifier[end] :
keyword[return] literal[int]
identifier[node] = identifier[self] . identifier[_head]
identifier[index] = literal[int]
identifier[chain] =[ keyword[None] ]* identifier[self] . identifier[_level]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[_level] - literal[int] ,- literal[int] ,- literal[int] ):
keyword[while] identifier[node] . identifier[next] [ identifier[i] ] keyword[and] ( identifier[index] + identifier[node] . identifier[width] [ identifier[i] ])<= identifier[start] :
identifier[index] += identifier[node] . identifier[width] [ identifier[i] ]
identifier[node] = identifier[node] . identifier[next] [ identifier[i] ]
identifier[chain] [ identifier[i] ]= identifier[node]
identifier[node] = identifier[node] . identifier[next] [ literal[int] ]
identifier[initial] = identifier[self] . identifier[_size]
keyword[while] identifier[node] keyword[and] identifier[index] < identifier[end] :
identifier[next] = identifier[node] . identifier[next] [ literal[int] ]
identifier[self] . identifier[_remove_node] ( identifier[node] , identifier[chain] )
identifier[index] += literal[int]
keyword[if] identifier[callback] :
identifier[callback] ( identifier[node] . identifier[score] , identifier[node] . identifier[value] )
identifier[node] = identifier[next]
keyword[return] identifier[initial] - identifier[self] . identifier[_size] | def remove_range(self, start, end, callback=None):
"""Remove a range by rank.
This is equivalent to performing::
del l[start:end]
on a Python list.
It returns the number of elements removed.
"""
N = len(self)
if start < 0:
start = max(N + start, 0) # depends on [control=['if'], data=['start']]
if start >= N:
return 0 # depends on [control=['if'], data=[]]
if end is None:
end = N # depends on [control=['if'], data=['end']]
elif end < 0:
end = max(N + end, 0) # depends on [control=['if'], data=['end']]
else:
end = min(end, N)
if start >= end:
return 0 # depends on [control=['if'], data=[]]
node = self._head
index = 0
chain = [None] * self._level
for i in range(self._level - 1, -1, -1):
while node.next[i] and index + node.width[i] <= start:
index += node.width[i]
node = node.next[i] # depends on [control=['while'], data=[]]
chain[i] = node # depends on [control=['for'], data=['i']]
node = node.next[0]
initial = self._size
while node and index < end:
next = node.next[0]
self._remove_node(node, chain)
index += 1
if callback:
callback(node.score, node.value) # depends on [control=['if'], data=[]]
node = next # depends on [control=['while'], data=[]]
return initial - self._size |
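A minimal reference sketch of the contract remove_range implements, shown against a plain Python list (the skiplist container itself is assumed and not reproduced here):

def remove_range_reference(lst, start, end=None):
    """Same semantics as `del lst[start:end]`; returns the count removed."""
    n = len(lst)
    if start < 0:
        start = max(n + start, 0)
    if end is None:
        end = n
    elif end < 0:
        end = max(n + end, 0)
    else:
        end = min(end, n)
    removed = max(end - start, 0) if start < n else 0
    del lst[start:end]
    return removed

data = [1, 2, 3, 4, 5]
assert remove_range_reference(data, 1, 3) == 2 and data == [1, 4, 5]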
def _raise_redirect_exceptions(response):
"""Return the new url or None if there are no redirects.
Raise exceptions if appropriate.
"""
if response.status_code not in [301, 302, 307]:
return None
new_url = urljoin(response.url, response.headers['location'])
if 'reddits/search' in new_url: # Handle non-existent subreddit
subreddit = new_url.rsplit('=', 1)[1]
raise InvalidSubreddit('`{0}` is not a valid subreddit'
.format(subreddit))
elif not RE_REDIRECT.search(response.url):
raise RedirectException(response.url, new_url)
return new_url | def function[_raise_redirect_exceptions, parameter[response]]:
constant[Return the new url or None if there are no redirects.
Raise exceptions if appropriate.
]
if compare[name[response].status_code <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18fe91660>, <ast.Constant object at 0x7da18fe92380>, <ast.Constant object at 0x7da18fe92350>]]] begin[:]
return[constant[None]]
variable[new_url] assign[=] call[name[urljoin], parameter[name[response].url, call[name[response].headers][constant[location]]]]
if compare[constant[reddits/search] in name[new_url]] begin[:]
variable[subreddit] assign[=] call[call[name[new_url].rsplit, parameter[constant[=], constant[1]]]][constant[1]]
<ast.Raise object at 0x7da18fe92b90>
return[name[new_url]] | keyword[def] identifier[_raise_redirect_exceptions] ( identifier[response] ):
literal[string]
keyword[if] identifier[response] . identifier[status_code] keyword[not] keyword[in] [ literal[int] , literal[int] , literal[int] ]:
keyword[return] keyword[None]
identifier[new_url] = identifier[urljoin] ( identifier[response] . identifier[url] , identifier[response] . identifier[headers] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[new_url] :
identifier[subreddit] = identifier[new_url] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
keyword[raise] identifier[InvalidSubreddit] ( literal[string]
. identifier[format] ( identifier[subreddit] ))
keyword[elif] keyword[not] identifier[RE_REDIRECT] . identifier[search] ( identifier[response] . identifier[url] ):
keyword[raise] identifier[RedirectException] ( identifier[response] . identifier[url] , identifier[new_url] )
keyword[return] identifier[new_url] | def _raise_redirect_exceptions(response):
"""Return the new url or None if there are no redirects.
Raise exceptions if appropriate.
"""
if response.status_code not in [301, 302, 307]:
return None # depends on [control=['if'], data=[]]
new_url = urljoin(response.url, response.headers['location'])
if 'reddits/search' in new_url: # Handle non-existent subreddit
subreddit = new_url.rsplit('=', 1)[1]
raise InvalidSubreddit('`{0}` is not a valid subreddit'.format(subreddit)) # depends on [control=['if'], data=['new_url']]
elif not RE_REDIRECT.search(response.url):
raise RedirectException(response.url, new_url) # depends on [control=['if'], data=[]]
return new_url |
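A hedged sketch of the inputs the helper expects; FakeResponse stands in for the requests.Response a real caller passes, and the Python 3 urljoin import mirrors the module-level one the function relies on:

from urllib.parse import urljoin

class FakeResponse(object):
    def __init__(self, status_code, url, location):
        self.status_code = status_code
        self.url = url
        self.headers = {'location': location}

resp = FakeResponse(301, 'https://www.reddit.com/r/foo/', '/r/foo/hot/')
# When RE_REDIRECT matches resp.url, the helper returns this joined URL;
# otherwise it raises RedirectException (or InvalidSubreddit for searches).
assert urljoin(resp.url, resp.headers['location']) == 'https://www.reddit.com/r/foo/hot/'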
def dump(self, fh, value, context=None):
"""Attempt to transform and write a string-based foreign value to the given file-like object.
Returns the length written.
"""
value = self.dumps(value)
fh.write(value)
return len(value) | def function[dump, parameter[self, fh, value, context]]:
constant[Attempt to transform and write a string-based foreign value to the given file-like object.
Returns the length written.
]
variable[value] assign[=] call[name[self].dumps, parameter[name[value]]]
call[name[fh].write, parameter[name[value]]]
return[call[name[len], parameter[name[value]]]] | keyword[def] identifier[dump] ( identifier[self] , identifier[fh] , identifier[value] , identifier[context] = keyword[None] ):
literal[string]
identifier[value] = identifier[self] . identifier[dumps] ( identifier[value] )
identifier[fh] . identifier[write] ( identifier[value] )
keyword[return] identifier[len] ( identifier[value] ) | def dump(self, fh, value, context=None):
"""Attempt to transform and write a string-based foreign value to the given file-like object.
Returns the length written.
"""
value = self.dumps(value)
fh.write(value)
return len(value) |
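A runnable usage sketch; JSONCodec is a hypothetical carrier for the dump method above, with json.dumps standing in for the serializer's dumps:

import io
import json

class JSONCodec(object):
    def dumps(self, value, context=None):
        return json.dumps(value)
    def dump(self, fh, value, context=None):
        value = self.dumps(value)
        fh.write(value)
        return len(value)

buf = io.StringIO()
assert JSONCodec().dump(buf, {'answer': 42}) == len(buf.getvalue())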
def remove_list_member(self, list_id, user_id):
"""
Remove a user from a list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.remove_list_member(list_id=list_id, user_id=user_id))) | def function[remove_list_member, parameter[self, list_id, user_id]]:
constant[
Remove a user from a list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object
]
return[call[name[List], parameter[call[name[tweepy_list_to_json], parameter[call[name[self]._client.remove_list_member, parameter[]]]]]]] | keyword[def] identifier[remove_list_member] ( identifier[self] , identifier[list_id] , identifier[user_id] ):
literal[string]
keyword[return] identifier[List] ( identifier[tweepy_list_to_json] ( identifier[self] . identifier[_client] . identifier[remove_list_member] ( identifier[list_id] = identifier[list_id] , identifier[user_id] = identifier[user_id] ))) | def remove_list_member(self, list_id, user_id):
"""
Remove a user from a list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.remove_list_member(list_id=list_id, user_id=user_id))) |
def parse_date(date_str):
"""Parse elastic datetime string."""
if not date_str:
return None
try:
date = ciso8601.parse_datetime(date_str)
if not date:
date = arrow.get(date_str).datetime
except TypeError:
date = arrow.get(date_str[0]).datetime
return date | def function[parse_date, parameter[date_str]]:
constant[Parse elastic datetime string.]
if <ast.UnaryOp object at 0x7da1b26c8ac0> begin[:]
return[constant[None]]
<ast.Try object at 0x7da1b26c8a90>
return[name[date]] | keyword[def] identifier[parse_date] ( identifier[date_str] ):
literal[string]
keyword[if] keyword[not] identifier[date_str] :
keyword[return] keyword[None]
keyword[try] :
identifier[date] = identifier[ciso8601] . identifier[parse_datetime] ( identifier[date_str] )
keyword[if] keyword[not] identifier[date] :
identifier[date] = identifier[arrow] . identifier[get] ( identifier[date_str] ). identifier[datetime]
keyword[except] identifier[TypeError] :
identifier[date] = identifier[arrow] . identifier[get] ( identifier[date_str] [ literal[int] ]). identifier[datetime]
keyword[return] identifier[date] | def parse_date(date_str):
"""Parse elastic datetime string."""
if not date_str:
return None # depends on [control=['if'], data=[]]
try:
date = ciso8601.parse_datetime(date_str)
if not date:
date = arrow.get(date_str).datetime # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except TypeError:
date = arrow.get(date_str[0]).datetime # depends on [control=['except'], data=[]]
return date |
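Usage sketch (needs the ciso8601 and arrow packages the function already imports):

d = parse_date('2019-04-03T10:15:00+00:00')
assert d is not None and d.tzinfo is not None  # timezone-aware datetime
assert parse_date('') is None                  # falsy input short-circuits
assert parse_date(['2019-04-03']) is not None  # list input takes the TypeError fallback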
def lock_file(filename):
"""Locks the file by writing a '.lock' file.
Returns True when the lock was acquired and
False when the file was already locked"""
lockfile = "%s.lock"%filename
if isfile(lockfile):
return False
else:
with open(lockfile, "w"):
pass
return True | def function[lock_file, parameter[filename]]:
constant[Locks the file by writing a '.lock' file.
Returns True when the lock was acquired and
False when the file was already locked]
variable[lockfile] assign[=] binary_operation[constant[%s.lock] <ast.Mod object at 0x7da2590d6920> name[filename]]
if call[name[isfile], parameter[name[lockfile]]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[lock_file] ( identifier[filename] ):
literal[string]
identifier[lockfile] = literal[string] % identifier[filename]
keyword[if] identifier[isfile] ( identifier[lockfile] ):
keyword[return] keyword[False]
keyword[else] :
keyword[with] identifier[open] ( identifier[lockfile] , literal[string] ):
keyword[pass]
keyword[return] keyword[True] | def lock_file(filename):
"""Locks the file by writing a '.lock' file.
Returns True when the lock was acquired and
False when the file was already locked"""
lockfile = '%s.lock' % filename
if isfile(lockfile):
return False # depends on [control=['if'], data=[]]
else:
with open(lockfile, 'w'):
pass # depends on [control=['with'], data=[]]
return True |
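A companion sketch; unlock_file is hypothetical, and note that the isfile-then-open pair above is not atomic, so two processes can still race (os.open with os.O_CREAT | os.O_EXCL would be the atomic variant):

import os
from os.path import isfile

def unlock_file(filename):
    """Remove the '.lock' file if present; returns True when it existed."""
    lockfile = "%s.lock" % filename
    if isfile(lockfile):
        os.remove(lockfile)
        return True
    return False

if lock_file("data.csv"):        # lock acquired
    try:
        pass                     # ... exclusive work on data.csv ...
    finally:
        unlock_file("data.csv")  # always release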
def plot(data, headers=None, pconfig=None):
""" Helper HTML for a beeswarm plot.
:param data: A list of data dicts
:param headers: A list of Dicts / OrderedDicts with information
for the series, such as colour scales, min and
max values etc.
:return: HTML string
"""
if headers is None:
headers = []
if pconfig is None:
pconfig = {}
# Allow user to overwrite any given config for this plot
if 'id' in pconfig and pconfig['id'] and pconfig['id'] in config.custom_plot_config:
for k, v in config.custom_plot_config[pconfig['id']].items():
pconfig[k] = v
# Make a datatable object
dt = table_object.datatable(data, headers, pconfig)
return make_plot(dt)
constant[ Helper HTML for a beeswarm plot.
:param data: A list of data dicts
:param headers: A list of Dicts / OrderedDicts with information
for the series, such as colour scales, min and
max values etc.
:return: HTML string
]
if compare[name[headers] is constant[None]] begin[:]
variable[headers] assign[=] list[[]]
if compare[name[pconfig] is constant[None]] begin[:]
variable[pconfig] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da18bcc9ea0> begin[:]
for taget[tuple[[<ast.Name object at 0x7da18ede4940>, <ast.Name object at 0x7da18ede4c40>]]] in starred[call[call[name[config].custom_plot_config][call[name[pconfig]][constant[id]]].items, parameter[]]] begin[:]
call[name[pconfig]][name[k]] assign[=] name[v]
variable[dt] assign[=] call[name[table_object].datatable, parameter[name[data], name[headers], name[pconfig]]]
return[call[name[make_plot], parameter[name[dt]]]] | keyword[def] identifier[plot] ( identifier[data] , identifier[headers] = keyword[None] , identifier[pconfig] = keyword[None] ):
literal[string]
keyword[if] identifier[headers] keyword[is] keyword[None] :
identifier[headers] =[]
keyword[if] identifier[pconfig] keyword[is] keyword[None] :
identifier[pconfig] ={}
keyword[if] literal[string] keyword[in] identifier[pconfig] keyword[and] identifier[pconfig] [ literal[string] ] keyword[and] identifier[pconfig] [ literal[string] ] keyword[in] identifier[config] . identifier[custom_plot_config] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[config] . identifier[custom_plot_config] [ identifier[pconfig] [ literal[string] ]]. identifier[items] ():
identifier[pconfig] [ identifier[k] ]= identifier[v]
identifier[dt] = identifier[table_object] . identifier[datatable] ( identifier[data] , identifier[headers] , identifier[pconfig] )
keyword[return] identifier[make_plot] ( identifier[dt] ) | def plot(data, headers=None, pconfig=None):
""" Helper HTML for a beeswarm plot.
:param data: A list of data dicts
:param headers: A list of Dicts / OrderedDicts with information
for the series, such as colour scales, min and
max values etc.
:return: HTML string
"""
if headers is None:
headers = [] # depends on [control=['if'], data=['headers']]
if pconfig is None:
pconfig = {} # depends on [control=['if'], data=['pconfig']]
# Allow user to overwrite any given config for this plot
if 'id' in pconfig and pconfig['id'] and (pconfig['id'] in config.custom_plot_config):
for (k, v) in config.custom_plot_config[pconfig['id']].items():
pconfig[k] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Make a datatable object
dt = table_object.datatable(data, headers, pconfig)
return make_plot(dt) |
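A standalone sketch of the per-plot config override the function performs before building the datatable (custom_plot_config stands in for config.custom_plot_config):

pconfig = {'id': 'my_plot', 'title': 'Default title'}
custom_plot_config = {'my_plot': {'title': 'Overridden title'}}

if 'id' in pconfig and pconfig['id'] and pconfig['id'] in custom_plot_config:
    for k, v in custom_plot_config[pconfig['id']].items():
        pconfig[k] = v

assert pconfig['title'] == 'Overridden title'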
def set_dial(self, json_value, index, timezone=None):
"""
:param json_value: The value to set
:param index: The dial's index
:param timezone: The time zone to use for a time dial
:return:
"""
values = self.json_state
values["nonce"] = str(random.randint(0, 1000000000))
if timezone is None:
json_value["channel_configuration"] = {"channel_id": "10"}
values["dials"][index] = json_value
response = self.api_interface.set_device_state(self, values)
else:
json_value["channel_configuration"] = {"channel_id": "1", "timezone": timezone}
values["dials"][index] = json_value
response = self.api_interface.set_device_state(self, values)
return response | def function[set_dial, parameter[self, json_value, index, timezone]]:
constant[
:param json_value: The value to set
:param index: The dial's index
:param timezone: The time zone to use for a time dial
:return:
]
variable[values] assign[=] name[self].json_state
call[name[values]][constant[nonce]] assign[=] call[name[str], parameter[call[name[random].randint, parameter[constant[0], constant[1000000000]]]]]
if compare[name[timezone] is constant[None]] begin[:]
call[name[json_value]][constant[channel_configuration]] assign[=] dictionary[[<ast.Constant object at 0x7da1b265ec20>], [<ast.Constant object at 0x7da1b265e8f0>]]
call[call[name[values]][constant[dials]]][name[index]] assign[=] name[json_value]
variable[response] assign[=] call[name[self].api_interface.set_device_state, parameter[name[self], name[values]]]
return[name[response]] | keyword[def] identifier[set_dial] ( identifier[self] , identifier[json_value] , identifier[index] , identifier[timezone] = keyword[None] ):
literal[string]
identifier[values] = identifier[self] . identifier[json_state]
identifier[values] [ literal[string] ]= identifier[str] ( identifier[random] . identifier[randint] ( literal[int] , literal[int] ))
keyword[if] identifier[timezone] keyword[is] keyword[None] :
identifier[json_value] [ literal[string] ]={ literal[string] : literal[string] }
identifier[values] [ literal[string] ][ identifier[index] ]= identifier[json_value]
identifier[response] = identifier[self] . identifier[api_interface] . identifier[set_device_state] ( identifier[self] , identifier[values] )
keyword[else] :
identifier[json_value] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : identifier[timezone] }
identifier[values] [ literal[string] ][ identifier[index] ]= identifier[json_value]
identifier[response] = identifier[self] . identifier[api_interface] . identifier[set_device_state] ( identifier[self] , identifier[values] )
keyword[return] identifier[response] | def set_dial(self, json_value, index, timezone=None):
"""
:param json_value: The value to set
:param index: The dial's index
:param timezone: The time zone to use for a time dial
:return:
"""
values = self.json_state
values['nonce'] = str(random.randint(0, 1000000000))
if timezone is None:
json_value['channel_configuration'] = {'channel_id': '10'}
values['dials'][index] = json_value
response = self.api_interface.set_device_state(self, values) # depends on [control=['if'], data=[]]
else:
json_value['channel_configuration'] = {'channel_id': '1', 'timezone': timezone}
values['dials'][index] = json_value
response = self.api_interface.set_device_state(self, values)
return response |
def _m2crypto_validate(message, ssldir=None, **config):
""" Return true or false if the message is signed appropriately.
Four things must be true:
1) The X509 cert must be signed by our CA
2) The cert must not be in our CRL.
3) We must be able to verify the signature using the RSA public key
contained in the X509 cert.
4) The topic of the message and the CN on the cert must appear in the
:ref:`conf-routing-policy` dict.
"""
if ssldir is None:
raise ValueError("You must set the ssldir keyword argument.")
def fail(reason):
_log.warn("Failed validation. %s" % reason)
return False
# Some sanity checking
for field in ['signature', 'certificate']:
if field not in message:
return fail("No %r field found." % field)
if not isinstance(message[field], six.text_type):
_log.error('msg[%r] is not a unicode string' % field)
try:
# Make an effort to decode it, it's very likely utf-8 since that's what
# is hardcoded throughout fedmsg. Worst case scenario is it'll cause a
# validation error when there shouldn't be one.
message[field] = message[field].decode('utf-8')
except UnicodeError as e:
_log.error("Unable to decode the message '%s' field: %s", field, str(e))
return False
# Peel off the auth datums
signature = message['signature'].decode('base64')
certificate = message['certificate'].decode('base64')
message = fedmsg.crypto.strip_credentials(message)
# Build an X509 object
cert = M2Crypto.X509.load_cert_string(certificate)
# Validate the cert. Make sure it is signed by our CA.
# validate_certificate will one day be a part of M2Crypto.SSL.Context
# https://bugzilla.osafoundation.org/show_bug.cgi?id=11690
ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt')
crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem')
fd, cafile = tempfile.mkstemp()
try:
ca_certificate, crl = utils.load_certificates(ca_location, crl_location)
os.write(fd, ca_certificate.encode('ascii'))
os.fsync(fd)
ctx = m2ext.SSL.Context()
ctx.load_verify_locations(cafile=cafile)
if not ctx.validate_certificate(cert):
ca_certificate, crl = utils.load_certificates(
ca_location, crl_location, invalidate_cache=True)
with open(cafile, 'w') as f:
f.write(ca_certificate)
ctx = m2ext.SSL.Context()
ctx.load_verify_locations(cafile=cafile)
if not ctx.validate_certificate(cert):
return fail("X509 certificate is not valid.")
except (IOError, RequestException) as e:
_log.error(str(e))
return False
finally:
os.close(fd)
os.remove(cafile)
if crl:
try:
fd, crlfile = tempfile.mkstemp(text=True)
os.write(fd, crl.encode('ascii'))
os.fsync(fd)
crl = M2Crypto.X509.load_crl(crlfile)
finally:
os.close(fd)
os.remove(crlfile)
# FIXME -- We need to check that the CRL is signed by our own CA.
# See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2
# if not ctx.validate_certificate(crl):
# return fail("X509 CRL is not valid.")
# FIXME -- we check the CRL, but by doing string comparison ourselves.
# This is not what we want to be doing.
# There is a patch into M2Crypto to handle this for us. We should use it
# once it's integrated upstream.
# See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2
revoked_serials = [long(line.split(': ')[1].strip(), base=16)
for line in crl.as_text().split('\n')
if 'Serial Number:' in line]
if cert.get_serial_number() in revoked_serials:
subject = cert.get_subject()
signer = '(no CN)'
if subject.nid.get('CN'):
entry = subject.get_entries_by_nid(subject.nid['CN'])[0]
if entry:
signer = entry.get_data().as_text()
return fail("X509 cert %r, %r is in the Revocation List (CRL)" % (
signer, cert.get_serial_number()))
# If the cert is good, then test to see if the signature in the messages
# matches up with the provided cert.
rsa_public = cert.get_pubkey().get_rsa()
digest = M2Crypto.EVP.MessageDigest('sha1')
digest.update(fedmsg.encoding.dumps(message))
try:
if not rsa_public.verify(digest.digest(), signature):
raise M2Crypto.RSA.RSAError("RSA signature failed to validate.")
except M2Crypto.RSA.RSAError as e:
return fail(str(e))
# Now we know that the cert is valid. The message is *authenticated*.
# * Next step: Authorization *
# Load our policy from the config dict.
routing_policy = config.get('routing_policy', {})
# Determine the name of the signer of the message.
# This will be something like "shell-pkgs01.stg.phx2.fedoraproject.org"
subject = cert.get_subject()
signer = subject.get_entries_by_nid(subject.nid['CN'])[0]\
.get_data().as_text()
return utils.validate_policy(
message.get('topic'), signer, routing_policy, config.get('routing_nitpicky', False)) | def function[_m2crypto_validate, parameter[message, ssldir]]:
constant[ Return True if the message is signed appropriately, False otherwise.
Four things must be true:
1) The X509 cert must be signed by our CA
2) The cert must not be in our CRL.
3) We must be able to verify the signature using the RSA public key
contained in the X509 cert.
4) The topic of the message and the CN on the cert must appear in the
:ref:`conf-routing-policy` dict.
]
if compare[name[ssldir] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0479e70>
def function[fail, parameter[reason]]:
call[name[_log].warn, parameter[binary_operation[constant[Failed validation. %s] <ast.Mod object at 0x7da2590d6920> name[reason]]]]
return[constant[False]]
for taget[name[field]] in starred[list[[<ast.Constant object at 0x7da1b047b310>, <ast.Constant object at 0x7da1b047a650>]]] begin[:]
if compare[name[field] <ast.NotIn object at 0x7da2590d7190> name[message]] begin[:]
return[call[name[fail], parameter[binary_operation[constant[No %r field found.] <ast.Mod object at 0x7da2590d6920> name[field]]]]]
if <ast.UnaryOp object at 0x7da1b047b850> begin[:]
call[name[_log].error, parameter[binary_operation[constant[msg[%r] is not a unicode string] <ast.Mod object at 0x7da2590d6920> name[field]]]]
<ast.Try object at 0x7da1b0478bb0>
variable[signature] assign[=] call[call[name[message]][constant[signature]].decode, parameter[constant[base64]]]
variable[certificate] assign[=] call[call[name[message]][constant[certificate]].decode, parameter[constant[base64]]]
variable[message] assign[=] call[name[fedmsg].crypto.strip_credentials, parameter[name[message]]]
variable[cert] assign[=] call[name[M2Crypto].X509.load_cert_string, parameter[name[certificate]]]
variable[ca_location] assign[=] call[name[config].get, parameter[constant[ca_cert_location], constant[https://fedoraproject.org/fedmsg/ca.crt]]]
variable[crl_location] assign[=] call[name[config].get, parameter[constant[crl_location], constant[https://fedoraproject.org/fedmsg/crl.pem]]]
<ast.Tuple object at 0x7da1b0479de0> assign[=] call[name[tempfile].mkstemp, parameter[]]
<ast.Try object at 0x7da1b047b550>
if name[crl] begin[:]
<ast.Try object at 0x7da1b0478fd0>
variable[revoked_serials] assign[=] <ast.ListComp object at 0x7da1b0618d90>
if compare[call[name[cert].get_serial_number, parameter[]] in name[revoked_serials]] begin[:]
variable[subject] assign[=] call[name[cert].get_subject, parameter[]]
variable[signer] assign[=] constant[(no CN)]
if call[name[subject].nid.get, parameter[constant[CN]]] begin[:]
variable[entry] assign[=] call[call[name[subject].get_entries_by_nid, parameter[call[name[subject].nid][constant[CN]]]]][constant[0]]
if name[entry] begin[:]
variable[signer] assign[=] call[call[name[entry].get_data, parameter[]].as_text, parameter[]]
return[call[name[fail], parameter[binary_operation[constant[X509 cert %r, %r is in the Revocation List (CRL)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0619ff0>, <ast.Call object at 0x7da1b0618ca0>]]]]]]
variable[rsa_public] assign[=] call[call[name[cert].get_pubkey, parameter[]].get_rsa, parameter[]]
variable[digest] assign[=] call[name[M2Crypto].EVP.MessageDigest, parameter[constant[sha1]]]
call[name[digest].update, parameter[call[name[fedmsg].encoding.dumps, parameter[name[message]]]]]
<ast.Try object at 0x7da1b061bd60>
variable[routing_policy] assign[=] call[name[config].get, parameter[constant[routing_policy], dictionary[[], []]]]
variable[subject] assign[=] call[name[cert].get_subject, parameter[]]
variable[signer] assign[=] call[call[call[call[name[subject].get_entries_by_nid, parameter[call[name[subject].nid][constant[CN]]]]][constant[0]].get_data, parameter[]].as_text, parameter[]]
return[call[name[utils].validate_policy, parameter[call[name[message].get, parameter[constant[topic]]], name[signer], name[routing_policy], call[name[config].get, parameter[constant[routing_nitpicky], constant[False]]]]]] | keyword[def] identifier[_m2crypto_validate] ( identifier[message] , identifier[ssldir] = keyword[None] ,** identifier[config] ):
literal[string]
keyword[if] identifier[ssldir] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[def] identifier[fail] ( identifier[reason] ):
identifier[_log] . identifier[warn] ( literal[string] % identifier[reason] )
keyword[return] keyword[False]
keyword[for] identifier[field] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[field] keyword[not] keyword[in] identifier[message] :
keyword[return] identifier[fail] ( literal[string] % identifier[field] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[message] [ identifier[field] ], identifier[six] . identifier[text_type] ):
identifier[_log] . identifier[error] ( literal[string] % identifier[field] )
keyword[try] :
identifier[message] [ identifier[field] ]= identifier[message] [ identifier[field] ]. identifier[decode] ( literal[string] )
keyword[except] identifier[UnicodeError] keyword[as] identifier[e] :
identifier[_log] . identifier[error] ( literal[string] , identifier[field] , identifier[str] ( identifier[e] ))
keyword[return] keyword[False]
identifier[signature] = identifier[message] [ literal[string] ]. identifier[decode] ( literal[string] )
identifier[certificate] = identifier[message] [ literal[string] ]. identifier[decode] ( literal[string] )
identifier[message] = identifier[fedmsg] . identifier[crypto] . identifier[strip_credentials] ( identifier[message] )
identifier[cert] = identifier[M2Crypto] . identifier[X509] . identifier[load_cert_string] ( identifier[certificate] )
identifier[ca_location] = identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[crl_location] = identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[fd] , identifier[cafile] = identifier[tempfile] . identifier[mkstemp] ()
keyword[try] :
identifier[ca_certificate] , identifier[crl] = identifier[utils] . identifier[load_certificates] ( identifier[ca_location] , identifier[crl_location] )
identifier[os] . identifier[write] ( identifier[fd] , identifier[ca_certificate] . identifier[encode] ( literal[string] ))
identifier[os] . identifier[fsync] ( identifier[fd] )
identifier[ctx] = identifier[m2ext] . identifier[SSL] . identifier[Context] ()
identifier[ctx] . identifier[load_verify_locations] ( identifier[cafile] = identifier[cafile] )
keyword[if] keyword[not] identifier[ctx] . identifier[validate_certificate] ( identifier[cert] ):
identifier[ca_certificate] , identifier[crl] = identifier[utils] . identifier[load_certificates] (
identifier[ca_location] , identifier[crl_location] , identifier[invalidate_cache] = keyword[True] )
keyword[with] identifier[open] ( identifier[cafile] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[ca_certificate] )
identifier[ctx] = identifier[m2ext] . identifier[SSL] . identifier[Context] ()
identifier[ctx] . identifier[load_verify_locations] ( identifier[cafile] = identifier[cafile] )
keyword[if] keyword[not] identifier[ctx] . identifier[validate_certificate] ( identifier[cert] ):
keyword[return] identifier[fail] ( literal[string] )
keyword[except] ( identifier[IOError] , identifier[RequestException] ) keyword[as] identifier[e] :
identifier[_log] . identifier[error] ( identifier[str] ( identifier[e] ))
keyword[return] keyword[False]
keyword[finally] :
identifier[os] . identifier[close] ( identifier[fd] )
identifier[os] . identifier[remove] ( identifier[cafile] )
keyword[if] identifier[crl] :
keyword[try] :
identifier[fd] , identifier[crlfile] = identifier[tempfile] . identifier[mkstemp] ( identifier[text] = keyword[True] )
identifier[os] . identifier[write] ( identifier[fd] , identifier[crl] . identifier[encode] ( literal[string] ))
identifier[os] . identifier[fsync] ( identifier[fd] )
identifier[crl] = identifier[M2Crypto] . identifier[X509] . identifier[load_crl] ( identifier[crlfile] )
keyword[finally] :
identifier[os] . identifier[close] ( identifier[fd] )
identifier[os] . identifier[remove] ( identifier[crlfile] )
identifier[revoked_serials] =[ identifier[long] ( identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] (), identifier[base] = literal[int] )
keyword[for] identifier[line] keyword[in] identifier[crl] . identifier[as_text] (). identifier[split] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[line] ]
keyword[if] identifier[cert] . identifier[get_serial_number] () keyword[in] identifier[revoked_serials] :
identifier[subject] = identifier[cert] . identifier[get_subject] ()
identifier[signer] = literal[string]
keyword[if] identifier[subject] . identifier[nid] . identifier[get] ( literal[string] ):
identifier[entry] = identifier[subject] . identifier[get_entries_by_nid] ( identifier[subject] . identifier[nid] [ literal[string] ])[ literal[int] ]
keyword[if] identifier[entry] :
identifier[signer] = identifier[entry] . identifier[get_data] (). identifier[as_text] ()
keyword[return] identifier[fail] ( literal[string] %(
identifier[signer] , identifier[cert] . identifier[get_serial_number] ()))
identifier[rsa_public] = identifier[cert] . identifier[get_pubkey] (). identifier[get_rsa] ()
identifier[digest] = identifier[M2Crypto] . identifier[EVP] . identifier[MessageDigest] ( literal[string] )
identifier[digest] . identifier[update] ( identifier[fedmsg] . identifier[encoding] . identifier[dumps] ( identifier[message] ))
keyword[try] :
keyword[if] keyword[not] identifier[rsa_public] . identifier[verify] ( identifier[digest] . identifier[digest] (), identifier[signature] ):
keyword[raise] identifier[M2Crypto] . identifier[RSA] . identifier[RSAError] ( literal[string] )
keyword[except] identifier[M2Crypto] . identifier[RSA] . identifier[RSAError] keyword[as] identifier[e] :
keyword[return] identifier[fail] ( identifier[str] ( identifier[e] ))
identifier[routing_policy] = identifier[config] . identifier[get] ( literal[string] ,{})
identifier[subject] = identifier[cert] . identifier[get_subject] ()
identifier[signer] = identifier[subject] . identifier[get_entries_by_nid] ( identifier[subject] . identifier[nid] [ literal[string] ])[ literal[int] ]. identifier[get_data] (). identifier[as_text] ()
keyword[return] identifier[utils] . identifier[validate_policy] (
identifier[message] . identifier[get] ( literal[string] ), identifier[signer] , identifier[routing_policy] , identifier[config] . identifier[get] ( literal[string] , keyword[False] )) | def _m2crypto_validate(message, ssldir=None, **config):
""" Return true or false if the message is signed appropriately.
Four things must be true:
1) The X509 cert must be signed by our CA
2) The cert must not be in our CRL.
3) We must be able to verify the signature using the RSA public key
contained in the X509 cert.
4) The topic of the message and the CN on the cert must appear in the
:ref:`conf-routing-policy` dict.
"""
if ssldir is None:
raise ValueError('You must set the ssldir keyword argument.') # depends on [control=['if'], data=[]]
def fail(reason):
_log.warn('Failed validation. %s' % reason)
return False
# Some sanity checking
for field in ['signature', 'certificate']:
if field not in message:
return fail('No %r field found.' % field) # depends on [control=['if'], data=['field']]
if not isinstance(message[field], six.text_type):
_log.error('msg[%r] is not a unicode string' % field)
try:
# Make an effort to decode it, it's very likely utf-8 since that's what
# is hardcoded throughout fedmsg. Worst case scenario is it'll cause a
# validation error when there shouldn't be one.
message[field] = message[field].decode('utf-8') # depends on [control=['try'], data=[]]
except UnicodeError as e:
_log.error("Unable to decode the message '%s' field: %s", field, str(e))
return False # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
# Peel off the auth datums
signature = message['signature'].decode('base64')
certificate = message['certificate'].decode('base64')
message = fedmsg.crypto.strip_credentials(message)
# Build an X509 object
cert = M2Crypto.X509.load_cert_string(certificate)
# Validate the cert. Make sure it is signed by our CA.
# validate_certificate will one day be a part of M2Crypto.SSL.Context
# https://bugzilla.osafoundation.org/show_bug.cgi?id=11690
ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt')
crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem')
(fd, cafile) = tempfile.mkstemp()
try:
(ca_certificate, crl) = utils.load_certificates(ca_location, crl_location)
os.write(fd, ca_certificate.encode('ascii'))
os.fsync(fd)
ctx = m2ext.SSL.Context()
ctx.load_verify_locations(cafile=cafile)
if not ctx.validate_certificate(cert):
(ca_certificate, crl) = utils.load_certificates(ca_location, crl_location, invalidate_cache=True)
with open(cafile, 'w') as f:
f.write(ca_certificate) # depends on [control=['with'], data=['f']]
ctx = m2ext.SSL.Context()
ctx.load_verify_locations(cafile=cafile)
if not ctx.validate_certificate(cert):
return fail('X509 certificate is not valid.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (IOError, RequestException) as e:
_log.error(str(e))
return False # depends on [control=['except'], data=['e']]
finally:
os.close(fd)
os.remove(cafile)
if crl:
try:
(fd, crlfile) = tempfile.mkstemp(text=True)
os.write(fd, crl.encode('ascii'))
os.fsync(fd)
crl = M2Crypto.X509.load_crl(crlfile) # depends on [control=['try'], data=[]]
finally:
os.close(fd)
os.remove(crlfile)
# FIXME -- We need to check that the CRL is signed by our own CA.
# See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2
# if not ctx.validate_certificate(crl):
# return fail("X509 CRL is not valid.")
# FIXME -- we check the CRL, but by doing string comparison ourselves.
# This is not what we want to be doing.
# There is a patch into M2Crypto to handle this for us. We should use it
# once it's integrated upstream.
# See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2
revoked_serials = [long(line.split(': ')[1].strip(), base=16) for line in crl.as_text().split('\n') if 'Serial Number:' in line]
if cert.get_serial_number() in revoked_serials:
subject = cert.get_subject()
signer = '(no CN)'
if subject.nid.get('CN'):
entry = subject.get_entries_by_nid(subject.nid['CN'])[0]
if entry:
signer = entry.get_data().as_text() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return fail('X509 cert %r, %r is in the Revocation List (CRL)' % (signer, cert.get_serial_number())) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# If the cert is good, then test to see if the signature in the messages
# matches up with the provided cert.
rsa_public = cert.get_pubkey().get_rsa()
digest = M2Crypto.EVP.MessageDigest('sha1')
digest.update(fedmsg.encoding.dumps(message))
try:
if not rsa_public.verify(digest.digest(), signature):
raise M2Crypto.RSA.RSAError('RSA signature failed to validate.') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except M2Crypto.RSA.RSAError as e:
return fail(str(e)) # depends on [control=['except'], data=['e']]
# Now we know that the cert is valid. The message is *authenticated*.
# * Next step: Authorization *
# Load our policy from the config dict.
routing_policy = config.get('routing_policy', {})
# Determine the name of the signer of the message.
# This will be something like "shell-pkgs01.stg.phx2.fedoraproject.org"
subject = cert.get_subject()
signer = subject.get_entries_by_nid(subject.nid['CN'])[0].get_data().as_text()
return utils.validate_policy(message.get('topic'), signer, routing_policy, config.get('routing_nitpicky', False)) |
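The CRL fallback above extracts revoked serials by string-parsing the text dump; a standalone sketch of that extraction, with Python 3 int in place of the Python 2 long used in the function:

crl_text = (
    "Revoked Certificates:\n"
    "    Serial Number: 0A1B\n"
    "        Revocation Date: Jan  1 00:00:00 2020 GMT\n"
)
revoked_serials = [int(line.split(': ')[1].strip(), 16)
                   for line in crl_text.split('\n')
                   if 'Serial Number:' in line]
assert revoked_serials == [0x0A1B]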
def _float(self, string):
"""Convert string to float
Take care of numbers in exponential format
"""
string = self._denoise(string)
exp_match = re.match(r'^[-.\d]+x10-(\d)$', string)
if exp_match:
exp = int(exp_match.groups()[0])
fac = 10 ** -exp
string = string.replace('x10-{}'.format(exp), '')
else:
fac = 1
return fac * float(string) | def function[_float, parameter[self, string]]:
constant[Convert string to float
Take care of numbers in exponential format
]
variable[string] assign[=] call[name[self]._denoise, parameter[name[string]]]
variable[exp_match] assign[=] call[name[re].match, parameter[constant[^[-.\d]+x10-(\d)$], name[string]]]
if name[exp_match] begin[:]
variable[exp] assign[=] call[name[int], parameter[call[call[name[exp_match].groups, parameter[]]][constant[0]]]]
variable[fac] assign[=] binary_operation[constant[10] ** <ast.UnaryOp object at 0x7da1b22a7b20>]
variable[string] assign[=] call[name[string].replace, parameter[call[constant[x10-{}].format, parameter[name[exp]]], constant[]]]
return[binary_operation[name[fac] * call[name[float], parameter[name[string]]]]] | keyword[def] identifier[_float] ( identifier[self] , identifier[string] ):
literal[string]
identifier[string] = identifier[self] . identifier[_denoise] ( identifier[string] )
identifier[exp_match] = identifier[re] . identifier[match] ( literal[string] , identifier[string] )
keyword[if] identifier[exp_match] :
identifier[exp] = identifier[int] ( identifier[exp_match] . identifier[groups] ()[ literal[int] ])
identifier[fac] = literal[int] **- identifier[exp]
identifier[string] = identifier[string] . identifier[replace] ( literal[string] . identifier[format] ( identifier[exp] ), literal[string] )
keyword[else] :
identifier[fac] = literal[int]
keyword[return] identifier[fac] * identifier[float] ( identifier[string] ) | def _float(self, string):
"""Convert string to float
Take care of numbers in exponential format
"""
string = self._denoise(string)
exp_match = re.match('^[-.\\d]+x10-(\\d)$', string)
if exp_match:
exp = int(exp_match.groups()[0])
fac = 10 ** (-exp)
string = string.replace('x10-{}'.format(exp), '') # depends on [control=['if'], data=[]]
else:
fac = 1
return fac * float(string) |
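A standalone sketch of the same parsing, with simple whitespace stripping standing in for the _denoise helper defined elsewhere in the class:

import re

def parse_exponential(string):
    string = string.strip()  # stand-in for self._denoise(string)
    exp_match = re.match(r'^[-.\d]+x10-(\d)$', string)
    if exp_match:
        exp = int(exp_match.groups()[0])
        return 10 ** -exp * float(string.replace('x10-{}'.format(exp), ''))
    return float(string)

assert abs(parse_exponential('1.23x10-4') - 1.23e-4) < 1e-15
assert parse_exponential(' 42.5 ') == 42.5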
def _recomputeRecordFromKNN(self, record):
"""
returns the classified labeling of record
"""
inputs = {
"categoryIn": [None],
"bottomUpIn": self._getStateAnomalyVector(record),
}
outputs = {"categoriesOut": numpy.zeros((1,)),
"bestPrototypeIndices":numpy.zeros((1,)),
"categoryProbabilitiesOut":numpy.zeros((1,))}
# Only use points before record to classify and after the wait period.
classifier_indexes = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
valid_idx = numpy.where(
(classifier_indexes >= self.getParameter('trainRecords')) &
(classifier_indexes < record.ROWID)
)[0].tolist()
if len(valid_idx) == 0:
return None
self._knnclassifier.setParameter('inferenceMode', None, True)
self._knnclassifier.setParameter('learningMode', None, False)
self._knnclassifier.compute(inputs, outputs)
self._knnclassifier.setParameter('learningMode', None, True)
classifier_distances = self._knnclassifier.getLatestDistances()
valid_distances = classifier_distances[valid_idx]
if valid_distances.min() <= self._classificationMaxDist:
classifier_indexes_prev = classifier_indexes[valid_idx]
rowID = classifier_indexes_prev[valid_distances.argmin()]
indexID = numpy.where(classifier_indexes == rowID)[0][0]
category = self._knnclassifier.getCategoryList()[indexID]
return category
return None | def function[_recomputeRecordFromKNN, parameter[self, record]]:
constant[
returns the classified labeling of record
]
variable[inputs] assign[=] dictionary[[<ast.Constant object at 0x7da2047eb8e0>, <ast.Constant object at 0x7da2047ea560>], [<ast.List object at 0x7da2047e9f60>, <ast.Call object at 0x7da2047e8df0>]]
variable[outputs] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9ae30>, <ast.Constant object at 0x7da18dc99330>, <ast.Constant object at 0x7da18dc986a0>], [<ast.Call object at 0x7da18dc9aa70>, <ast.Call object at 0x7da18dc99810>, <ast.Call object at 0x7da18dc9be80>]]
variable[classifier_indexes] assign[=] call[name[numpy].array, parameter[call[name[self]._knnclassifier.getParameter, parameter[constant[categoryRecencyList]]]]]
variable[valid_idx] assign[=] call[call[call[name[numpy].where, parameter[binary_operation[compare[name[classifier_indexes] greater_or_equal[>=] call[name[self].getParameter, parameter[constant[trainRecords]]]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[classifier_indexes] less[<] name[record].ROWID]]]]][constant[0]].tolist, parameter[]]
if compare[call[name[len], parameter[name[valid_idx]]] equal[==] constant[0]] begin[:]
return[constant[None]]
call[name[self]._knnclassifier.setParameter, parameter[constant[inferenceMode], constant[None], constant[True]]]
call[name[self]._knnclassifier.setParameter, parameter[constant[learningMode], constant[None], constant[False]]]
call[name[self]._knnclassifier.compute, parameter[name[inputs], name[outputs]]]
call[name[self]._knnclassifier.setParameter, parameter[constant[learningMode], constant[None], constant[True]]]
variable[classifier_distances] assign[=] call[name[self]._knnclassifier.getLatestDistances, parameter[]]
variable[valid_distances] assign[=] call[name[classifier_distances]][name[valid_idx]]
if compare[call[name[valid_distances].min, parameter[]] less_or_equal[<=] name[self]._classificationMaxDist] begin[:]
variable[classifier_indexes_prev] assign[=] call[name[classifier_indexes]][name[valid_idx]]
variable[rowID] assign[=] call[name[classifier_indexes_prev]][call[name[valid_distances].argmin, parameter[]]]
variable[indexID] assign[=] call[call[call[name[numpy].where, parameter[compare[name[classifier_indexes] equal[==] name[rowID]]]]][constant[0]]][constant[0]]
variable[category] assign[=] call[call[name[self]._knnclassifier.getCategoryList, parameter[]]][name[indexID]]
return[name[category]]
return[constant[None]] | keyword[def] identifier[_recomputeRecordFromKNN] ( identifier[self] , identifier[record] ):
literal[string]
identifier[inputs] ={
literal[string] :[ keyword[None] ],
literal[string] : identifier[self] . identifier[_getStateAnomalyVector] ( identifier[record] ),
}
identifier[outputs] ={ literal[string] : identifier[numpy] . identifier[zeros] (( literal[int] ,)),
literal[string] : identifier[numpy] . identifier[zeros] (( literal[int] ,)),
literal[string] : identifier[numpy] . identifier[zeros] (( literal[int] ,))}
identifier[classifier_indexes] = identifier[numpy] . identifier[array] (
identifier[self] . identifier[_knnclassifier] . identifier[getParameter] ( literal[string] ))
identifier[valid_idx] = identifier[numpy] . identifier[where] (
( identifier[classifier_indexes] >= identifier[self] . identifier[getParameter] ( literal[string] ))&
( identifier[classifier_indexes] < identifier[record] . identifier[ROWID] )
)[ literal[int] ]. identifier[tolist] ()
keyword[if] identifier[len] ( identifier[valid_idx] )== literal[int] :
keyword[return] keyword[None]
identifier[self] . identifier[_knnclassifier] . identifier[setParameter] ( literal[string] , keyword[None] , keyword[True] )
identifier[self] . identifier[_knnclassifier] . identifier[setParameter] ( literal[string] , keyword[None] , keyword[False] )
identifier[self] . identifier[_knnclassifier] . identifier[compute] ( identifier[inputs] , identifier[outputs] )
identifier[self] . identifier[_knnclassifier] . identifier[setParameter] ( literal[string] , keyword[None] , keyword[True] )
identifier[classifier_distances] = identifier[self] . identifier[_knnclassifier] . identifier[getLatestDistances] ()
identifier[valid_distances] = identifier[classifier_distances] [ identifier[valid_idx] ]
keyword[if] identifier[valid_distances] . identifier[min] ()<= identifier[self] . identifier[_classificationMaxDist] :
identifier[classifier_indexes_prev] = identifier[classifier_indexes] [ identifier[valid_idx] ]
identifier[rowID] = identifier[classifier_indexes_prev] [ identifier[valid_distances] . identifier[argmin] ()]
identifier[indexID] = identifier[numpy] . identifier[where] ( identifier[classifier_indexes] == identifier[rowID] )[ literal[int] ][ literal[int] ]
identifier[category] = identifier[self] . identifier[_knnclassifier] . identifier[getCategoryList] ()[ identifier[indexID] ]
keyword[return] identifier[category]
keyword[return] keyword[None] | def _recomputeRecordFromKNN(self, record):
"""
returns the classified labeling of record
"""
inputs = {'categoryIn': [None], 'bottomUpIn': self._getStateAnomalyVector(record)}
outputs = {'categoriesOut': numpy.zeros((1,)), 'bestPrototypeIndices': numpy.zeros((1,)), 'categoryProbabilitiesOut': numpy.zeros((1,))}
# Only use points before record to classify and after the wait period.
classifier_indexes = numpy.array(self._knnclassifier.getParameter('categoryRecencyList'))
valid_idx = numpy.where((classifier_indexes >= self.getParameter('trainRecords')) & (classifier_indexes < record.ROWID))[0].tolist()
if len(valid_idx) == 0:
return None # depends on [control=['if'], data=[]]
self._knnclassifier.setParameter('inferenceMode', None, True)
self._knnclassifier.setParameter('learningMode', None, False)
self._knnclassifier.compute(inputs, outputs)
self._knnclassifier.setParameter('learningMode', None, True)
classifier_distances = self._knnclassifier.getLatestDistances()
valid_distances = classifier_distances[valid_idx]
if valid_distances.min() <= self._classificationMaxDist:
classifier_indexes_prev = classifier_indexes[valid_idx]
rowID = classifier_indexes_prev[valid_distances.argmin()]
indexID = numpy.where(classifier_indexes == rowID)[0][0]
category = self._knnclassifier.getCategoryList()[indexID]
return category # depends on [control=['if'], data=[]]
return None |
async def message_from_token(self, token: Text, payload: Any) \
-> Tuple[Optional[BaseMessage], Optional[Platform]]:
"""
Given an authentication token, find the right platform that can
recognize this token and create a message for this platform.
The payload will be inserted into a Postback layer.
"""
async for platform in self.get_all_platforms():
m = await platform.message_from_token(token, payload)
if m:
return m, platform
return None, None | <ast.AsyncFunctionDef object at 0x7da18ede7760> | keyword[async] keyword[def] identifier[message_from_token] ( identifier[self] , identifier[token] : identifier[Text] , identifier[payload] : identifier[Any] )-> identifier[Tuple] [ identifier[Optional] [ identifier[BaseMessage] ], identifier[Optional] [ identifier[Platform] ]]:
literal[string]
keyword[async] keyword[for] identifier[platform] keyword[in] identifier[self] . identifier[get_all_platforms] ():
identifier[m] = keyword[await] identifier[platform] . identifier[message_from_token] ( identifier[token] , identifier[payload] )
keyword[if] identifier[m] :
keyword[return] identifier[m] , identifier[platform]
keyword[return] keyword[None] , keyword[None] | async def message_from_token(self, token: Text, payload: Any) -> Tuple[Optional[BaseMessage], Optional[Platform]]:
"""
Given an authentication token, find the right platform that can
recognize this token and create a message for this platform.
The payload will be inserted into a Postback layer.
"""
async for platform in self.get_all_platforms():
m = await platform.message_from_token(token, payload)
if m:
return (m, platform) # depends on [control=['if'], data=[]]
return (None, None) |
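A self-contained sketch of the same scan-until-first-match pattern with stand-in platform objects (EchoPlatform and the module-level helpers are assumptions, not the library's API):

import asyncio

class EchoPlatform:
    async def message_from_token(self, token, payload):
        return {'token': token, 'payload': payload} if token == 'ok' else None

async def get_all_platforms():
    for p in (EchoPlatform(),):
        yield p

async def message_from_token(token, payload):
    async for platform in get_all_platforms():
        m = await platform.message_from_token(token, payload)
        if m:
            return m, platform
    return None, None

msg, src = asyncio.run(message_from_token('ok', {'go': 'home'}))
assert msg == {'token': 'ok', 'payload': {'go': 'home'}}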
def delay(self, params, now=None):
"""Determine delay until next request."""
if now is None:
now = time.time()
# Initialize last...
if not self.last:
self.last = now
elif now < self.last:
now = self.last
# How much has leaked out?
leaked = now - self.last
# Update the last message time
self.last = now
# Update the water level
self.level = max(self.level - leaked, 0)
# Are we too full?
difference = self.level + self.limit.cost - self.limit.unit_value
if difference >= self.eps:
self.next = now + difference
return difference
# OK, raise the water level and set next to an appropriate
# value
self.level += self.limit.cost
self.next = now
return None | def function[delay, parameter[self, params, now]]:
constant[Determine delay until next request.]
if compare[name[now] is constant[None]] begin[:]
variable[now] assign[=] call[name[time].time, parameter[]]
if <ast.UnaryOp object at 0x7da2041daef0> begin[:]
name[self].last assign[=] name[now]
variable[leaked] assign[=] binary_operation[name[now] - name[self].last]
name[self].last assign[=] name[now]
name[self].level assign[=] call[name[max], parameter[binary_operation[name[self].level - name[leaked]], constant[0]]]
variable[difference] assign[=] binary_operation[binary_operation[name[self].level + name[self].limit.cost] - name[self].limit.unit_value]
if compare[name[difference] greater_or_equal[>=] name[self].eps] begin[:]
name[self].next assign[=] binary_operation[name[now] + name[difference]]
return[name[difference]]
<ast.AugAssign object at 0x7da2041d9270>
name[self].next assign[=] name[now]
return[constant[None]] | keyword[def] identifier[delay] ( identifier[self] , identifier[params] , identifier[now] = keyword[None] ):
literal[string]
keyword[if] identifier[now] keyword[is] keyword[None] :
identifier[now] = identifier[time] . identifier[time] ()
keyword[if] keyword[not] identifier[self] . identifier[last] :
identifier[self] . identifier[last] = identifier[now]
keyword[elif] identifier[now] < identifier[self] . identifier[last] :
identifier[now] = identifier[self] . identifier[last]
identifier[leaked] = identifier[now] - identifier[self] . identifier[last]
identifier[self] . identifier[last] = identifier[now]
identifier[self] . identifier[level] = identifier[max] ( identifier[self] . identifier[level] - identifier[leaked] , literal[int] )
identifier[difference] = identifier[self] . identifier[level] + identifier[self] . identifier[limit] . identifier[cost] - identifier[self] . identifier[limit] . identifier[unit_value]
keyword[if] identifier[difference] >= identifier[self] . identifier[eps] :
identifier[self] . identifier[next] = identifier[now] + identifier[difference]
keyword[return] identifier[difference]
identifier[self] . identifier[level] += identifier[self] . identifier[limit] . identifier[cost]
identifier[self] . identifier[next] = identifier[now]
keyword[return] keyword[None] | def delay(self, params, now=None):
"""Determine delay until next request."""
if now is None:
now = time.time() # depends on [control=['if'], data=['now']]
# Initialize last...
if not self.last:
self.last = now # depends on [control=['if'], data=[]]
elif now < self.last:
now = self.last # depends on [control=['if'], data=['now']]
# How much has leaked out?
leaked = now - self.last
# Update the last message time
self.last = now
# Update the water level
self.level = max(self.level - leaked, 0)
# Are we too full?
difference = self.level + self.limit.cost - self.limit.unit_value
if difference >= self.eps:
self.next = now + difference
return difference # depends on [control=['if'], data=['difference']]
# OK, raise the water level and set next to an appropriate
# value
self.level += self.limit.cost
self.next = now
return None |
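A minimal sketch of the leaky-bucket arithmetic above with hypothetical stand-ins for the limit object and bucket state (the .next bookkeeping is omitted; names are assumptions, not the library's API):

import time

class Bucket(object):
    def __init__(self, cost, unit_value, eps=1e-6):
        self.cost = cost              # water added per request
        self.unit_value = unit_value  # bucket capacity
        self.eps = eps
        self.level = 0.0
        self.last = None

    def delay(self, now=None):
        now = time.time() if now is None else now
        if self.last is None:
            self.last = now
        now = max(now, self.last)
        self.level = max(self.level - (now - self.last), 0)  # leak since last call
        self.last = now
        overflow = self.level + self.cost - self.unit_value
        if overflow >= self.eps:
            return overflow           # seconds the caller must wait
        self.level += self.cost
        return None                   # request admitted

b = Bucket(cost=1.0, unit_value=2.0)
assert b.delay(now=0.0) is None   # level -> 1.0
assert b.delay(now=0.0) is None   # level -> 2.0, bucket exactly full
assert b.delay(now=0.0) == 1.0    # next request would overflow by 1 second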
def settargets(self):
"""Set the targets to be used in the analyses. Involves the path of the database files, the database files to
use, and the level of classification for the analysis"""
# Define the set targets call. Include the path to the script, the database path and files, as well
# as the taxonomic rank to use
logging.info('Setting up database')
self.targetcall = 'cd {} && ./set_targets.sh {} {} --{}'.format(self.clarkpath, self.databasepath,
self.database, self.rank)
#
subprocess.call(self.targetcall, shell=True, stdout=self.devnull, stderr=self.devnull) | def function[settargets, parameter[self]]:
constant[Set the targets to be used in the analyses. Involves the path of the database files, the database files to
use, and the level of classification for the analysis]
call[name[logging].info, parameter[constant[Setting up database]]]
name[self].targetcall assign[=] call[constant[cd {} && ./set_targets.sh {} {} --{}].format, parameter[name[self].clarkpath, name[self].databasepath, name[self].database, name[self].rank]]
call[name[subprocess].call, parameter[name[self].targetcall]] | keyword[def] identifier[settargets] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[targetcall] = literal[string] . identifier[format] ( identifier[self] . identifier[clarkpath] , identifier[self] . identifier[databasepath] ,
identifier[self] . identifier[database] , identifier[self] . identifier[rank] )
identifier[subprocess] . identifier[call] ( identifier[self] . identifier[targetcall] , identifier[shell] = keyword[True] , identifier[stdout] = identifier[self] . identifier[devnull] , identifier[stderr] = identifier[self] . identifier[devnull] ) | def settargets(self):
"""Set the targets to be used in the analyses. Involves the path of the database files, the database files to
use, and the level of classification for the analysis"""
# Define the set targets call. Include the path to the script, the database path and files, as well
# as the taxonomic rank to use
logging.info('Setting up database')
self.targetcall = 'cd {} && ./set_targets.sh {} {} --{}'.format(self.clarkpath, self.databasepath, self.database, self.rank)
#
subprocess.call(self.targetcall, shell=True, stdout=self.devnull, stderr=self.devnull) |
def validate(self):
"""Validates the URL object. The URL object is invalid if it does not represent an absolute URL.
Returns True or False based on this.
"""
if (self.scheme is None or self.scheme == '') \
and (self.host is None or self.host == ''):
return False
return True | def function[validate, parameter[self]]:
constant[Validates the URL object. The URL object is invalid if it does not represent an absolute URL.
Returns True or False based on this.
]
if <ast.BoolOp object at 0x7da2054a6350> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[validate] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[scheme] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[scheme] == literal[string] ) keyword[and] ( identifier[self] . identifier[host] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[host] == literal[string] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def validate(self):
"""Validates the URL object. The URL object is invalid if it does not represent an absolute URL.
Returns True or False based on this.
"""
if (self.scheme is None or self.scheme == '') and (self.host is None or self.host == ''):
return False # depends on [control=['if'], data=[]]
return True |
def run(self):
""" Run the pipeline queue.
The pipeline queue will run forever.
"""
while True:
self._event.clear()
self._queue.get().run(self._event) | def function[run, parameter[self]]:
constant[ Run the pipeline queue.
The pipeline queue will run forever.
]
while constant[True] begin[:]
call[name[self]._event.clear, parameter[]]
call[call[name[self]._queue.get, parameter[]].run, parameter[name[self]._event]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
identifier[self] . identifier[_event] . identifier[clear] ()
identifier[self] . identifier[_queue] . identifier[get] (). identifier[run] ( identifier[self] . identifier[_event] ) | def run(self):
""" Run the pipeline queue.
The pipeline queue will run forever.
"""
while True:
self._event.clear()
self._queue.get().run(self._event) # depends on [control=['while'], data=[]] |
def kml_master():
"""KML master document for loading all maps in Google Earth"""
kml_doc = KMLMaster(app.config["url_formatter"], app.config["mapsources"].values())
return kml_response(kml_doc) | def function[kml_master, parameter[]]:
constant[KML master document for loading all maps in Google Earth]
variable[kml_doc] assign[=] call[name[KMLMaster], parameter[call[name[app].config][constant[url_formatter]], call[call[name[app].config][constant[mapsources]].values, parameter[]]]]
return[call[name[kml_response], parameter[name[kml_doc]]]] | keyword[def] identifier[kml_master] ():
literal[string]
identifier[kml_doc] = identifier[KMLMaster] ( identifier[app] . identifier[config] [ literal[string] ], identifier[app] . identifier[config] [ literal[string] ]. identifier[values] ())
keyword[return] identifier[kml_response] ( identifier[kml_doc] ) | def kml_master():
"""KML master document for loading all maps in Google Earth"""
kml_doc = KMLMaster(app.config['url_formatter'], app.config['mapsources'].values())
return kml_response(kml_doc) |
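kml_master pulls two entries out of app.config and wraps a KMLMaster document via kml_response. A hedged wiring sketch, since only the view body appears in the record; both config values and the route path are placeholders.

from flask import Flask

app = Flask(__name__)
app.config['url_formatter'] = lambda name: '/maps/{}.kml'.format(name)  # placeholder
app.config['mapsources'] = {}  # name -> mapsource objects in the real app

app.add_url_rule('/kml-master.kml', 'kml_master', kml_master)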
def from_esri_code(code):
"""
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("esri", code, "proj4")
crs = from_proj4(proj4)
return crs | def function[from_esri_code, parameter[code]]:
constant[
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- A CS instance of the indicated type.
]
variable[code] assign[=] call[name[str], parameter[name[code]]]
variable[proj4] assign[=] call[name[utils].crscode_to_string, parameter[constant[esri], name[code], constant[proj4]]]
variable[crs] assign[=] call[name[from_proj4], parameter[name[proj4]]]
return[name[crs]] | keyword[def] identifier[from_esri_code] ( identifier[code] ):
literal[string]
identifier[code] = identifier[str] ( identifier[code] )
identifier[proj4] = identifier[utils] . identifier[crscode_to_string] ( literal[string] , identifier[code] , literal[string] )
identifier[crs] = identifier[from_proj4] ( identifier[proj4] )
keyword[return] identifier[crs] | def from_esri_code(code):
"""
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string('esri', code, 'proj4')
crs = from_proj4(proj4)
return crs |
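A short usage sketch; ESRI:54030 (World Robinson) is only an illustrative code, and, as the docstring warns, the call resolves it online through spatialreference.org.

crs = from_esri_code(54030)  # requires network access for the code lookup
print(crs)                   # inspect the resulting CS instance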
def insert(self, index, filename):
"""Insert a new subclass with filename at index, mockup __module__."""
base = self._base
dct = {'__module__': base.__module__, 'filename': filename, '_stack': self}
cls = type(base.__name__, (base,), dct)
self._map[cls.filename] = cls
self._classes.insert(index, cls) | def function[insert, parameter[self, index, filename]]:
constant[Insert a new subclass with filename at index, mockup __module__.]
variable[base] assign[=] name[self]._base
variable[dct] assign[=] dictionary[[<ast.Constant object at 0x7da20cabf910>, <ast.Constant object at 0x7da20cabfb20>, <ast.Constant object at 0x7da20cabd8a0>], [<ast.Attribute object at 0x7da20cabc700>, <ast.Name object at 0x7da20cabc3a0>, <ast.Name object at 0x7da20cabd930>]]
variable[cls] assign[=] call[name[type], parameter[name[base].__name__, tuple[[<ast.Name object at 0x7da20cabf2e0>]], name[dct]]]
call[name[self]._map][name[cls].filename] assign[=] name[cls]
call[name[self]._classes.insert, parameter[name[index], name[cls]]] | keyword[def] identifier[insert] ( identifier[self] , identifier[index] , identifier[filename] ):
literal[string]
identifier[base] = identifier[self] . identifier[_base]
identifier[dct] ={ literal[string] : identifier[base] . identifier[__module__] , literal[string] : identifier[filename] , literal[string] : identifier[self] }
identifier[cls] = identifier[type] ( identifier[base] . identifier[__name__] ,( identifier[base] ,), identifier[dct] )
identifier[self] . identifier[_map] [ identifier[cls] . identifier[filename] ]= identifier[cls]
identifier[self] . identifier[_classes] . identifier[insert] ( identifier[index] , identifier[cls] ) | def insert(self, index, filename):
"""Insert a new subclass with filename at index, mockup __module__."""
base = self._base
dct = {'__module__': base.__module__, 'filename': filename, '_stack': self}
cls = type(base.__name__, (base,), dct)
self._map[cls.filename] = cls
self._classes.insert(index, cls) |
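insert() manufactures one subclass per filename so every stack entry carries its own class identity while sharing behaviour from _base. A self-contained sketch of the surrounding container, with the attribute names taken from the record and the Page base class assumed for illustration:

class Page:
    filename = None
    _stack = None

class PageStack:
    def __init__(self, base):
        self._base = base
        self._map = {}      # filename -> generated subclass
        self._classes = []  # ordered stack of generated subclasses
    insert = insert  # the method from the record above

stack = PageStack(Page)
stack.insert(0, 'index.html')
assert stack._map['index.html'].filename == 'index.html'
assert issubclass(stack._classes[0], Page)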
def tau0_from_mtotal_eta(mtotal, eta, f_lower):
r"""Returns :math:`\tau_0` from the total mass, symmetric mass ratio, and
the given frequency.
"""
# convert to seconds
mtotal = mtotal * lal.MTSUN_SI
# formulae from arxiv.org:0706.4437
return _a0(f_lower) / (mtotal**(5./3.) * eta) | def function[tau0_from_mtotal_eta, parameter[mtotal, eta, f_lower]]:
constant[Returns :math:`\tau_0` from the total mass, symmetric mass ratio, and
the given frequency.
]
variable[mtotal] assign[=] binary_operation[name[mtotal] * name[lal].MTSUN_SI]
return[binary_operation[call[name[_a0], parameter[name[f_lower]]] / binary_operation[binary_operation[name[mtotal] ** binary_operation[constant[5.0] / constant[3.0]]] * name[eta]]]] | keyword[def] identifier[tau0_from_mtotal_eta] ( identifier[mtotal] , identifier[eta] , identifier[f_lower] ):
literal[string]
identifier[mtotal] = identifier[mtotal] * identifier[lal] . identifier[MTSUN_SI]
keyword[return] identifier[_a0] ( identifier[f_lower] )/( identifier[mtotal] **( literal[int] / literal[int] )* identifier[eta] ) | def tau0_from_mtotal_eta(mtotal, eta, f_lower):
"""Returns :math:`\\tau_0` from the total mass, symmetric mass ratio, and
the given frequency.
"""
# convert to seconds
mtotal = mtotal * lal.MTSUN_SI
# formulae from arxiv.org:0706.4437
return _a0(f_lower) / (mtotal ** (5.0 / 3.0) * eta) |
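With the usual Newtonian chirp-time coefficient — _a0(f) is presumably 5 / (256 * (pi*f)**(8/3)), matching the arXiv:0706.4437 formulae the comment cites — the return value is tau_0 = 5 / (256 * eta * (pi*f_lower)**(8/3) * mtotal**(5/3)), with mtotal in seconds. A hedged numeric check under that assumption:

import math

MTSUN_SI = 4.92549e-06  # solar mass in seconds (approx. value of lal.MTSUN_SI)

def _a0(f_lower):
    # assumed definition of the module-private helper
    return 5.0 / (256.0 * (math.pi * f_lower) ** (8.0 / 3.0))

mtotal = 2.8 * MTSUN_SI  # a 1.4 + 1.4 solar-mass binary, in seconds
eta = 0.25               # symmetric mass ratio for equal masses
tau0 = _a0(30.0) / (mtotal ** (5.0 / 3.0) * eta)
print(round(tau0))       # ~54 s of signal above 30 Hz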