code stringlengths 75–104k | code_sememe stringlengths 47–309k | token_type stringlengths 215–214k | code_dependency stringlengths 75–155k |
---|---|---|---|
def existing_versions(self):
"""
Returns data with different cfgstr values that were previously computed
with this cacher.
Example:
>>> from ubelt.util_cache import Cacher
>>> # Ensure that some data exists
>>> known_fnames = set()
>>> cacher = Cacher('versioned_data', cfgstr='1')
>>> cacher.ensure(lambda: 'data1')
>>> known_fnames.add(cacher.get_fpath())
>>> cacher = Cacher('versioned_data', cfgstr='2')
>>> cacher.ensure(lambda: 'data2')
>>> known_fnames.add(cacher.get_fpath())
>>> # List previously computed configs for this type
>>> from os.path import basename
>>> cacher = Cacher('versioned_data', cfgstr='2')
>>> exist_fpaths = set(cacher.existing_versions())
>>> exist_fnames = list(map(basename, exist_fpaths))
    >>> print(exist_fnames)
    ['versioned_data_1.pkl', 'versioned_data_2.pkl']
    >>> assert exist_fpaths == known_fnames
"""
import glob
pattern = join(self.dpath, self.fname + '_*' + self.ext)
for fname in glob.iglob(pattern):
data_fpath = join(self.dpath, fname)
        yield data_fpath |
def function[existing_versions, parameter[self]]:
constant[
Returns data with different cfgstr values that were previously computed
with this cacher.
Example:
>>> from ubelt.util_cache import Cacher
>>> # Ensure that some data exists
>>> known_fnames = set()
>>> cacher = Cacher('versioned_data', cfgstr='1')
>>> cacher.ensure(lambda: 'data1')
>>> known_fnames.add(cacher.get_fpath())
>>> cacher = Cacher('versioned_data', cfgstr='2')
>>> cacher.ensure(lambda: 'data2')
>>> known_fnames.add(cacher.get_fpath())
>>> # List previously computed configs for this type
>>> from os.path import basename
>>> cacher = Cacher('versioned_data', cfgstr='2')
>>> exist_fpaths = set(cacher.existing_versions())
>>> exist_fnames = list(map(basename, exist_fpaths))
    >>> print(exist_fnames)
    ['versioned_data_1.pkl', 'versioned_data_2.pkl']
    >>> assert exist_fpaths == known_fnames
]
import module[glob]
variable[pattern] assign[=] call[name[join], parameter[name[self].dpath, binary_operation[binary_operation[name[self].fname + constant[_*]] + name[self].ext]]]
for taget[name[fname]] in starred[call[name[glob].iglob, parameter[name[pattern]]]] begin[:]
variable[data_fpath] assign[=] call[name[join], parameter[name[self].dpath, name[fname]]]
        <ast.Yield object at 0x7da1b0159bd0> |
keyword[def] identifier[existing_versions] ( identifier[self] ):
literal[string]
keyword[import] identifier[glob]
identifier[pattern] = identifier[join] ( identifier[self] . identifier[dpath] , identifier[self] . identifier[fname] + literal[string] + identifier[self] . identifier[ext] )
keyword[for] identifier[fname] keyword[in] identifier[glob] . identifier[iglob] ( identifier[pattern] ):
identifier[data_fpath] = identifier[join] ( identifier[self] . identifier[dpath] , identifier[fname] )
        keyword[yield] identifier[data_fpath] |
def existing_versions(self):
"""
Returns data with different cfgstr values that were previously computed
with this cacher.
Example:
>>> from ubelt.util_cache import Cacher
>>> # Ensure that some data exists
>>> known_fnames = set()
>>> cacher = Cacher('versioned_data', cfgstr='1')
>>> cacher.ensure(lambda: 'data1')
>>> known_fnames.add(cacher.get_fpath())
>>> cacher = Cacher('versioned_data', cfgstr='2')
>>> cacher.ensure(lambda: 'data2')
>>> known_fnames.add(cacher.get_fpath())
>>> # List previously computed configs for this type
>>> from os.path import basename
>>> cacher = Cacher('versioned_data', cfgstr='2')
>>> exist_fpaths = set(cacher.existing_versions())
>>> exist_fnames = list(map(basename, exist_fpaths))
    >>> print(exist_fnames)
    ['versioned_data_1.pkl', 'versioned_data_2.pkl']
    >>> assert exist_fpaths == known_fnames
"""
import glob
pattern = join(self.dpath, self.fname + '_*' + self.ext)
for fname in glob.iglob(pattern):
data_fpath = join(self.dpath, fname)
yield data_fpath # depends on [control=['for'], data=['fname']] |
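Each row of the table holds four parallel views of one function: the raw source (code), an AST-walk rendering (code_sememe), a lexical-class rendering (token_type), and the source re-emitted with control-dependency comments (code_dependency). The sketch below shows, for a trivial assignment, how a sememe-style rendering can be derived with the standard ast module; it illustrates the column relationship only and is not the dataset's actual generator.

```python
# Illustrative sketch: render `name = <constant>` assignments in the spirit
# of the code_sememe column. The dataset's real generator is not shown here.
import ast

def sememe_of_assigns(src: str) -> str:
    rendered = []
    for node in ast.walk(ast.parse(src)):
        # Only `name = <constant>` assignments are handled in this sketch.
        if isinstance(node, ast.Assign) and isinstance(node.targets[0], ast.Name) \
                and isinstance(node.value, ast.Constant):
            rendered.append(
                f"variable[{node.targets[0].id}] assign[=] constant[{node.value.value!r}]"
            )
    return "\n".join(rendered)

print(sememe_of_assigns("x = 1"))  # variable[x] assign[=] constant[1]
```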
def restore(name):
"""Restores the database from a snapshot"""
app = get_app()
if not name:
snapshot = app.get_latest_snapshot()
if not snapshot:
click.echo(
"Couldn't find any snapshots for project %s" %
load_config()['project_name']
)
sys.exit(1)
else:
snapshot = app.get_snapshot(name)
if not snapshot:
click.echo(
"Couldn't find snapshot with name %s.\n"
"You can list snapshots with 'stellar list'" % name
)
sys.exit(1)
# Check if slaves are ready
if not snapshot.slaves_ready:
if app.is_copy_process_running(snapshot):
sys.stdout.write(
'Waiting for background process(%s) to finish' %
snapshot.worker_pid
)
sys.stdout.flush()
while not snapshot.slaves_ready:
sys.stdout.write('.')
sys.stdout.flush()
sleep(1)
app.db.session.refresh(snapshot)
click.echo('')
else:
click.echo('Background process missing, doing slow restore.')
app.inline_slave_copy(snapshot)
app.restore(snapshot)
    click.echo('Restore complete.') |
def function[restore, parameter[name]]:
constant[Restores the database from a snapshot]
variable[app] assign[=] call[name[get_app], parameter[]]
if <ast.UnaryOp object at 0x7da1b0537d00> begin[:]
variable[snapshot] assign[=] call[name[app].get_latest_snapshot, parameter[]]
if <ast.UnaryOp object at 0x7da1b0537e20> begin[:]
call[name[click].echo, parameter[binary_operation[constant[Couldn't find any snapshots for project %s] <ast.Mod object at 0x7da2590d6920> call[call[name[load_config], parameter[]]][constant[project_name]]]]]
call[name[sys].exit, parameter[constant[1]]]
if <ast.UnaryOp object at 0x7da1b0535cc0> begin[:]
if call[name[app].is_copy_process_running, parameter[name[snapshot]]] begin[:]
call[name[sys].stdout.write, parameter[binary_operation[constant[Waiting for background process(%s) to finish] <ast.Mod object at 0x7da2590d6920> name[snapshot].worker_pid]]]
call[name[sys].stdout.flush, parameter[]]
while <ast.UnaryOp object at 0x7da1b0537eb0> begin[:]
call[name[sys].stdout.write, parameter[constant[.]]]
call[name[sys].stdout.flush, parameter[]]
call[name[sleep], parameter[constant[1]]]
call[name[app].db.session.refresh, parameter[name[snapshot]]]
call[name[click].echo, parameter[constant[]]]
call[name[app].restore, parameter[name[snapshot]]]
    call[name[click].echo, parameter[constant[Restore complete.]]] |
keyword[def] identifier[restore] ( identifier[name] ):
literal[string]
identifier[app] = identifier[get_app] ()
keyword[if] keyword[not] identifier[name] :
identifier[snapshot] = identifier[app] . identifier[get_latest_snapshot] ()
keyword[if] keyword[not] identifier[snapshot] :
identifier[click] . identifier[echo] (
literal[string] %
identifier[load_config] ()[ literal[string] ]
)
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
identifier[snapshot] = identifier[app] . identifier[get_snapshot] ( identifier[name] )
keyword[if] keyword[not] identifier[snapshot] :
identifier[click] . identifier[echo] (
literal[string]
literal[string] % identifier[name]
)
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] keyword[not] identifier[snapshot] . identifier[slaves_ready] :
keyword[if] identifier[app] . identifier[is_copy_process_running] ( identifier[snapshot] ):
identifier[sys] . identifier[stdout] . identifier[write] (
literal[string] %
identifier[snapshot] . identifier[worker_pid]
)
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[while] keyword[not] identifier[snapshot] . identifier[slaves_ready] :
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[sleep] ( literal[int] )
identifier[app] . identifier[db] . identifier[session] . identifier[refresh] ( identifier[snapshot] )
identifier[click] . identifier[echo] ( literal[string] )
keyword[else] :
identifier[click] . identifier[echo] ( literal[string] )
identifier[app] . identifier[inline_slave_copy] ( identifier[snapshot] )
identifier[app] . identifier[restore] ( identifier[snapshot] )
    identifier[click] . identifier[echo] ( literal[string] ) |
def restore(name):
"""Restores the database from a snapshot"""
app = get_app()
if not name:
snapshot = app.get_latest_snapshot()
if not snapshot:
click.echo("Couldn't find any snapshots for project %s" % load_config()['project_name'])
sys.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
snapshot = app.get_snapshot(name)
if not snapshot:
click.echo("Couldn't find snapshot with name %s.\nYou can list snapshots with 'stellar list'" % name)
sys.exit(1) # depends on [control=['if'], data=[]]
# Check if slaves are ready
if not snapshot.slaves_ready:
if app.is_copy_process_running(snapshot):
sys.stdout.write('Waiting for background process(%s) to finish' % snapshot.worker_pid)
sys.stdout.flush()
while not snapshot.slaves_ready:
sys.stdout.write('.')
sys.stdout.flush()
sleep(1)
app.db.session.refresh(snapshot) # depends on [control=['while'], data=[]]
click.echo('') # depends on [control=['if'], data=[]]
else:
click.echo('Background process missing, doing slow restore.')
app.inline_slave_copy(snapshot) # depends on [control=['if'], data=[]]
app.restore(snapshot)
click.echo('Restore complete.') |
def get_attribute(cls, soup, key, unknown=None):
"""
Get attribute for Beautifulsoup object
:param soup: Beautifulsoup object
:param key: attribute key
    :param unknown: value to return when the attribute key does not exist (default: None)
:return: attribute value
"""
if key in soup.attrs:
return soup.get(key)
    return unknown |
def function[get_attribute, parameter[cls, soup, key, unknown]]:
constant[
Get attribute for Beautifulsoup object
:param soup: Beautifulsoup object
:param key: attribute key
    :param unknown: value to return when the attribute key does not exist (default: None)
:return: attribute value
]
if compare[name[key] in name[soup].attrs] begin[:]
return[call[name[soup].get, parameter[name[key]]]]
    return[name[unknown]] |
keyword[def] identifier[get_attribute] ( identifier[cls] , identifier[soup] , identifier[key] , identifier[unknown] = keyword[None] ):
literal[string]
keyword[if] identifier[key] keyword[in] identifier[soup] . identifier[attrs] :
keyword[return] identifier[soup] . identifier[get] ( identifier[key] )
    keyword[return] identifier[unknown] |
def get_attribute(cls, soup, key, unknown=None):
"""
Get attribute for Beautifulsoup object
:param soup: Beautifulsoup object
:param key: attribute key
    :param unknown: value to return when the attribute key does not exist (default: None)
:return: attribute value
"""
if key in soup.attrs:
return soup.get(key) # depends on [control=['if'], data=['key']]
return unknown |
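A usage sketch for get_attribute above, assuming beautifulsoup4 is installed. The Parser class is hypothetical; the row only shows the method itself.

```python
from bs4 import BeautifulSoup

class Parser:
    @classmethod
    def get_attribute(cls, soup, key, unknown=None):
        # Same body as the row above.
        if key in soup.attrs:
            return soup.get(key)
        return unknown

tag = BeautifulSoup('<a href="https://example.com">x</a>', 'html.parser').find('a')
print(Parser.get_attribute(tag, 'href'))          # https://example.com
print(Parser.get_attribute(tag, 'title', 'n/a'))  # n/a (key absent, fallback used)
```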
def leaveEvent(self, event):
""" Reimplemented to start the hide timer.
"""
super(CallTipWidget, self).leaveEvent(event)
    self._leave_event_hide() |
def function[leaveEvent, parameter[self, event]]:
constant[ Reimplemented to start the hide timer.
]
call[call[name[super], parameter[name[CallTipWidget], name[self]]].leaveEvent, parameter[name[event]]]
    call[name[self]._leave_event_hide, parameter[]] |
keyword[def] identifier[leaveEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[super] ( identifier[CallTipWidget] , identifier[self] ). identifier[leaveEvent] ( identifier[event] )
    identifier[self] . identifier[_leave_event_hide] () |
def leaveEvent(self, event):
""" Reimplemented to start the hide timer.
"""
super(CallTipWidget, self).leaveEvent(event)
self._leave_event_hide() |
def printMe(self, selfTag, selfValue):
'''Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
        selfValue (list): a list of value elements (single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
'''
if len(selfValue) == 0:
return ''
    # if the value has only one element and it is not another single,
    # print it differently
elif len(selfValue) == 1 and not ancestor(selfValue[0]) is Single:
text = '<{tag}>{value}</{tag}>\n'.format(
tag=selfTag, value=selfValue[0])
return text
else:
valueText = ''
for element in selfValue:
# if the element is another single
# or merely an object
            # both possibilities should not happen at the same time
            # if so, the user is not doing the right thing
if singleOrPair(element) == 'Single':
# ask that single to print itself
valueText += element.printMe(element.tag, element.value)
elif singleOrPair(element) == 'Pair':
valueText += element.printMe(element.key, element.value)
else:
# simply print that element
valueText += str(element) + '\n'
valueText = indent(valueText, 4)
text = '<{tag}>\n'.format(
tag=selfTag) + valueText + '</{tag}>\n'.format(tag=selfTag)
        return text |
def function[printMe, parameter[self, selfTag, selfValue]]:
constant[Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
    selfValue (list): a list of value elements (single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
]
if compare[call[name[len], parameter[name[selfValue]]] equal[==] constant[0]] begin[:]
        return[constant[]] |
keyword[def] identifier[printMe] ( identifier[self] , identifier[selfTag] , identifier[selfValue] ):
literal[string]
keyword[if] identifier[len] ( identifier[selfValue] )== literal[int] :
keyword[return] literal[string]
keyword[elif] identifier[len] ( identifier[selfValue] )== literal[int] keyword[and] keyword[not] identifier[ancestor] ( identifier[selfValue] [ literal[int] ]) keyword[is] identifier[Single] :
identifier[text] = literal[string] . identifier[format] (
identifier[tag] = identifier[selfTag] , identifier[value] = identifier[selfValue] [ literal[int] ])
keyword[return] identifier[text]
keyword[else] :
identifier[valueText] = literal[string]
keyword[for] identifier[element] keyword[in] identifier[selfValue] :
keyword[if] identifier[singleOrPair] ( identifier[element] )== literal[string] :
identifier[valueText] += identifier[element] . identifier[printMe] ( identifier[element] . identifier[tag] , identifier[element] . identifier[value] )
keyword[elif] identifier[singleOrPair] ( identifier[element] )== literal[string] :
identifier[valueText] += identifier[element] . identifier[printMe] ( identifier[element] . identifier[key] , identifier[element] . identifier[value] )
keyword[else] :
identifier[valueText] += identifier[str] ( identifier[element] )+ literal[string]
identifier[valueText] = identifier[indent] ( identifier[valueText] , literal[int] )
identifier[text] = literal[string] . identifier[format] (
identifier[tag] = identifier[selfTag] )+ identifier[valueText] + literal[string] . identifier[format] ( identifier[tag] = identifier[selfTag] )
        keyword[return] identifier[text] |
def printMe(self, selfTag, selfValue):
"""Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
    selfValue (list): a list of value elements (single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
"""
if len(selfValue) == 0:
return '' # depends on [control=['if'], data=[]]
    # if the value has only one element and it is not another single,
    # print it differently
elif len(selfValue) == 1 and (not ancestor(selfValue[0]) is Single):
text = '<{tag}>{value}</{tag}>\n'.format(tag=selfTag, value=selfValue[0])
return text # depends on [control=['if'], data=[]]
else:
valueText = ''
for element in selfValue:
# if the element is another single
# or merely an object
            # both possibilities should not happen at the same time
            # if so, the user is not doing the right thing
if singleOrPair(element) == 'Single':
# ask that single to print itself
valueText += element.printMe(element.tag, element.value) # depends on [control=['if'], data=[]]
elif singleOrPair(element) == 'Pair':
valueText += element.printMe(element.key, element.value) # depends on [control=['if'], data=[]]
else:
# simply print that element
valueText += str(element) + '\n' # depends on [control=['for'], data=['element']]
valueText = indent(valueText, 4)
text = '<{tag}>\n'.format(tag=selfTag) + valueText + '</{tag}>\n'.format(tag=selfTag)
return text |
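printMe relies on three helpers the row does not include (indent, singleOrPair, ancestor). The sketches below are inferred from the call sites and are assumptions, not the project's actual definitions.

```python
import textwrap

def indent(text, spaces):
    # printMe calls indent(valueText, 4): indent every line by `spaces` spaces.
    return textwrap.indent(text, ' ' * spaces)

def singleOrPair(obj):
    # printMe's branches suggest classification by a `tag` (Single)
    # or `key` (Pair) attribute.
    if hasattr(obj, 'tag'):
        return 'Single'
    if hasattr(obj, 'key'):
        return 'Pair'
    return 'Other'

def ancestor(obj):
    # printMe tests `ancestor(value) is Single`; returning the class directly
    # above `object` in the MRO would satisfy that use for Single subclasses.
    mro = type(obj).__mro__
    return mro[-2] if len(mro) > 1 else mro[0]
```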
def move(self, partition, source, dest):
"""Return a new state that is the result of moving a single partition.
:param partition: The partition index of the partition to move.
:param source: The broker index of the broker to move the partition
from.
:param dest: The broker index of the broker to move the partition to.
"""
new_state = copy(self)
# Update the partition replica tuple
source_index = self.replicas[partition].index(source)
new_state.replicas = tuple_alter(
self.replicas,
(partition, lambda replicas: tuple_replace(
replicas,
(source_index, dest),
)),
)
new_state.pending_partitions = self.pending_partitions + (partition, )
# Update the broker weights
partition_weight = self.partition_weights[partition]
new_state.broker_weights = tuple_alter(
self.broker_weights,
(source, lambda broker_weight: broker_weight - partition_weight),
(dest, lambda broker_weight: broker_weight + partition_weight),
)
# Update the broker partition count
new_state.broker_partition_counts = tuple_alter(
self.broker_partition_counts,
(source, lambda partition_count: partition_count - 1),
(dest, lambda partition_count: partition_count + 1),
)
# Update the broker leader weights
if source_index == 0:
new_state.broker_leader_weights = tuple_alter(
self.broker_leader_weights,
(source, lambda lw: lw - partition_weight),
(dest, lambda lw: lw + partition_weight),
)
new_state.broker_leader_counts = tuple_alter(
self.broker_leader_counts,
(source, lambda leader_count: leader_count - 1),
(dest, lambda leader_count: leader_count + 1),
)
new_state.leader_movement_count += 1
# Update the topic broker counts
topic = self.partition_topic[partition]
new_state.topic_broker_count = tuple_alter(
self.topic_broker_count,
(topic, lambda broker_count: tuple_alter(
broker_count,
(source, lambda count: count - 1),
(dest, lambda count: count + 1),
)),
)
# Update the topic broker imbalance
new_state.topic_broker_imbalance = tuple_replace(
self.topic_broker_imbalance,
(topic, new_state._calculate_topic_imbalance(topic)),
)
new_state._weighted_topic_broker_imbalance = (
self._weighted_topic_broker_imbalance +
self.topic_weights[topic] * (
new_state.topic_broker_imbalance[topic] -
self.topic_broker_imbalance[topic]
)
)
# Update the replication group replica counts
source_rg = self.broker_rg[source]
dest_rg = self.broker_rg[dest]
if source_rg != dest_rg:
new_state.rg_replicas = tuple_alter(
self.rg_replicas,
(source_rg, lambda replica_counts: tuple_alter(
replica_counts,
(partition, lambda replica_count: replica_count - 1),
)),
(dest_rg, lambda replica_counts: tuple_alter(
replica_counts,
(partition, lambda replica_count: replica_count + 1),
)),
)
# Update the movement sizes
new_state.movement_size += self.partition_sizes[partition]
new_state.movement_count += 1
    return new_state |
def function[move, parameter[self, partition, source, dest]]:
constant[Return a new state that is the result of moving a single partition.
:param partition: The partition index of the partition to move.
:param source: The broker index of the broker to move the partition
from.
:param dest: The broker index of the broker to move the partition to.
]
variable[new_state] assign[=] call[name[copy], parameter[name[self]]]
variable[source_index] assign[=] call[call[name[self].replicas][name[partition]].index, parameter[name[source]]]
name[new_state].replicas assign[=] call[name[tuple_alter], parameter[name[self].replicas, tuple[[<ast.Name object at 0x7da2054a73d0>, <ast.Lambda object at 0x7da2054a5cc0>]]]]
name[new_state].pending_partitions assign[=] binary_operation[name[self].pending_partitions + tuple[[<ast.Name object at 0x7da2054a6020>]]]
variable[partition_weight] assign[=] call[name[self].partition_weights][name[partition]]
name[new_state].broker_weights assign[=] call[name[tuple_alter], parameter[name[self].broker_weights, tuple[[<ast.Name object at 0x7da1b084d0c0>, <ast.Lambda object at 0x7da1b084e380>]], tuple[[<ast.Name object at 0x7da1b084d3c0>, <ast.Lambda object at 0x7da1b084ef80>]]]]
name[new_state].broker_partition_counts assign[=] call[name[tuple_alter], parameter[name[self].broker_partition_counts, tuple[[<ast.Name object at 0x7da1b084ef50>, <ast.Lambda object at 0x7da1b084d1e0>]], tuple[[<ast.Name object at 0x7da1b084f130>, <ast.Lambda object at 0x7da1b084f970>]]]]
if compare[name[source_index] equal[==] constant[0]] begin[:]
name[new_state].broker_leader_weights assign[=] call[name[tuple_alter], parameter[name[self].broker_leader_weights, tuple[[<ast.Name object at 0x7da1b084f6d0>, <ast.Lambda object at 0x7da1b084f4f0>]], tuple[[<ast.Name object at 0x7da1b084f220>, <ast.Lambda object at 0x7da1b084d6c0>]]]]
name[new_state].broker_leader_counts assign[=] call[name[tuple_alter], parameter[name[self].broker_leader_counts, tuple[[<ast.Name object at 0x7da1b07990f0>, <ast.Lambda object at 0x7da1b079a170>]], tuple[[<ast.Name object at 0x7da1b079b6a0>, <ast.Lambda object at 0x7da1b079a680>]]]]
<ast.AugAssign object at 0x7da1b079bca0>
variable[topic] assign[=] call[name[self].partition_topic][name[partition]]
name[new_state].topic_broker_count assign[=] call[name[tuple_alter], parameter[name[self].topic_broker_count, tuple[[<ast.Name object at 0x7da1b0799ab0>, <ast.Lambda object at 0x7da1b0798d90>]]]]
name[new_state].topic_broker_imbalance assign[=] call[name[tuple_replace], parameter[name[self].topic_broker_imbalance, tuple[[<ast.Name object at 0x7da1b079bee0>, <ast.Call object at 0x7da1b079ab00>]]]]
name[new_state]._weighted_topic_broker_imbalance assign[=] binary_operation[name[self]._weighted_topic_broker_imbalance + binary_operation[call[name[self].topic_weights][name[topic]] * binary_operation[call[name[new_state].topic_broker_imbalance][name[topic]] - call[name[self].topic_broker_imbalance][name[topic]]]]]
variable[source_rg] assign[=] call[name[self].broker_rg][name[source]]
variable[dest_rg] assign[=] call[name[self].broker_rg][name[dest]]
if compare[name[source_rg] not_equal[!=] name[dest_rg]] begin[:]
name[new_state].rg_replicas assign[=] call[name[tuple_alter], parameter[name[self].rg_replicas, tuple[[<ast.Name object at 0x7da1b079bb80>, <ast.Lambda object at 0x7da1b079b5b0>]], tuple[[<ast.Name object at 0x7da1b079bc40>, <ast.Lambda object at 0x7da1b0798580>]]]]
<ast.AugAssign object at 0x7da1b079b7c0>
<ast.AugAssign object at 0x7da1b079a860>
    return[name[new_state]] |
keyword[def] identifier[move] ( identifier[self] , identifier[partition] , identifier[source] , identifier[dest] ):
literal[string]
identifier[new_state] = identifier[copy] ( identifier[self] )
identifier[source_index] = identifier[self] . identifier[replicas] [ identifier[partition] ]. identifier[index] ( identifier[source] )
identifier[new_state] . identifier[replicas] = identifier[tuple_alter] (
identifier[self] . identifier[replicas] ,
( identifier[partition] , keyword[lambda] identifier[replicas] : identifier[tuple_replace] (
identifier[replicas] ,
( identifier[source_index] , identifier[dest] ),
)),
)
identifier[new_state] . identifier[pending_partitions] = identifier[self] . identifier[pending_partitions] +( identifier[partition] ,)
identifier[partition_weight] = identifier[self] . identifier[partition_weights] [ identifier[partition] ]
identifier[new_state] . identifier[broker_weights] = identifier[tuple_alter] (
identifier[self] . identifier[broker_weights] ,
( identifier[source] , keyword[lambda] identifier[broker_weight] : identifier[broker_weight] - identifier[partition_weight] ),
( identifier[dest] , keyword[lambda] identifier[broker_weight] : identifier[broker_weight] + identifier[partition_weight] ),
)
identifier[new_state] . identifier[broker_partition_counts] = identifier[tuple_alter] (
identifier[self] . identifier[broker_partition_counts] ,
( identifier[source] , keyword[lambda] identifier[partition_count] : identifier[partition_count] - literal[int] ),
( identifier[dest] , keyword[lambda] identifier[partition_count] : identifier[partition_count] + literal[int] ),
)
keyword[if] identifier[source_index] == literal[int] :
identifier[new_state] . identifier[broker_leader_weights] = identifier[tuple_alter] (
identifier[self] . identifier[broker_leader_weights] ,
( identifier[source] , keyword[lambda] identifier[lw] : identifier[lw] - identifier[partition_weight] ),
( identifier[dest] , keyword[lambda] identifier[lw] : identifier[lw] + identifier[partition_weight] ),
)
identifier[new_state] . identifier[broker_leader_counts] = identifier[tuple_alter] (
identifier[self] . identifier[broker_leader_counts] ,
( identifier[source] , keyword[lambda] identifier[leader_count] : identifier[leader_count] - literal[int] ),
( identifier[dest] , keyword[lambda] identifier[leader_count] : identifier[leader_count] + literal[int] ),
)
identifier[new_state] . identifier[leader_movement_count] += literal[int]
identifier[topic] = identifier[self] . identifier[partition_topic] [ identifier[partition] ]
identifier[new_state] . identifier[topic_broker_count] = identifier[tuple_alter] (
identifier[self] . identifier[topic_broker_count] ,
( identifier[topic] , keyword[lambda] identifier[broker_count] : identifier[tuple_alter] (
identifier[broker_count] ,
( identifier[source] , keyword[lambda] identifier[count] : identifier[count] - literal[int] ),
( identifier[dest] , keyword[lambda] identifier[count] : identifier[count] + literal[int] ),
)),
)
identifier[new_state] . identifier[topic_broker_imbalance] = identifier[tuple_replace] (
identifier[self] . identifier[topic_broker_imbalance] ,
( identifier[topic] , identifier[new_state] . identifier[_calculate_topic_imbalance] ( identifier[topic] )),
)
identifier[new_state] . identifier[_weighted_topic_broker_imbalance] =(
identifier[self] . identifier[_weighted_topic_broker_imbalance] +
identifier[self] . identifier[topic_weights] [ identifier[topic] ]*(
identifier[new_state] . identifier[topic_broker_imbalance] [ identifier[topic] ]-
identifier[self] . identifier[topic_broker_imbalance] [ identifier[topic] ]
)
)
identifier[source_rg] = identifier[self] . identifier[broker_rg] [ identifier[source] ]
identifier[dest_rg] = identifier[self] . identifier[broker_rg] [ identifier[dest] ]
keyword[if] identifier[source_rg] != identifier[dest_rg] :
identifier[new_state] . identifier[rg_replicas] = identifier[tuple_alter] (
identifier[self] . identifier[rg_replicas] ,
( identifier[source_rg] , keyword[lambda] identifier[replica_counts] : identifier[tuple_alter] (
identifier[replica_counts] ,
( identifier[partition] , keyword[lambda] identifier[replica_count] : identifier[replica_count] - literal[int] ),
)),
( identifier[dest_rg] , keyword[lambda] identifier[replica_counts] : identifier[tuple_alter] (
identifier[replica_counts] ,
( identifier[partition] , keyword[lambda] identifier[replica_count] : identifier[replica_count] + literal[int] ),
)),
)
identifier[new_state] . identifier[movement_size] += identifier[self] . identifier[partition_sizes] [ identifier[partition] ]
identifier[new_state] . identifier[movement_count] += literal[int]
    keyword[return] identifier[new_state] |
def move(self, partition, source, dest):
"""Return a new state that is the result of moving a single partition.
:param partition: The partition index of the partition to move.
:param source: The broker index of the broker to move the partition
from.
:param dest: The broker index of the broker to move the partition to.
"""
new_state = copy(self)
# Update the partition replica tuple
source_index = self.replicas[partition].index(source)
new_state.replicas = tuple_alter(self.replicas, (partition, lambda replicas: tuple_replace(replicas, (source_index, dest))))
new_state.pending_partitions = self.pending_partitions + (partition,)
# Update the broker weights
partition_weight = self.partition_weights[partition]
new_state.broker_weights = tuple_alter(self.broker_weights, (source, lambda broker_weight: broker_weight - partition_weight), (dest, lambda broker_weight: broker_weight + partition_weight))
# Update the broker partition count
new_state.broker_partition_counts = tuple_alter(self.broker_partition_counts, (source, lambda partition_count: partition_count - 1), (dest, lambda partition_count: partition_count + 1))
# Update the broker leader weights
if source_index == 0:
new_state.broker_leader_weights = tuple_alter(self.broker_leader_weights, (source, lambda lw: lw - partition_weight), (dest, lambda lw: lw + partition_weight))
new_state.broker_leader_counts = tuple_alter(self.broker_leader_counts, (source, lambda leader_count: leader_count - 1), (dest, lambda leader_count: leader_count + 1))
new_state.leader_movement_count += 1 # depends on [control=['if'], data=[]]
# Update the topic broker counts
topic = self.partition_topic[partition]
new_state.topic_broker_count = tuple_alter(self.topic_broker_count, (topic, lambda broker_count: tuple_alter(broker_count, (source, lambda count: count - 1), (dest, lambda count: count + 1))))
# Update the topic broker imbalance
new_state.topic_broker_imbalance = tuple_replace(self.topic_broker_imbalance, (topic, new_state._calculate_topic_imbalance(topic)))
new_state._weighted_topic_broker_imbalance = self._weighted_topic_broker_imbalance + self.topic_weights[topic] * (new_state.topic_broker_imbalance[topic] - self.topic_broker_imbalance[topic])
# Update the replication group replica counts
source_rg = self.broker_rg[source]
dest_rg = self.broker_rg[dest]
if source_rg != dest_rg:
new_state.rg_replicas = tuple_alter(self.rg_replicas, (source_rg, lambda replica_counts: tuple_alter(replica_counts, (partition, lambda replica_count: replica_count - 1))), (dest_rg, lambda replica_counts: tuple_alter(replica_counts, (partition, lambda replica_count: replica_count + 1)))) # depends on [control=['if'], data=['source_rg', 'dest_rg']]
# Update the movement sizes
new_state.movement_size += self.partition_sizes[partition]
new_state.movement_count += 1
return new_state |
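move builds each successor state through tuple_replace and tuple_alter, which the row does not include. The minimal sketches below are inferred from the call sites; the project defines its own versions.

```python
def tuple_replace(tup, *pairs):
    # Return a copy of tup with tup[i] set to value for each (i, value) pair.
    items = list(tup)
    for index, value in pairs:
        items[index] = value
    return tuple(items)

def tuple_alter(tup, *pairs):
    # Return a copy of tup with fn applied at index for each (i, fn) pair.
    items = list(tup)
    for index, fn in pairs:
        items[index] = fn(items[index])
    return tuple(items)

assert tuple_replace((1, 2, 3), (0, 9)) == (9, 2, 3)
assert tuple_alter((1, 2, 3), (2, lambda x: x + 1)) == (1, 2, 4)
```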
def setup_file_logging(log_filename, log_file_level="DEBUG", str_format=None,
date_format=None, log_restart=False, log_history=False,
formatter=None, silence_modules=None, log_filter=None):
"""
This will setup logging for a single file but can be called more than once
LOG LEVELS are "CRITICAL", "ERROR", "INFO", "DEBUG"
:param log_filename: str of the file location
    :param log_file_level: str of the log level to use on this file
:param str_format: str of the logging format
:param date_format: str of the date format
:param log_restart: bool if True the log file will be deleted first
:param log_history: bool if True will save another log file in a folder
called history with the datetime
:param formatter: logging.Format instance to use
:param log_filter: logging.filter instance to add to handler
    :param silence_modules: list of str of modules to silence
:return: None
"""
from seaborn_timestamp.timestamp import datetime_to_str
if os.path.exists(log_filename) and log_restart:
os.remove(log_filename)
add_file_handler(log_file_level, log_filename, str_format=str_format,
date_format=date_format, formatter=formatter,
log_filter=log_filter)
if log_history:
base_name = os.path.basename(log_filename).split('.')[0] + \
'_%s' % datetime_to_str(str_format='%Y-%m-%d_%H-%M-%S')
history_log = os.path.join(os.path.dirname(log_filename),
'history', base_name + '.log')
add_file_handler(log_file_level, history_log, str_format=str_format,
date_format=date_format, log_filter=log_filter)
    silence_module_logging(silence_modules) |
def function[setup_file_logging, parameter[log_filename, log_file_level, str_format, date_format, log_restart, log_history, formatter, silence_modules, log_filter]]:
constant[
This will setup logging for a single file but can be called more than once
LOG LEVELS are "CRITICAL", "ERROR", "INFO", "DEBUG"
:param log_filename: str of the file location
    :param log_file_level: str of the log level to use on this file
:param str_format: str of the logging format
:param date_format: str of the date format
:param log_restart: bool if True the log file will be deleted first
:param log_history: bool if True will save another log file in a folder
called history with the datetime
:param formatter: logging.Format instance to use
:param log_filter: logging.filter instance to add to handler
    :param silence_modules: list of str of modules to silence
:return: None
]
from relative_module[seaborn_timestamp.timestamp] import module[datetime_to_str]
if <ast.BoolOp object at 0x7da18fe91d50> begin[:]
call[name[os].remove, parameter[name[log_filename]]]
call[name[add_file_handler], parameter[name[log_file_level], name[log_filename]]]
if name[log_history] begin[:]
variable[base_name] assign[=] binary_operation[call[call[call[name[os].path.basename, parameter[name[log_filename]]].split, parameter[constant[.]]]][constant[0]] + binary_operation[constant[_%s] <ast.Mod object at 0x7da2590d6920> call[name[datetime_to_str], parameter[]]]]
variable[history_log] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[log_filename]]], constant[history], binary_operation[name[base_name] + constant[.log]]]]
call[name[add_file_handler], parameter[name[log_file_level], name[history_log]]]
    call[name[silence_module_logging], parameter[name[silence_modules]]] |
keyword[def] identifier[setup_file_logging] ( identifier[log_filename] , identifier[log_file_level] = literal[string] , identifier[str_format] = keyword[None] ,
identifier[date_format] = keyword[None] , identifier[log_restart] = keyword[False] , identifier[log_history] = keyword[False] ,
identifier[formatter] = keyword[None] , identifier[silence_modules] = keyword[None] , identifier[log_filter] = keyword[None] ):
literal[string]
keyword[from] identifier[seaborn_timestamp] . identifier[timestamp] keyword[import] identifier[datetime_to_str]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[log_filename] ) keyword[and] identifier[log_restart] :
identifier[os] . identifier[remove] ( identifier[log_filename] )
identifier[add_file_handler] ( identifier[log_file_level] , identifier[log_filename] , identifier[str_format] = identifier[str_format] ,
identifier[date_format] = identifier[date_format] , identifier[formatter] = identifier[formatter] ,
identifier[log_filter] = identifier[log_filter] )
keyword[if] identifier[log_history] :
identifier[base_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[log_filename] ). identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] % identifier[datetime_to_str] ( identifier[str_format] = literal[string] )
identifier[history_log] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[log_filename] ),
literal[string] , identifier[base_name] + literal[string] )
identifier[add_file_handler] ( identifier[log_file_level] , identifier[history_log] , identifier[str_format] = identifier[str_format] ,
identifier[date_format] = identifier[date_format] , identifier[log_filter] = identifier[log_filter] )
    identifier[silence_module_logging] ( identifier[silence_modules] ) |
def setup_file_logging(log_filename, log_file_level='DEBUG', str_format=None, date_format=None, log_restart=False, log_history=False, formatter=None, silence_modules=None, log_filter=None):
"""
This will setup logging for a single file but can be called more than once
LOG LEVELS are "CRITICAL", "ERROR", "INFO", "DEBUG"
:param log_filename: str of the file location
    :param log_file_level: str of the log level to use on this file
:param str_format: str of the logging format
:param date_format: str of the date format
:param log_restart: bool if True the log file will be deleted first
:param log_history: bool if True will save another log file in a folder
called history with the datetime
:param formatter: logging.Format instance to use
:param log_filter: logging.filter instance to add to handler
    :param silence_modules: list of str of modules to silence
:return: None
"""
from seaborn_timestamp.timestamp import datetime_to_str
if os.path.exists(log_filename) and log_restart:
os.remove(log_filename) # depends on [control=['if'], data=[]]
add_file_handler(log_file_level, log_filename, str_format=str_format, date_format=date_format, formatter=formatter, log_filter=log_filter)
if log_history:
base_name = os.path.basename(log_filename).split('.')[0] + '_%s' % datetime_to_str(str_format='%Y-%m-%d_%H-%M-%S')
history_log = os.path.join(os.path.dirname(log_filename), 'history', base_name + '.log')
add_file_handler(log_file_level, history_log, str_format=str_format, date_format=date_format, log_filter=log_filter) # depends on [control=['if'], data=[]]
silence_module_logging(silence_modules) |
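A usage sketch for setup_file_logging above. The paths and module names are illustrative, and add_file_handler/silence_module_logging are assumed to come from the same (unshown) module.

```python
setup_file_logging(
    'logs/app.log',
    log_file_level='INFO',
    log_restart=True,   # delete any previous logs/app.log first
    log_history=True,   # also keep a timestamped copy under logs/history/
    silence_modules=['urllib3', 'requests'],
)
```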
def patch_python_logging_handlers():
'''
    Patch the python logging handlers with our mixed-in classes
'''
logging.StreamHandler = StreamHandler
logging.FileHandler = FileHandler
logging.handlers.SysLogHandler = SysLogHandler
logging.handlers.WatchedFileHandler = WatchedFileHandler
logging.handlers.RotatingFileHandler = RotatingFileHandler
if sys.version_info >= (3, 2):
        logging.handlers.QueueHandler = QueueHandler |
def function[patch_python_logging_handlers, parameter[]]:
constant[
    Patch the python logging handlers with our mixed-in classes
]
name[logging].StreamHandler assign[=] name[StreamHandler]
name[logging].FileHandler assign[=] name[FileHandler]
name[logging].handlers.SysLogHandler assign[=] name[SysLogHandler]
name[logging].handlers.WatchedFileHandler assign[=] name[WatchedFileHandler]
name[logging].handlers.RotatingFileHandler assign[=] name[RotatingFileHandler]
if compare[name[sys].version_info greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da1b215d930>, <ast.Constant object at 0x7da1b215e7a0>]]] begin[:]
        name[logging].handlers.QueueHandler assign[=] name[QueueHandler] |
keyword[def] identifier[patch_python_logging_handlers] ():
literal[string]
identifier[logging] . identifier[StreamHandler] = identifier[StreamHandler]
identifier[logging] . identifier[FileHandler] = identifier[FileHandler]
identifier[logging] . identifier[handlers] . identifier[SysLogHandler] = identifier[SysLogHandler]
identifier[logging] . identifier[handlers] . identifier[WatchedFileHandler] = identifier[WatchedFileHandler]
identifier[logging] . identifier[handlers] . identifier[RotatingFileHandler] = identifier[RotatingFileHandler]
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ):
        identifier[logging] . identifier[handlers] . identifier[QueueHandler] = identifier[QueueHandler] |
def patch_python_logging_handlers():
"""
    Patch the python logging handlers with our mixed-in classes
"""
logging.StreamHandler = StreamHandler
logging.FileHandler = FileHandler
logging.handlers.SysLogHandler = SysLogHandler
logging.handlers.WatchedFileHandler = WatchedFileHandler
logging.handlers.RotatingFileHandler = RotatingFileHandler
if sys.version_info >= (3, 2):
        logging.handlers.QueueHandler = QueueHandler # depends on [control=['if'], data=[]] |
def height(self, minimum: float = 1.5, maximum: float = 2.0) -> str:
"""Generate a random height in M (Meter).
:param minimum: Minimum value.
:param float maximum: Maximum value.
:return: Height.
:Example:
1.85.
"""
h = self.random.uniform(minimum, maximum)
    return '{:0.2f}'.format(h) |
def function[height, parameter[self, minimum, maximum]]:
constant[Generate a random height in M (Meter).
:param minimum: Minimum value.
:param float maximum: Maximum value.
:return: Height.
:Example:
1.85.
]
variable[h] assign[=] call[name[self].random.uniform, parameter[name[minimum], name[maximum]]]
    return[call[constant[{:0.2f}].format, parameter[name[h]]]] |
keyword[def] identifier[height] ( identifier[self] , identifier[minimum] : identifier[float] = literal[int] , identifier[maximum] : identifier[float] = literal[int] )-> identifier[str] :
literal[string]
identifier[h] = identifier[self] . identifier[random] . identifier[uniform] ( identifier[minimum] , identifier[maximum] )
    keyword[return] literal[string] . identifier[format] ( identifier[h] ) |
def height(self, minimum: float=1.5, maximum: float=2.0) -> str:
"""Generate a random height in M (Meter).
:param minimum: Minimum value.
:param float maximum: Maximum value.
:return: Height.
:Example:
1.85.
"""
h = self.random.uniform(minimum, maximum)
    return '{:0.2f}'.format(h) |
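A usage sketch for height above. It reads self.random, so a minimal provider stand-in is used here; the Provider class is hypothetical.

```python
import random

class Provider:
    def __init__(self, seed=None):
        self.random = random.Random(seed)

    def height(self, minimum: float = 1.5, maximum: float = 2.0) -> str:
        # Same body as the row above.
        h = self.random.uniform(minimum, maximum)
        return '{:0.2f}'.format(h)

print(Provider(seed=0).height(1.6, 1.9))  # e.g. '1.85'
```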
def get_agenda(self,
conservative: bool = False):
"""
        Returns an agenda that can be used to guide search.
Parameters
----------
conservative : ``bool``
Setting this flag will return a subset of the agenda items that correspond to high
confidence lexical matches. You'll need this if you are going to use this agenda to
penalize a model for producing logical forms that do not contain some items in it. In
that case, you'll want this agenda to have close to perfect precision, at the cost of a
lower recall. You may not want to set this flag if you are sorting the output from a
search procedure based on how much of this agenda is satisfied.
"""
agenda_items = []
question_tokens = [token.text for token in self.table_context.question_tokens]
question = " ".join(question_tokens)
added_number_filters = False
if self._table_has_number_columns:
if "at least" in question:
agenda_items.append("filter_number_greater_equals")
if "at most" in question:
agenda_items.append("filter_number_lesser_equals")
comparison_triggers = ["greater", "larger", "more"]
if any(f"no {word} than" in question for word in comparison_triggers):
agenda_items.append("filter_number_lesser_equals")
elif any(f"{word} than" in question for word in comparison_triggers):
agenda_items.append("filter_number_greater")
# We want to keep track of this because we do not want to add both number and date
# filters to the agenda if we want to be conservative.
if agenda_items:
added_number_filters = True
for token in question_tokens:
if token in ["next", "below"] or (token == "after" and not conservative):
agenda_items.append("next")
if token in ["previous", "above"] or (token == "before" and not conservative):
agenda_items.append("previous")
if token in ["first", "top"]:
agenda_items.append("first")
if token in ["last", "bottom"]:
agenda_items.append("last")
if token == "same":
agenda_items.append("same_as")
if self._table_has_number_columns:
# "total" does not always map to an actual summing operation.
if token == "total" and not conservative:
agenda_items.append("sum")
if token == "difference" or "how many more" in question or "how much more" in question:
agenda_items.append("diff")
if token == "average":
agenda_items.append("average")
if token in ["least", "smallest", "shortest", "lowest"] and "at least" not in question:
# This condition is too brittle. But for most logical forms with "min", there are
# semantically equivalent ones with "argmin". The exceptions are rare.
if "what is the least" not in question:
agenda_items.append("argmin")
if token in ["most", "largest", "highest", "longest", "greatest"] and "at most" not in question:
# This condition is too brittle. But for most logical forms with "max", there are
# semantically equivalent ones with "argmax". The exceptions are rare.
if "what is the most" not in question:
agenda_items.append("argmax")
if self._table_has_date_columns:
if token in MONTH_NUMBERS or (token.isdigit() and len(token) == 4 and
int(token) < 2100 and int(token) > 1100):
# Token is either a month or an year. We'll add date functions.
if not added_number_filters or not conservative:
if "after" in question_tokens:
agenda_items.append("filter_date_greater")
elif "before" in question_tokens:
agenda_items.append("filter_date_lesser")
elif "not" in question_tokens:
agenda_items.append("filter_date_not_equals")
else:
agenda_items.append("filter_date_equals")
if "what is the least" in question and self._table_has_number_columns:
agenda_items.append("min_number")
if "what is the most" in question and self._table_has_number_columns:
agenda_items.append("max_number")
if "when" in question_tokens and self._table_has_date_columns:
if "last" in question_tokens:
agenda_items.append("max_date")
elif "first" in question_tokens:
agenda_items.append("min_date")
else:
agenda_items.append("select_date")
if "how many" in question:
if "sum" not in agenda_items and "average" not in agenda_items:
# The question probably just requires counting the rows. But this is not very
# accurate. The question could also be asking for a value that is in the table.
agenda_items.append("count")
agenda = []
# Adding productions from the global set.
for agenda_item in set(agenda_items):
# Some agenda items may not be present in the terminal productions because some of these
# terminals are table-content specific. For example, if the question triggered "sum",
# and the table does not have number columns, we should not add "<r,<f,n>> -> sum" to
# the agenda.
if agenda_item in self.terminal_productions:
agenda.append(self.terminal_productions[agenda_item])
if conservative:
# Some of the columns in the table have multiple types, and thus occur in the KG as
            # different columns. We do not want to add them all to the agenda if their names are
            # in the question, because it is unlikely that logical forms use them all. In fact, to be conservative,
# we won't add any of them. So we'll first identify such column names.
refined_column_productions: Dict[str, str] = {}
for column_name, signature in self._column_productions_for_agenda.items():
column_type, name = column_name.split(":")
if column_type == "string_column":
if f"number_column:{name}" not in self._column_productions_for_agenda and \
f"date_column:{name}" not in self._column_productions_for_agenda:
refined_column_productions[column_name] = signature
elif column_type == "number_column":
if f"string_column:{name}" not in self._column_productions_for_agenda and \
f"date_column:{name}" not in self._column_productions_for_agenda:
refined_column_productions[column_name] = signature
else:
if f"string_column:{name}" not in self._column_productions_for_agenda and \
f"number_column:{name}" not in self._column_productions_for_agenda:
refined_column_productions[column_name] = signature
# Similarly, we do not want the same spans in the question to be added to the agenda as
# both string and number productions.
refined_entities: List[str] = []
refined_numbers: List[str] = []
for entity in self._question_entities:
if entity.replace("string:", "") not in self._question_numbers:
refined_entities.append(entity)
for number in self._question_numbers:
if f"string:{number}" not in self._question_entities:
refined_numbers.append(number)
else:
refined_column_productions = dict(self._column_productions_for_agenda)
refined_entities = list(self._question_entities)
refined_numbers = list(self._question_numbers)
# Adding column names that occur in question.
question_with_underscores = "_".join(question_tokens)
normalized_question = re.sub("[^a-z0-9_]", "", question_with_underscores)
# We keep track of tokens that are in column names being added to the agenda. We will not
# add string productions to the agenda if those tokens were already captured as column
# names.
# Note: If the same string occurs multiple times, this may cause string productions being
# omitted from the agenda unnecessarily. That is fine, as we want to err on the side of
# adding fewer rules to the agenda.
tokens_in_column_names: Set[str] = set()
for column_name_with_type, signature in refined_column_productions.items():
column_name = column_name_with_type.split(":")[1]
# Underscores ensure that the match is of whole words.
if f"_{column_name}_" in normalized_question:
agenda.append(signature)
for token in column_name.split("_"):
tokens_in_column_names.add(token)
# Adding all productions that lead to entities and numbers extracted from the question.
for entity in refined_entities:
if entity.replace("string:", "") not in tokens_in_column_names:
agenda.append(f"str -> {entity}")
for number in refined_numbers:
# The reason we check for the presence of the number in the question again is because
# some of these numbers are extracted from number words like month names and ordinals
# like "first". On looking at some agenda outputs, I found that they hurt more than help
# in the agenda.
if f"_{number}_" in normalized_question:
agenda.append(f"Number -> {number}")
        return agenda |
def function[get_agenda, parameter[self, conservative]]:
constant[
    Returns an agenda that can be used to guide search.
Parameters
----------
conservative : ``bool``
Setting this flag will return a subset of the agenda items that correspond to high
confidence lexical matches. You'll need this if you are going to use this agenda to
penalize a model for producing logical forms that do not contain some items in it. In
that case, you'll want this agenda to have close to perfect precision, at the cost of a
lower recall. You may not want to set this flag if you are sorting the output from a
search procedure based on how much of this agenda is satisfied.
]
variable[agenda_items] assign[=] list[[]]
variable[question_tokens] assign[=] <ast.ListComp object at 0x7da1b1ffdde0>
variable[question] assign[=] call[constant[ ].join, parameter[name[question_tokens]]]
variable[added_number_filters] assign[=] constant[False]
if name[self]._table_has_number_columns begin[:]
if compare[constant[at least] in name[question]] begin[:]
call[name[agenda_items].append, parameter[constant[filter_number_greater_equals]]]
if compare[constant[at most] in name[question]] begin[:]
call[name[agenda_items].append, parameter[constant[filter_number_lesser_equals]]]
variable[comparison_triggers] assign[=] list[[<ast.Constant object at 0x7da1b1ffd3f0>, <ast.Constant object at 0x7da1b1ffd3c0>, <ast.Constant object at 0x7da1b1ffd540>]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1ffd8d0>]] begin[:]
call[name[agenda_items].append, parameter[constant[filter_number_lesser_equals]]]
if name[agenda_items] begin[:]
variable[added_number_filters] assign[=] constant[True]
for taget[name[token]] in starred[name[question_tokens]] begin[:]
if <ast.BoolOp object at 0x7da1b1ffec50> begin[:]
call[name[agenda_items].append, parameter[constant[next]]]
if <ast.BoolOp object at 0x7da1b1ffef50> begin[:]
call[name[agenda_items].append, parameter[constant[previous]]]
if compare[name[token] in list[[<ast.Constant object at 0x7da1b1fff640>, <ast.Constant object at 0x7da1b1fff460>]]] begin[:]
call[name[agenda_items].append, parameter[constant[first]]]
if compare[name[token] in list[[<ast.Constant object at 0x7da1b1fff820>, <ast.Constant object at 0x7da1b1fff7c0>]]] begin[:]
call[name[agenda_items].append, parameter[constant[last]]]
if compare[name[token] equal[==] constant[same]] begin[:]
call[name[agenda_items].append, parameter[constant[same_as]]]
if name[self]._table_has_number_columns begin[:]
if <ast.BoolOp object at 0x7da1b1ffd4e0> begin[:]
call[name[agenda_items].append, parameter[constant[sum]]]
if <ast.BoolOp object at 0x7da1b1fffe50> begin[:]
call[name[agenda_items].append, parameter[constant[diff]]]
if compare[name[token] equal[==] constant[average]] begin[:]
call[name[agenda_items].append, parameter[constant[average]]]
if <ast.BoolOp object at 0x7da1b1fff9a0> begin[:]
if compare[constant[what is the least] <ast.NotIn object at 0x7da2590d7190> name[question]] begin[:]
call[name[agenda_items].append, parameter[constant[argmin]]]
if <ast.BoolOp object at 0x7da1b1ffcf40> begin[:]
if compare[constant[what is the most] <ast.NotIn object at 0x7da2590d7190> name[question]] begin[:]
call[name[agenda_items].append, parameter[constant[argmax]]]
if name[self]._table_has_date_columns begin[:]
if <ast.BoolOp object at 0x7da1b1ffefe0> begin[:]
if <ast.BoolOp object at 0x7da1b1ffeb60> begin[:]
if compare[constant[after] in name[question_tokens]] begin[:]
call[name[agenda_items].append, parameter[constant[filter_date_greater]]]
if <ast.BoolOp object at 0x7da1b1f96470> begin[:]
call[name[agenda_items].append, parameter[constant[min_number]]]
if <ast.BoolOp object at 0x7da1b1f95480> begin[:]
call[name[agenda_items].append, parameter[constant[max_number]]]
if <ast.BoolOp object at 0x7da1b1f95e10> begin[:]
if compare[constant[last] in name[question_tokens]] begin[:]
call[name[agenda_items].append, parameter[constant[max_date]]]
if compare[constant[how many] in name[question]] begin[:]
if <ast.BoolOp object at 0x7da1b1f94c10> begin[:]
call[name[agenda_items].append, parameter[constant[count]]]
variable[agenda] assign[=] list[[]]
for taget[name[agenda_item]] in starred[call[name[set], parameter[name[agenda_items]]]] begin[:]
if compare[name[agenda_item] in name[self].terminal_productions] begin[:]
call[name[agenda].append, parameter[call[name[self].terminal_productions][name[agenda_item]]]]
if name[conservative] begin[:]
<ast.AnnAssign object at 0x7da1b1f96e30>
for taget[tuple[[<ast.Name object at 0x7da1b1f96b90>, <ast.Name object at 0x7da1b1f96d10>]]] in starred[call[name[self]._column_productions_for_agenda.items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b1f96860> assign[=] call[name[column_name].split, parameter[constant[:]]]
if compare[name[column_type] equal[==] constant[string_column]] begin[:]
if <ast.BoolOp object at 0x7da1b1f96800> begin[:]
call[name[refined_column_productions]][name[column_name]] assign[=] name[signature]
<ast.AnnAssign object at 0x7da1b1f95900>
<ast.AnnAssign object at 0x7da1b1f97a90>
for taget[name[entity]] in starred[name[self]._question_entities] begin[:]
if compare[call[name[entity].replace, parameter[constant[string:], constant[]]] <ast.NotIn object at 0x7da2590d7190> name[self]._question_numbers] begin[:]
call[name[refined_entities].append, parameter[name[entity]]]
for taget[name[number]] in starred[name[self]._question_numbers] begin[:]
if compare[<ast.JoinedStr object at 0x7da1b1f951b0> <ast.NotIn object at 0x7da2590d7190> name[self]._question_entities] begin[:]
call[name[refined_numbers].append, parameter[name[number]]]
variable[question_with_underscores] assign[=] call[constant[_].join, parameter[name[question_tokens]]]
variable[normalized_question] assign[=] call[name[re].sub, parameter[constant[[^a-z0-9_]], constant[], name[question_with_underscores]]]
<ast.AnnAssign object at 0x7da20c795990>
for taget[tuple[[<ast.Name object at 0x7da20c794220>, <ast.Name object at 0x7da20c796a70>]]] in starred[call[name[refined_column_productions].items, parameter[]]] begin[:]
variable[column_name] assign[=] call[call[name[column_name_with_type].split, parameter[constant[:]]]][constant[1]]
if compare[<ast.JoinedStr object at 0x7da20c7940d0> in name[normalized_question]] begin[:]
call[name[agenda].append, parameter[name[signature]]]
for taget[name[token]] in starred[call[name[column_name].split, parameter[constant[_]]]] begin[:]
call[name[tokens_in_column_names].add, parameter[name[token]]]
for taget[name[entity]] in starred[name[refined_entities]] begin[:]
if compare[call[name[entity].replace, parameter[constant[string:], constant[]]] <ast.NotIn object at 0x7da2590d7190> name[tokens_in_column_names]] begin[:]
call[name[agenda].append, parameter[<ast.JoinedStr object at 0x7da20c7950c0>]]
for taget[name[number]] in starred[name[refined_numbers]] begin[:]
if compare[<ast.JoinedStr object at 0x7da20c7966b0> in name[normalized_question]] begin[:]
call[name[agenda].append, parameter[<ast.JoinedStr object at 0x7da20c794eb0>]]
    return[name[agenda]] |
keyword[def] identifier[get_agenda] ( identifier[self] ,
identifier[conservative] : identifier[bool] = keyword[False] ):
literal[string]
identifier[agenda_items] =[]
identifier[question_tokens] =[ identifier[token] . identifier[text] keyword[for] identifier[token] keyword[in] identifier[self] . identifier[table_context] . identifier[question_tokens] ]
identifier[question] = literal[string] . identifier[join] ( identifier[question_tokens] )
identifier[added_number_filters] = keyword[False]
keyword[if] identifier[self] . identifier[_table_has_number_columns] :
keyword[if] literal[string] keyword[in] identifier[question] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[question] :
identifier[agenda_items] . identifier[append] ( literal[string] )
identifier[comparison_triggers] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[any] ( literal[string] keyword[in] identifier[question] keyword[for] identifier[word] keyword[in] identifier[comparison_triggers] ):
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[elif] identifier[any] ( literal[string] keyword[in] identifier[question] keyword[for] identifier[word] keyword[in] identifier[comparison_triggers] ):
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[agenda_items] :
identifier[added_number_filters] = keyword[True]
keyword[for] identifier[token] keyword[in] identifier[question_tokens] :
keyword[if] identifier[token] keyword[in] [ literal[string] , literal[string] ] keyword[or] ( identifier[token] == literal[string] keyword[and] keyword[not] identifier[conservative] ):
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] keyword[in] [ literal[string] , literal[string] ] keyword[or] ( identifier[token] == literal[string] keyword[and] keyword[not] identifier[conservative] ):
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] keyword[in] [ literal[string] , literal[string] ]:
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] keyword[in] [ literal[string] , literal[string] ]:
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] == literal[string] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[self] . identifier[_table_has_number_columns] :
keyword[if] identifier[token] == literal[string] keyword[and] keyword[not] identifier[conservative] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] == literal[string] keyword[or] literal[string] keyword[in] identifier[question] keyword[or] literal[string] keyword[in] identifier[question] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] == literal[string] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[and] literal[string] keyword[not] keyword[in] identifier[question] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[question] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[token] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[and] literal[string] keyword[not] keyword[in] identifier[question] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[question] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] identifier[self] . identifier[_table_has_date_columns] :
keyword[if] identifier[token] keyword[in] identifier[MONTH_NUMBERS] keyword[or] ( identifier[token] . identifier[isdigit] () keyword[and] identifier[len] ( identifier[token] )== literal[int] keyword[and]
identifier[int] ( identifier[token] )< literal[int] keyword[and] identifier[int] ( identifier[token] )> literal[int] ):
keyword[if] keyword[not] identifier[added_number_filters] keyword[or] keyword[not] identifier[conservative] :
keyword[if] literal[string] keyword[in] identifier[question_tokens] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[elif] literal[string] keyword[in] identifier[question_tokens] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[elif] literal[string] keyword[in] identifier[question_tokens] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[else] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[question] keyword[and] identifier[self] . identifier[_table_has_number_columns] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[question] keyword[and] identifier[self] . identifier[_table_has_number_columns] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[question_tokens] keyword[and] identifier[self] . identifier[_table_has_date_columns] :
keyword[if] literal[string] keyword[in] identifier[question_tokens] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[elif] literal[string] keyword[in] identifier[question_tokens] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[else] :
identifier[agenda_items] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[question] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[agenda_items] keyword[and] literal[string] keyword[not] keyword[in] identifier[agenda_items] :
identifier[agenda_items] . identifier[append] ( literal[string] )
identifier[agenda] =[]
keyword[for] identifier[agenda_item] keyword[in] identifier[set] ( identifier[agenda_items] ):
keyword[if] identifier[agenda_item] keyword[in] identifier[self] . identifier[terminal_productions] :
identifier[agenda] . identifier[append] ( identifier[self] . identifier[terminal_productions] [ identifier[agenda_item] ])
keyword[if] identifier[conservative] :
identifier[refined_column_productions] : identifier[Dict] [ identifier[str] , identifier[str] ]={}
keyword[for] identifier[column_name] , identifier[signature] keyword[in] identifier[self] . identifier[_column_productions_for_agenda] . identifier[items] ():
identifier[column_type] , identifier[name] = identifier[column_name] . identifier[split] ( literal[string] )
keyword[if] identifier[column_type] == literal[string] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_column_productions_for_agenda] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_column_productions_for_agenda] :
identifier[refined_column_productions] [ identifier[column_name] ]= identifier[signature]
keyword[elif] identifier[column_type] == literal[string] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_column_productions_for_agenda] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_column_productions_for_agenda] :
identifier[refined_column_productions] [ identifier[column_name] ]= identifier[signature]
keyword[else] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_column_productions_for_agenda] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_column_productions_for_agenda] :
identifier[refined_column_productions] [ identifier[column_name] ]= identifier[signature]
identifier[refined_entities] : identifier[List] [ identifier[str] ]=[]
identifier[refined_numbers] : identifier[List] [ identifier[str] ]=[]
keyword[for] identifier[entity] keyword[in] identifier[self] . identifier[_question_entities] :
keyword[if] identifier[entity] . identifier[replace] ( literal[string] , literal[string] ) keyword[not] keyword[in] identifier[self] . identifier[_question_numbers] :
identifier[refined_entities] . identifier[append] ( identifier[entity] )
keyword[for] identifier[number] keyword[in] identifier[self] . identifier[_question_numbers] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_question_entities] :
identifier[refined_numbers] . identifier[append] ( identifier[number] )
keyword[else] :
identifier[refined_column_productions] = identifier[dict] ( identifier[self] . identifier[_column_productions_for_agenda] )
identifier[refined_entities] = identifier[list] ( identifier[self] . identifier[_question_entities] )
identifier[refined_numbers] = identifier[list] ( identifier[self] . identifier[_question_numbers] )
identifier[question_with_underscores] = literal[string] . identifier[join] ( identifier[question_tokens] )
identifier[normalized_question] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[question_with_underscores] )
identifier[tokens_in_column_names] : identifier[Set] [ identifier[str] ]= identifier[set] ()
keyword[for] identifier[column_name_with_type] , identifier[signature] keyword[in] identifier[refined_column_productions] . identifier[items] ():
identifier[column_name] = identifier[column_name_with_type] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[normalized_question] :
identifier[agenda] . identifier[append] ( identifier[signature] )
keyword[for] identifier[token] keyword[in] identifier[column_name] . identifier[split] ( literal[string] ):
identifier[tokens_in_column_names] . identifier[add] ( identifier[token] )
keyword[for] identifier[entity] keyword[in] identifier[refined_entities] :
keyword[if] identifier[entity] . identifier[replace] ( literal[string] , literal[string] ) keyword[not] keyword[in] identifier[tokens_in_column_names] :
identifier[agenda] . identifier[append] ( literal[string] )
keyword[for] identifier[number] keyword[in] identifier[refined_numbers] :
keyword[if] literal[string] keyword[in] identifier[normalized_question] :
identifier[agenda] . identifier[append] ( literal[string] )
keyword[return] identifier[agenda] | def get_agenda(self, conservative: bool=False):
"""
        Returns an agenda that can be used to guide search.
Parameters
----------
conservative : ``bool``
Setting this flag will return a subset of the agenda items that correspond to high
confidence lexical matches. You'll need this if you are going to use this agenda to
penalize a model for producing logical forms that do not contain some items in it. In
that case, you'll want this agenda to have close to perfect precision, at the cost of a
lower recall. You may not want to set this flag if you are sorting the output from a
search procedure based on how much of this agenda is satisfied.
"""
agenda_items = []
question_tokens = [token.text for token in self.table_context.question_tokens]
question = ' '.join(question_tokens)
added_number_filters = False
if self._table_has_number_columns:
if 'at least' in question:
agenda_items.append('filter_number_greater_equals') # depends on [control=['if'], data=[]]
if 'at most' in question:
agenda_items.append('filter_number_lesser_equals') # depends on [control=['if'], data=[]]
comparison_triggers = ['greater', 'larger', 'more']
if any((f'no {word} than' in question for word in comparison_triggers)):
agenda_items.append('filter_number_lesser_equals') # depends on [control=['if'], data=[]]
elif any((f'{word} than' in question for word in comparison_triggers)):
agenda_items.append('filter_number_greater') # depends on [control=['if'], data=[]]
# We want to keep track of this because we do not want to add both number and date
# filters to the agenda if we want to be conservative.
if agenda_items:
added_number_filters = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
for token in question_tokens:
if token in ['next', 'below'] or (token == 'after' and (not conservative)):
agenda_items.append('next') # depends on [control=['if'], data=[]]
if token in ['previous', 'above'] or (token == 'before' and (not conservative)):
agenda_items.append('previous') # depends on [control=['if'], data=[]]
if token in ['first', 'top']:
agenda_items.append('first') # depends on [control=['if'], data=[]]
if token in ['last', 'bottom']:
agenda_items.append('last') # depends on [control=['if'], data=[]]
if token == 'same':
agenda_items.append('same_as') # depends on [control=['if'], data=[]]
if self._table_has_number_columns:
# "total" does not always map to an actual summing operation.
if token == 'total' and (not conservative):
agenda_items.append('sum') # depends on [control=['if'], data=[]]
if token == 'difference' or 'how many more' in question or 'how much more' in question:
agenda_items.append('diff') # depends on [control=['if'], data=[]]
if token == 'average':
agenda_items.append('average') # depends on [control=['if'], data=[]]
if token in ['least', 'smallest', 'shortest', 'lowest'] and 'at least' not in question:
# This condition is too brittle. But for most logical forms with "min", there are
# semantically equivalent ones with "argmin". The exceptions are rare.
if 'what is the least' not in question:
agenda_items.append('argmin') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if token in ['most', 'largest', 'highest', 'longest', 'greatest'] and 'at most' not in question:
# This condition is too brittle. But for most logical forms with "max", there are
# semantically equivalent ones with "argmax". The exceptions are rare.
if 'what is the most' not in question:
agenda_items.append('argmax') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self._table_has_date_columns:
if token in MONTH_NUMBERS or (token.isdigit() and len(token) == 4 and (int(token) < 2100) and (int(token) > 1100)):
# Token is either a month or an year. We'll add date functions.
if not added_number_filters or not conservative:
if 'after' in question_tokens:
agenda_items.append('filter_date_greater') # depends on [control=['if'], data=[]]
elif 'before' in question_tokens:
agenda_items.append('filter_date_lesser') # depends on [control=['if'], data=[]]
elif 'not' in question_tokens:
agenda_items.append('filter_date_not_equals') # depends on [control=['if'], data=[]]
else:
agenda_items.append('filter_date_equals') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'what is the least' in question and self._table_has_number_columns:
agenda_items.append('min_number') # depends on [control=['if'], data=[]]
if 'what is the most' in question and self._table_has_number_columns:
agenda_items.append('max_number') # depends on [control=['if'], data=[]]
if 'when' in question_tokens and self._table_has_date_columns:
if 'last' in question_tokens:
agenda_items.append('max_date') # depends on [control=['if'], data=[]]
elif 'first' in question_tokens:
agenda_items.append('min_date') # depends on [control=['if'], data=[]]
else:
agenda_items.append('select_date') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['token']]
if 'how many' in question:
if 'sum' not in agenda_items and 'average' not in agenda_items:
# The question probably just requires counting the rows. But this is not very
# accurate. The question could also be asking for a value that is in the table.
agenda_items.append('count') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
agenda = []
# Adding productions from the global set.
for agenda_item in set(agenda_items):
# Some agenda items may not be present in the terminal productions because some of these
# terminals are table-content specific. For example, if the question triggered "sum",
# and the table does not have number columns, we should not add "<r,<f,n>> -> sum" to
# the agenda.
if agenda_item in self.terminal_productions:
agenda.append(self.terminal_productions[agenda_item]) # depends on [control=['if'], data=['agenda_item']] # depends on [control=['for'], data=['agenda_item']]
if conservative:
# Some of the columns in the table have multiple types, and thus occur in the KG as
        # different columns. We do not want to add them all to the agenda if their names occur
        # in the question, because it is unlikely that logical forms use them all. In fact, to be conservative,
# we won't add any of them. So we'll first identify such column names.
refined_column_productions: Dict[str, str] = {}
for (column_name, signature) in self._column_productions_for_agenda.items():
(column_type, name) = column_name.split(':')
if column_type == 'string_column':
if f'number_column:{name}' not in self._column_productions_for_agenda and f'date_column:{name}' not in self._column_productions_for_agenda:
refined_column_productions[column_name] = signature # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif column_type == 'number_column':
if f'string_column:{name}' not in self._column_productions_for_agenda and f'date_column:{name}' not in self._column_productions_for_agenda:
refined_column_productions[column_name] = signature # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif f'string_column:{name}' not in self._column_productions_for_agenda and f'number_column:{name}' not in self._column_productions_for_agenda:
refined_column_productions[column_name] = signature # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Similarly, we do not want the same spans in the question to be added to the agenda as
# both string and number productions.
refined_entities: List[str] = []
refined_numbers: List[str] = []
for entity in self._question_entities:
if entity.replace('string:', '') not in self._question_numbers:
refined_entities.append(entity) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entity']]
for number in self._question_numbers:
if f'string:{number}' not in self._question_entities:
refined_numbers.append(number) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['number']] # depends on [control=['if'], data=[]]
else:
refined_column_productions = dict(self._column_productions_for_agenda)
refined_entities = list(self._question_entities)
refined_numbers = list(self._question_numbers)
# Adding column names that occur in question.
question_with_underscores = '_'.join(question_tokens)
normalized_question = re.sub('[^a-z0-9_]', '', question_with_underscores)
# We keep track of tokens that are in column names being added to the agenda. We will not
# add string productions to the agenda if those tokens were already captured as column
# names.
# Note: If the same string occurs multiple times, this may cause string productions being
# omitted from the agenda unnecessarily. That is fine, as we want to err on the side of
# adding fewer rules to the agenda.
tokens_in_column_names: Set[str] = set()
for (column_name_with_type, signature) in refined_column_productions.items():
column_name = column_name_with_type.split(':')[1]
# Underscores ensure that the match is of whole words.
if f'_{column_name}_' in normalized_question:
agenda.append(signature)
for token in column_name.split('_'):
tokens_in_column_names.add(token) # depends on [control=['for'], data=['token']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Adding all productions that lead to entities and numbers extracted from the question.
for entity in refined_entities:
if entity.replace('string:', '') not in tokens_in_column_names:
agenda.append(f'str -> {entity}') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entity']]
for number in refined_numbers:
# The reason we check for the presence of the number in the question again is because
# some of these numbers are extracted from number words like month names and ordinals
# like "first". On looking at some agenda outputs, I found that they hurt more than help
# in the agenda.
if f'_{number}_' in normalized_question:
agenda.append(f'Number -> {number}') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['number']]
return agenda |
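
An aside on the get_agenda entry above: it maps lexical triggers in the question onto grammar productions. A minimal standalone sketch of that trigger-matching idea, using an illustrative subset of the trigger table (not the full mapping from the source):

from typing import Dict, List

TRIGGERS: Dict[str, str] = {        # illustrative subset, not the full table
    'at least': 'filter_number_greater_equals',
    'at most': 'filter_number_lesser_equals',
    'average': 'average',
    'difference': 'diff',
}

def toy_agenda(question_tokens: List[str]) -> List[str]:
    """Collect the productions whose trigger phrase occurs in the question."""
    question = ' '.join(question_tokens)
    return [production for phrase, production in TRIGGERS.items()
            if phrase in question]

print(toy_agenda('what is the average rank at most'.split()))
# -> ['filter_number_lesser_equals', 'average']
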
def check_sufficient_inputs(self):
        '''Method to raise an exception if none of the pairs (T, P), (T, V), or
        (P, V) are given. '''
if not ((self.T and self.P) or (self.T and self.V) or (self.P and self.V)):
raise Exception('Either T and P, or T and V, or P and V are required') | def function[check_sufficient_inputs, parameter[self]]:
constant[Method to raise an exception if none of the pairs (T, P), (T, V), or
 (P, V) are given. ]
if <ast.UnaryOp object at 0x7da1b2345db0> begin[:]
<ast.Raise object at 0x7da1b2345120> | keyword[def] identifier[check_sufficient_inputs] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] (( identifier[self] . identifier[T] keyword[and] identifier[self] . identifier[P] ) keyword[or] ( identifier[self] . identifier[T] keyword[and] identifier[self] . identifier[V] ) keyword[or] ( identifier[self] . identifier[P] keyword[and] identifier[self] . identifier[V] )):
keyword[raise] identifier[Exception] ( literal[string] ) | def check_sufficient_inputs(self):
"""Method to an exception if none of the pairs (T, P), (T, V), or
(P, V) are given. """
if not (self.T and self.P or (self.T and self.V) or (self.P and self.V)):
raise Exception('Either T and P, or T and V, or P and V are required') # depends on [control=['if'], data=[]] |
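
A usage sketch for the check above, assuming a minimal stand-in class (the class name and constructor are made up for illustration):

class FlashState:                      # hypothetical holder for T, P, V
    def __init__(self, T=None, P=None, V=None):
        self.T, self.P, self.V = T, P, V

    def check_sufficient_inputs(self):
        if not ((self.T and self.P) or (self.T and self.V) or (self.P and self.V)):
            raise Exception('Either T and P, or T and V, or P and V are required')

FlashState(T=300.0, P=101325.0).check_sufficient_inputs()   # passes silently
try:
    FlashState(T=300.0).check_sufficient_inputs()           # only one of the three given
except Exception as err:
    print(err)
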
def _write_new_tag_to_init(self):
"""
Write version to __init__.py by editing in place
"""
for line in fileinput.input(self.init_file, inplace=1):
if line.strip().startswith("__version__"):
line = "__version__ = \"" + self.tag + "\""
print(line.strip("\n")) | def function[_write_new_tag_to_init, parameter[self]]:
constant[
Write version to __init__.py by editing in place
]
for taget[name[line]] in starred[call[name[fileinput].input, parameter[name[self].init_file]]] begin[:]
if call[call[name[line].strip, parameter[]].startswith, parameter[constant[__version__]]] begin[:]
variable[line] assign[=] binary_operation[binary_operation[constant[__version__ = "] + name[self].tag] + constant["]]
call[name[print], parameter[call[name[line].strip, parameter[constant[
]]]]] | keyword[def] identifier[_write_new_tag_to_init] ( identifier[self] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[fileinput] . identifier[input] ( identifier[self] . identifier[init_file] , identifier[inplace] = literal[int] ):
keyword[if] identifier[line] . identifier[strip] (). identifier[startswith] ( literal[string] ):
identifier[line] = literal[string] + identifier[self] . identifier[tag] + literal[string]
identifier[print] ( identifier[line] . identifier[strip] ( literal[string] )) | def _write_new_tag_to_init(self):
"""
Write version to __init__.py by editing in place
"""
for line in fileinput.input(self.init_file, inplace=1):
if line.strip().startswith('__version__'):
line = '__version__ = "' + self.tag + '"' # depends on [control=['if'], data=[]]
print(line.strip('\n')) # depends on [control=['for'], data=['line']] |
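
The in-place edit above works because fileinput with inplace=1 redirects print() into the file being read. A self-contained sketch of the same pattern (the file name and tag are invented):

import fileinput

init_file = 'demo_init.py'                       # hypothetical target file
with open(init_file, 'w') as fh:
    fh.write('__version__ = "0.1.0"\n')

tag = '0.2.0'
for line in fileinput.input(init_file, inplace=1):
    if line.strip().startswith('__version__'):
        line = '__version__ = "' + tag + '"'
    print(line.strip('\n'))                      # stdout goes into the file here

print(open(init_file).read().strip())            # __version__ = "0.2.0"
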
def model(self,
voltages=True,
sensitivities=False,
potentials=False,
output_directory=None,
silent=False,
):
"""Forward model the tomodir and read in the results
"""
self._check_state()
if self.can_model:
if output_directory is not None:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
tempdir = output_directory
self._model(voltages, sensitivities, potentials, tempdir)
else:
raise IOError(
'output directory already exists: {0}'.format(
output_directory
)
)
else:
with tempfile.TemporaryDirectory(dir=self.tempdir) as tempdir:
self._model(
voltages, sensitivities, potentials, tempdir,
silent=silent
)
return 1
else:
            print('Sorry, not all information required to model is present')
            print('Check:')
            print('1) configurations are present: self.configs.configs')
            print('2) a model is present')
return None | def function[model, parameter[self, voltages, sensitivities, potentials, output_directory, silent]]:
constant[Forward model the tomodir and read in the results
]
call[name[self]._check_state, parameter[]]
if name[self].can_model begin[:]
if compare[name[output_directory] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b2290310> begin[:]
call[name[os].makedirs, parameter[name[output_directory]]]
variable[tempdir] assign[=] name[output_directory]
call[name[self]._model, parameter[name[voltages], name[sensitivities], name[potentials], name[tempdir]]]
return[constant[1]] | keyword[def] identifier[model] ( identifier[self] ,
identifier[voltages] = keyword[True] ,
identifier[sensitivities] = keyword[False] ,
identifier[potentials] = keyword[False] ,
identifier[output_directory] = keyword[None] ,
identifier[silent] = keyword[False] ,
):
literal[string]
identifier[self] . identifier[_check_state] ()
keyword[if] identifier[self] . identifier[can_model] :
keyword[if] identifier[output_directory] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[output_directory] ):
identifier[os] . identifier[makedirs] ( identifier[output_directory] )
identifier[tempdir] = identifier[output_directory]
identifier[self] . identifier[_model] ( identifier[voltages] , identifier[sensitivities] , identifier[potentials] , identifier[tempdir] )
keyword[else] :
keyword[raise] identifier[IOError] (
literal[string] . identifier[format] (
identifier[output_directory]
)
)
keyword[else] :
keyword[with] identifier[tempfile] . identifier[TemporaryDirectory] ( identifier[dir] = identifier[self] . identifier[tempdir] ) keyword[as] identifier[tempdir] :
identifier[self] . identifier[_model] (
identifier[voltages] , identifier[sensitivities] , identifier[potentials] , identifier[tempdir] ,
identifier[silent] = identifier[silent]
)
keyword[return] literal[int]
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[return] keyword[None] | def model(self, voltages=True, sensitivities=False, potentials=False, output_directory=None, silent=False):
"""Forward model the tomodir and read in the results
"""
self._check_state()
if self.can_model:
if output_directory is not None:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
tempdir = output_directory
self._model(voltages, sensitivities, potentials, tempdir) # depends on [control=['if'], data=[]]
else:
raise IOError('output directory already exists: {0}'.format(output_directory)) # depends on [control=['if'], data=['output_directory']]
else:
with tempfile.TemporaryDirectory(dir=self.tempdir) as tempdir:
self._model(voltages, sensitivities, potentials, tempdir, silent=silent) # depends on [control=['with'], data=['tempdir']]
return 1 # depends on [control=['if'], data=[]]
else:
            print('Sorry, not all information required to model is present')
            print('Check:')
            print('1) configurations are present: self.configs.configs')
            print('2) a model is present')
return None |
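
The directory handling in model() is the reusable part: a named directory keeps results, a temporary one is cleaned up. A sketch of that branching outside the class, with a placeholder worker standing in for the _model call:

import os
import tempfile

def run_in_dir(worker, output_directory=None):
    if output_directory is not None:
        if os.path.isdir(output_directory):
            raise IOError('output directory already exists: {0}'.format(output_directory))
        os.makedirs(output_directory)
        worker(output_directory)              # results are kept on disk
    else:
        with tempfile.TemporaryDirectory() as tempdir:
            worker(tempdir)                   # directory vanishes on exit

run_in_dir(lambda d: print('modelling in', d))
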
def write(self, data):
"""Print any command sent in raw format.
:param bytes data: arbitrary code to be printed.
"""
self.device.write(data)
if self.auto_flush:
self.flush() | def function[write, parameter[self, data]]:
constant[Print any command sent in raw format.
:param bytes data: arbitrary code to be printed.
]
call[name[self].device.write, parameter[name[data]]]
if name[self].auto_flush begin[:]
call[name[self].flush, parameter[]] | keyword[def] identifier[write] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[device] . identifier[write] ( identifier[data] )
keyword[if] identifier[self] . identifier[auto_flush] :
identifier[self] . identifier[flush] () | def write(self, data):
"""Print any command sent in raw format.
:param bytes data: arbitrary code to be printed.
"""
self.device.write(data)
if self.auto_flush:
self.flush() # depends on [control=['if'], data=[]] |
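
A tiny sketch of the auto_flush behaviour above, using an in-memory BytesIO as a stand-in for the printer device (the wrapper class here is made up):

import io

class RawWriter:                      # illustrative stand-in for the printer
    def __init__(self, device, auto_flush=True):
        self.device = device
        self.auto_flush = auto_flush

    def flush(self):
        self.device.flush()

    def write(self, data):
        self.device.write(data)
        if self.auto_flush:
            self.flush()

w = RawWriter(io.BytesIO())
w.write(b'\x1b\x40')                  # raw bytes, flushed immediately
print(w.device.getvalue())            # b'\x1b@'
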
def urlencode(query):
"""Encode string to be used in urls (percent encoding).
:param query: string to be encoded
:type query: str
:return: urlencoded string
:rtype: str
:Example:
>>> urlencode('pekná líščička')
'pekn%C3%A1%20l%C3%AD%C5%A1%C4%8Di%C4%8Dka'
"""
    # Note: urlencode() expects a mapping; quote() performs the percent
    # encoding described in the docstring above.
    if hasattr(urllib, 'parse'):
        return urllib.parse.quote(query)
    else:
        return urllib.quote(query)
constant[Encode string to be used in urls (percent encoding).
:param query: string to be encoded
:type query: str
:return: urlencoded string
:rtype: str
:Example:
>>> urlencode('pekná líščička')
'pekn%C3%A1%20l%C3%AD%C5%A1%C4%8Di%C4%8Dka'
]
if call[name[hasattr], parameter[name[urllib], constant[parse]]] begin[:]
return[call[name[urllib].parse.urlencode, parameter[name[query]]]] | keyword[def] identifier[urlencode] ( identifier[query] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[urllib] , literal[string] ):
keyword[return] identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[query] )
keyword[else] :
keyword[return] identifier[urllib] . identifier[quote] ( identifier[query] )
"""Encode string to be used in urls (percent encoding).
:param query: string to be encoded
:type query: str
:return: urlencoded string
:rtype: str
:Example:
>>> urlencode('pekná líščička')
'pekn%C3%A1%20l%C3%AD%C5%A1%C4%8Di%C4%8Dka'
"""
if hasattr(urllib, 'parse'):
        return urllib.parse.quote(query) # depends on [control=['if'], data=[]]
    else:
        return urllib.quote(query)
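
A quick Python 3 check of the percent-encoding helper above; the output matches the docstring example:

import urllib.parse

print(urllib.parse.quote('pekná líščička'))
# pekn%C3%A1%20l%C3%AD%C5%A1%C4%8Di%C4%8Dka
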
def fetch(self):
'''
Gives all the data it has stored, and remembers what it has given.
Later we need to call commit() to actually remove the data from the
cache.
'''
if self._fetched is not None:
raise RuntimeError('fetch() was called but the previous one has '
'not yet been applied. Not supported')
if self._cache:
self._fetched = len(self._cache)
return self._cache[0:self._fetched] | def function[fetch, parameter[self]]:
constant[
Gives all the data it has stored, and remembers what it has given.
Later we need to call commit() to actually remove the data from the
cache.
]
if compare[name[self]._fetched is_not constant[None]] begin[:]
<ast.Raise object at 0x7da20c794b50>
if name[self]._cache begin[:]
name[self]._fetched assign[=] call[name[len], parameter[name[self]._cache]]
return[call[name[self]._cache][<ast.Slice object at 0x7da20c794e20>]] | keyword[def] identifier[fetch] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_fetched] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[_cache] :
identifier[self] . identifier[_fetched] = identifier[len] ( identifier[self] . identifier[_cache] )
keyword[return] identifier[self] . identifier[_cache] [ literal[int] : identifier[self] . identifier[_fetched] ] | def fetch(self):
"""
Gives all the data it has stored, and remembers what it has given.
Later we need to call commit() to actually remove the data from the
cache.
"""
if self._fetched is not None:
raise RuntimeError('fetch() was called but the previous one has not yet been applied. Not supported') # depends on [control=['if'], data=[]]
if self._cache:
self._fetched = len(self._cache) # depends on [control=['if'], data=[]]
return self._cache[0:self._fetched] |
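
A sketch of the fetch/commit handshake the docstring describes. The commit() below is written from the docstring's wording, not taken from the source:

class SendCache:
    def __init__(self):
        self._cache = []
        self._fetched = None

    def put(self, item):
        self._cache.append(item)

    def fetch(self):
        if self._fetched is not None:
            raise RuntimeError('fetch() was called but the previous one has '
                               'not yet been applied. Not supported')
        if self._cache:
            self._fetched = len(self._cache)
            return self._cache[0:self._fetched]

    def commit(self):
        # Actually drop what the last fetch() handed out.
        if self._fetched is not None:
            del self._cache[0:self._fetched]
            self._fetched = None

cache = SendCache()
cache.put('a'); cache.put('b')
print(cache.fetch())    # ['a', 'b']
cache.commit()
print(cache.fetch())    # None -- the cache is empty again
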
def _dispatch_event(self, event, data=None):
"""Dispatches the event and executes any associated callbacks.
Note: To prevent the app from crashing due to callback errors. We
catch all exceptions and send all data to the logger.
Args:
event (str): The type of event. e.g. 'bot_added'
data (dict): The data Slack sent. e.g.
{
"type": "bot_added",
"bot": {
"id": "B024BE7LH",
"app_id": "A4H1JB4AZ",
"name": "hugbot"
}
}
"""
for callback in self._callbacks[event]:
self._logger.debug(
"Running %s callbacks for event: '%s'",
len(self._callbacks[event]),
event,
)
try:
if self._stopped and event not in ["close", "error"]:
# Don't run callbacks if client was stopped unless they're close/error callbacks.
break
if self.run_async:
self._execute_callback_async(callback, data)
else:
self._execute_callback(callback, data)
except Exception as err:
name = callback.__name__
module = callback.__module__
msg = f"When calling '#{name}()' in the '{module}' module the following error was raised: {err}"
self._logger.error(msg)
raise | def function[_dispatch_event, parameter[self, event, data]]:
constant[Dispatches the event and executes any associated callbacks.
Note: To prevent the app from crashing due to callback errors. We
catch all exceptions and send all data to the logger.
Args:
event (str): The type of event. e.g. 'bot_added'
data (dict): The data Slack sent. e.g.
{
"type": "bot_added",
"bot": {
"id": "B024BE7LH",
"app_id": "A4H1JB4AZ",
"name": "hugbot"
}
}
]
for taget[name[callback]] in starred[call[name[self]._callbacks][name[event]]] begin[:]
call[name[self]._logger.debug, parameter[constant[Running %s callbacks for event: '%s'], call[name[len], parameter[call[name[self]._callbacks][name[event]]]], name[event]]]
<ast.Try object at 0x7da1b1bf9e70> | keyword[def] identifier[_dispatch_event] ( identifier[self] , identifier[event] , identifier[data] = keyword[None] ):
literal[string]
keyword[for] identifier[callback] keyword[in] identifier[self] . identifier[_callbacks] [ identifier[event] ]:
identifier[self] . identifier[_logger] . identifier[debug] (
literal[string] ,
identifier[len] ( identifier[self] . identifier[_callbacks] [ identifier[event] ]),
identifier[event] ,
)
keyword[try] :
keyword[if] identifier[self] . identifier[_stopped] keyword[and] identifier[event] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[break]
keyword[if] identifier[self] . identifier[run_async] :
identifier[self] . identifier[_execute_callback_async] ( identifier[callback] , identifier[data] )
keyword[else] :
identifier[self] . identifier[_execute_callback] ( identifier[callback] , identifier[data] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[name] = identifier[callback] . identifier[__name__]
identifier[module] = identifier[callback] . identifier[__module__]
identifier[msg] = literal[string]
identifier[self] . identifier[_logger] . identifier[error] ( identifier[msg] )
keyword[raise] | def _dispatch_event(self, event, data=None):
"""Dispatches the event and executes any associated callbacks.
Note: To prevent the app from crashing due to callback errors. We
catch all exceptions and send all data to the logger.
Args:
event (str): The type of event. e.g. 'bot_added'
data (dict): The data Slack sent. e.g.
{
"type": "bot_added",
"bot": {
"id": "B024BE7LH",
"app_id": "A4H1JB4AZ",
"name": "hugbot"
}
}
"""
for callback in self._callbacks[event]:
self._logger.debug("Running %s callbacks for event: '%s'", len(self._callbacks[event]), event)
try:
if self._stopped and event not in ['close', 'error']:
# Don't run callbacks if client was stopped unless they're close/error callbacks.
break # depends on [control=['if'], data=[]]
if self.run_async:
self._execute_callback_async(callback, data) # depends on [control=['if'], data=[]]
else:
self._execute_callback(callback, data) # depends on [control=['try'], data=[]]
except Exception as err:
name = callback.__name__
module = callback.__module__
msg = f"When calling '#{name}()' in the '{module}' module the following error was raised: {err}"
self._logger.error(msg)
raise # depends on [control=['except'], data=['err']] # depends on [control=['for'], data=['callback']] |
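
A stripped-down, synchronous sketch of the dispatch loop above (no async path; the defaultdict-of-lists registry is what the method implies about self._callbacks):

import logging
from collections import defaultdict

logging.basicConfig(level=logging.DEBUG)
callbacks = defaultdict(list)

def dispatch(event, data=None):
    for callback in callbacks[event]:
        try:
            callback(data)
        except Exception as err:
            logging.error("'%s()' raised: %s", callback.__name__, err)
            raise

callbacks['bot_added'].append(lambda data: print('new bot:', data['bot']['name']))
dispatch('bot_added', {'type': 'bot_added', 'bot': {'name': 'hugbot'}})
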
def set_application_property(self, key, value):
"""Set the application property.
:param key: key of the property to set
:type key: str
:param value: value to assign to the property
:type value: str
"""
url = self._options['server'] + \
'/rest/api/latest/application-properties/' + key
payload = {
'id': key,
'value': value}
return self._session.put(
url, data=json.dumps(payload)) | def function[set_application_property, parameter[self, key, value]]:
constant[Set the application property.
:param key: key of the property to set
:type key: str
:param value: value to assign to the property
:type value: str
]
variable[url] assign[=] binary_operation[binary_operation[call[name[self]._options][constant[server]] + constant[/rest/api/latest/application-properties/]] + name[key]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c49f00>, <ast.Constant object at 0x7da1b1c49b40>], [<ast.Name object at 0x7da1b1c49990>, <ast.Name object at 0x7da1b1c499f0>]]
return[call[name[self]._session.put, parameter[name[url]]]] | keyword[def] identifier[set_application_property] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[url] = identifier[self] . identifier[_options] [ literal[string] ]+ literal[string] + identifier[key]
identifier[payload] ={
literal[string] : identifier[key] ,
literal[string] : identifier[value] }
keyword[return] identifier[self] . identifier[_session] . identifier[put] (
identifier[url] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[payload] )) | def set_application_property(self, key, value):
"""Set the application property.
:param key: key of the property to set
:type key: str
:param value: value to assign to the property
:type value: str
"""
url = self._options['server'] + '/rest/api/latest/application-properties/' + key
payload = {'id': key, 'value': value}
return self._session.put(url, data=json.dumps(payload)) |
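
Building that request is string assembly plus a JSON body. A hedged sketch that stops short of the network call (the server URL and property key are placeholders):

import json

server = 'https://jira.example.com'               # placeholder base URL
key, value = 'jira.clone.prefix', 'COPY OF'
url = server + '/rest/api/latest/application-properties/' + key
payload = json.dumps({'id': key, 'value': value})
print('PUT', url, payload)
# a real client would now issue: session.put(url, data=payload)
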
def exactly(self, number):
"""
        Inspected function should be called exactly `number` times
Return: self
"""
def check(): #pylint: disable=missing-docstring
return True if number == super(SinonExpectation, self).callCount else False
self.valid_list.append(check)
return self | def function[exactly, parameter[self, number]]:
constant[
 Inspected function should be called exactly `number` times
Return: self
]
def function[check, parameter[]]:
return[<ast.IfExp object at 0x7da1b2347820>]
call[name[self].valid_list.append, parameter[name[check]]]
return[name[self]] | keyword[def] identifier[exactly] ( identifier[self] , identifier[number] ):
literal[string]
keyword[def] identifier[check] ():
keyword[return] keyword[True] keyword[if] identifier[number] == identifier[super] ( identifier[SinonExpectation] , identifier[self] ). identifier[callCount] keyword[else] keyword[False]
identifier[self] . identifier[valid_list] . identifier[append] ( identifier[check] )
keyword[return] identifier[self] | def exactly(self, number):
"""
        Inspected function should be called exactly `number` times
Return: self
"""
def check(): #pylint: disable=missing-docstring
return True if number == super(SinonExpectation, self).callCount else False
self.valid_list.append(check)
return self |
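
The expectation above defers its check into a closure stored on valid_list. A toy version of that deferred-validation pattern (all names here are illustrative):

class ToyExpectation:
    def __init__(self):
        self.call_count = 0
        self.valid_list = []

    def __call__(self):
        self.call_count += 1

    def exactly(self, number):
        self.valid_list.append(lambda: self.call_count == number)
        return self                   # chainable, like the original

    def verify(self):
        return all(check() for check in self.valid_list)

spy = ToyExpectation().exactly(2)
spy(); spy()
print(spy.verify())                   # True
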
def removeAnalysis(self, analysis):
""" Unassigns the analysis passed in from the worksheet.
Delegates to 'unassign' transition for the analysis passed in
"""
# We need to bypass the guard's check for current context!
api.get_request().set("ws_uid", api.get_uid(self))
if analysis.getWorksheet() == self:
doActionFor(analysis, "unassign") | def function[removeAnalysis, parameter[self, analysis]]:
constant[ Unassigns the analysis passed in from the worksheet.
Delegates to 'unassign' transition for the analysis passed in
]
call[call[name[api].get_request, parameter[]].set, parameter[constant[ws_uid], call[name[api].get_uid, parameter[name[self]]]]]
if compare[call[name[analysis].getWorksheet, parameter[]] equal[==] name[self]] begin[:]
call[name[doActionFor], parameter[name[analysis], constant[unassign]]] | keyword[def] identifier[removeAnalysis] ( identifier[self] , identifier[analysis] ):
literal[string]
identifier[api] . identifier[get_request] (). identifier[set] ( literal[string] , identifier[api] . identifier[get_uid] ( identifier[self] ))
keyword[if] identifier[analysis] . identifier[getWorksheet] ()== identifier[self] :
identifier[doActionFor] ( identifier[analysis] , literal[string] ) | def removeAnalysis(self, analysis):
""" Unassigns the analysis passed in from the worksheet.
Delegates to 'unassign' transition for the analysis passed in
"""
# We need to bypass the guard's check for current context!
api.get_request().set('ws_uid', api.get_uid(self))
if analysis.getWorksheet() == self:
doActionFor(analysis, 'unassign') # depends on [control=['if'], data=[]] |
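
The method above sets a request-level flag so a workflow guard lets the 'unassign' transition through. A generic, made-up sketch of that flag-then-check pattern (FakeRequest and the guard below are not the real bika/senaite API):

class FakeRequest(dict):
    def set(self, key, value):
        self[key] = value

request = FakeRequest()

def guard_unassign(worksheet_uid):
    # the guard lets the transition through only for the flagged worksheet
    return request.get('ws_uid') == worksheet_uid

request.set('ws_uid', 'uid-123')      # bypass flag, as in removeAnalysis
print(guard_unassign('uid-123'))      # True
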
def get_title(src_name, src_type=None):
"""Normalizes a source name as a string to be used for viewer's title."""
if src_type == 'tcp':
return '{0}:{1}'.format(*src_name)
return os.path.basename(src_name) | def function[get_title, parameter[src_name, src_type]]:
constant[Normalizes a source name as a string to be used for viewer's title.]
if compare[name[src_type] equal[==] constant[tcp]] begin[:]
return[call[constant[{0}:{1}].format, parameter[<ast.Starred object at 0x7da1b1197790>]]]
return[call[name[os].path.basename, parameter[name[src_name]]]] | keyword[def] identifier[get_title] ( identifier[src_name] , identifier[src_type] = keyword[None] ):
literal[string]
keyword[if] identifier[src_type] == literal[string] :
keyword[return] literal[string] . identifier[format] (* identifier[src_name] )
keyword[return] identifier[os] . identifier[path] . identifier[basename] ( identifier[src_name] ) | def get_title(src_name, src_type=None):
"""Normalizes a source name as a string to be used for viewer's title."""
if src_type == 'tcp':
return '{0}:{1}'.format(*src_name) # depends on [control=['if'], data=[]]
return os.path.basename(src_name) |
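
Quick checks of the title helper above, redefined here so the snippet is self-contained:

import os

def get_title(src_name, src_type=None):
    if src_type == 'tcp':
        return '{0}:{1}'.format(*src_name)
    return os.path.basename(src_name)

print(get_title(('127.0.0.1', 4040), src_type='tcp'))   # 127.0.0.1:4040
print(get_title('/var/log/syslog'))                     # syslog
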
def __get_stack_id(self, value, values, height):
"""
Returns the index of the column representation of the given value
▁ ▂ ▃ ▄ ▅ ▆ ▇' ...
▁ ▂ ▃ ▄ ▅ ▆ ▇' ▇ ▇ ▇ ▇ ▇ ▇ ▇ ...
▁ ▂ ▃ ▄ ▅ ▆ ▇' ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ...
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...
For example given the values: 1, 2, 3, ..., 20, 21:
And we are looking for the index value of 21:
This function will return index 20
"""
def step(values, height):
step_range = max(values) - min(values)
return (((step_range / float((len(self.__list) * height) - 1)))
or 1)
step_value = step(values, height)
return int(round((value - min(values)) / step_value)) | def function[__get_stack_id, parameter[self, value, values, height]]:
constant[
Returns the index of the column representation of the given value
▁ ▂ ▃ ▄ ▅ ▆ ▇' ...
▁ ▂ ▃ ▄ ▅ ▆ ▇' ▇ ▇ ▇ ▇ ▇ ▇ ▇ ...
▁ ▂ ▃ ▄ ▅ ▆ ▇' ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ...
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...
For example given the values: 1, 2, 3, ..., 20, 21:
And we are looking for the index value of 21:
This function will return index 20
]
def function[step, parameter[values, height]]:
variable[step_range] assign[=] binary_operation[call[name[max], parameter[name[values]]] - call[name[min], parameter[name[values]]]]
return[<ast.BoolOp object at 0x7da20c6e5ea0>]
variable[step_value] assign[=] call[name[step], parameter[name[values], name[height]]]
return[call[name[int], parameter[call[name[round], parameter[binary_operation[binary_operation[name[value] - call[name[min], parameter[name[values]]]] / name[step_value]]]]]]] | keyword[def] identifier[__get_stack_id] ( identifier[self] , identifier[value] , identifier[values] , identifier[height] ):
literal[string]
keyword[def] identifier[step] ( identifier[values] , identifier[height] ):
identifier[step_range] = identifier[max] ( identifier[values] )- identifier[min] ( identifier[values] )
keyword[return] ((( identifier[step_range] / identifier[float] (( identifier[len] ( identifier[self] . identifier[__list] )* identifier[height] )- literal[int] )))
keyword[or] literal[int] )
identifier[step_value] = identifier[step] ( identifier[values] , identifier[height] )
keyword[return] identifier[int] ( identifier[round] (( identifier[value] - identifier[min] ( identifier[values] ))/ identifier[step_value] )) | def __get_stack_id(self, value, values, height):
"""
Returns the index of the column representation of the given value
▁ ▂ ▃ ▄ ▅ ▆ ▇' ...
▁ ▂ ▃ ▄ ▅ ▆ ▇' ▇ ▇ ▇ ▇ ▇ ▇ ▇ ...
▁ ▂ ▃ ▄ ▅ ▆ ▇' ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ▇ ...
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...
For example given the values: 1, 2, 3, ..., 20, 21:
And we are looking for the index value of 21:
This function will return index 20
"""
def step(values, height):
step_range = max(values) - min(values)
return step_range / float(len(self.__list) * height - 1) or 1
step_value = step(values, height)
return int(round((value - min(values)) / step_value)) |
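
A worked run of the bucketing above, with the private len(self.__list) replaced by an explicit count so it runs standalone; three sparkline rows of height 7 give 21 buckets, reproducing the docstring's example (value 21 lands in index 20):

def get_stack_id(value, values, n_lists, height):
    step_range = max(values) - min(values)
    step = step_range / float(n_lists * height - 1) or 1
    return int(round((value - min(values)) / step))

values = list(range(1, 22))                             # 1 .. 21
print(get_stack_id(21, values, n_lists=3, height=7))    # 20
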
def dSbr_dV(self, Yf, Yt, V, buses=None, branches=None):
""" Based on dSbr_dV.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@return: The branch power flow vectors and the partial derivatives of
branch power flow w.r.t voltage magnitude and voltage angle.
@rtype: tuple
"""
buses = self.buses if buses is None else buses
branches = self.branches if branches is None else branches
nl = len(branches)
nb = len(V)
il = range(nl)
ib = range(nb)
f = [l.from_bus._i for l in branches]
t = [l.to_bus._i for l in branches]
# Compute currents.
If = Yf * V
It = Yt * V
Vnorm = V / abs(V)
diagVf = csr_matrix((V[f], (il, il)))
diagIf = csr_matrix((If, (il, il)))
diagVt = csr_matrix((V[t], (il, il)))
diagIt = csr_matrix((It, (il, il)))
diagV = csr_matrix((V, (ib, ib)))
diagVnorm = csr_matrix((Vnorm, (ib, ib)))
shape = (nl, nb)
        # Partial derivative of S w.r.t. voltage phase angle.
dSf_dVa = 1j * (conj(diagIf) *
csr_matrix((V[f], (il, f)), shape) - diagVf * conj(Yf * diagV))
dSt_dVa = 1j * (conj(diagIt) *
csr_matrix((V[t], (il, t)), shape) - diagVt * conj(Yt * diagV))
# Partial derivative of S w.r.t. voltage amplitude.
dSf_dVm = diagVf * conj(Yf * diagVnorm) + conj(diagIf) * \
csr_matrix((Vnorm[f], (il, f)), shape)
dSt_dVm = diagVt * conj(Yt * diagVnorm) + conj(diagIt) * \
csr_matrix((Vnorm[t], (il, t)), shape)
# Compute power flow vectors.
Sf = V[f] * conj(If)
St = V[t] * conj(It)
return dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St | def function[dSbr_dV, parameter[self, Yf, Yt, V, buses, branches]]:
constant[ Based on dSbr_dV.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@return: The branch power flow vectors and the partial derivatives of
branch power flow w.r.t voltage magnitude and voltage angle.
@rtype: tuple
]
variable[buses] assign[=] <ast.IfExp object at 0x7da1b25d3190>
variable[branches] assign[=] <ast.IfExp object at 0x7da1b25d1540>
variable[nl] assign[=] call[name[len], parameter[name[branches]]]
variable[nb] assign[=] call[name[len], parameter[name[V]]]
variable[il] assign[=] call[name[range], parameter[name[nl]]]
variable[ib] assign[=] call[name[range], parameter[name[nb]]]
variable[f] assign[=] <ast.ListComp object at 0x7da1b25d3a30>
variable[t] assign[=] <ast.ListComp object at 0x7da1b25d1ab0>
variable[If] assign[=] binary_operation[name[Yf] * name[V]]
variable[It] assign[=] binary_operation[name[Yt] * name[V]]
variable[Vnorm] assign[=] binary_operation[name[V] / call[name[abs], parameter[name[V]]]]
variable[diagVf] assign[=] call[name[csr_matrix], parameter[tuple[[<ast.Subscript object at 0x7da1b25d37c0>, <ast.Tuple object at 0x7da1b25d3850>]]]]
variable[diagIf] assign[=] call[name[csr_matrix], parameter[tuple[[<ast.Name object at 0x7da1b25d2800>, <ast.Tuple object at 0x7da1b25d1de0>]]]]
variable[diagVt] assign[=] call[name[csr_matrix], parameter[tuple[[<ast.Subscript object at 0x7da1b25d3a60>, <ast.Tuple object at 0x7da1b25d39a0>]]]]
variable[diagIt] assign[=] call[name[csr_matrix], parameter[tuple[[<ast.Name object at 0x7da18dc9b370>, <ast.Tuple object at 0x7da18dc9bf40>]]]]
variable[diagV] assign[=] call[name[csr_matrix], parameter[tuple[[<ast.Name object at 0x7da18dc98820>, <ast.Tuple object at 0x7da18dc9aef0>]]]]
variable[diagVnorm] assign[=] call[name[csr_matrix], parameter[tuple[[<ast.Name object at 0x7da18dc9a8f0>, <ast.Tuple object at 0x7da18dc9b6d0>]]]]
variable[shape] assign[=] tuple[[<ast.Name object at 0x7da18dc9a890>, <ast.Name object at 0x7da18dc9b580>]]
variable[dSf_dVa] assign[=] binary_operation[constant[1j] * binary_operation[binary_operation[call[name[conj], parameter[name[diagIf]]] * call[name[csr_matrix], parameter[tuple[[<ast.Subscript object at 0x7da18dc9be80>, <ast.Tuple object at 0x7da18dc997b0>]], name[shape]]]] - binary_operation[name[diagVf] * call[name[conj], parameter[binary_operation[name[Yf] * name[diagV]]]]]]]
variable[dSt_dVa] assign[=] binary_operation[constant[1j] * binary_operation[binary_operation[call[name[conj], parameter[name[diagIt]]] * call[name[csr_matrix], parameter[tuple[[<ast.Subscript object at 0x7da18dc999c0>, <ast.Tuple object at 0x7da18dc98d00>]], name[shape]]]] - binary_operation[name[diagVt] * call[name[conj], parameter[binary_operation[name[Yt] * name[diagV]]]]]]]
variable[dSf_dVm] assign[=] binary_operation[binary_operation[name[diagVf] * call[name[conj], parameter[binary_operation[name[Yf] * name[diagVnorm]]]]] + binary_operation[call[name[conj], parameter[name[diagIf]]] * call[name[csr_matrix], parameter[tuple[[<ast.Subscript object at 0x7da18dc9bac0>, <ast.Tuple object at 0x7da18dc9a1d0>]], name[shape]]]]]
variable[dSt_dVm] assign[=] binary_operation[binary_operation[name[diagVt] * call[name[conj], parameter[binary_operation[name[Yt] * name[diagVnorm]]]]] + binary_operation[call[name[conj], parameter[name[diagIt]]] * call[name[csr_matrix], parameter[tuple[[<ast.Subscript object at 0x7da18dc9a7d0>, <ast.Tuple object at 0x7da18dc98fd0>]], name[shape]]]]]
variable[Sf] assign[=] binary_operation[call[name[V]][name[f]] * call[name[conj], parameter[name[If]]]]
variable[St] assign[=] binary_operation[call[name[V]][name[t]] * call[name[conj], parameter[name[It]]]]
return[tuple[[<ast.Name object at 0x7da18dc9b460>, <ast.Name object at 0x7da18dc9a1a0>, <ast.Name object at 0x7da18dc99d80>, <ast.Name object at 0x7da18dc9b280>, <ast.Name object at 0x7da18dc98220>, <ast.Name object at 0x7da18dc99e40>]]] | keyword[def] identifier[dSbr_dV] ( identifier[self] , identifier[Yf] , identifier[Yt] , identifier[V] , identifier[buses] = keyword[None] , identifier[branches] = keyword[None] ):
literal[string]
identifier[buses] = identifier[self] . identifier[buses] keyword[if] identifier[buses] keyword[is] keyword[None] keyword[else] identifier[buses]
identifier[branches] = identifier[self] . identifier[branches] keyword[if] identifier[branches] keyword[is] keyword[None] keyword[else] identifier[branches]
identifier[nl] = identifier[len] ( identifier[branches] )
identifier[nb] = identifier[len] ( identifier[V] )
identifier[il] = identifier[range] ( identifier[nl] )
identifier[ib] = identifier[range] ( identifier[nb] )
identifier[f] =[ identifier[l] . identifier[from_bus] . identifier[_i] keyword[for] identifier[l] keyword[in] identifier[branches] ]
identifier[t] =[ identifier[l] . identifier[to_bus] . identifier[_i] keyword[for] identifier[l] keyword[in] identifier[branches] ]
identifier[If] = identifier[Yf] * identifier[V]
identifier[It] = identifier[Yt] * identifier[V]
identifier[Vnorm] = identifier[V] / identifier[abs] ( identifier[V] )
identifier[diagVf] = identifier[csr_matrix] (( identifier[V] [ identifier[f] ],( identifier[il] , identifier[il] )))
identifier[diagIf] = identifier[csr_matrix] (( identifier[If] ,( identifier[il] , identifier[il] )))
identifier[diagVt] = identifier[csr_matrix] (( identifier[V] [ identifier[t] ],( identifier[il] , identifier[il] )))
identifier[diagIt] = identifier[csr_matrix] (( identifier[It] ,( identifier[il] , identifier[il] )))
identifier[diagV] = identifier[csr_matrix] (( identifier[V] ,( identifier[ib] , identifier[ib] )))
identifier[diagVnorm] = identifier[csr_matrix] (( identifier[Vnorm] ,( identifier[ib] , identifier[ib] )))
identifier[shape] =( identifier[nl] , identifier[nb] )
identifier[dSf_dVa] = literal[int] *( identifier[conj] ( identifier[diagIf] )*
identifier[csr_matrix] (( identifier[V] [ identifier[f] ],( identifier[il] , identifier[f] )), identifier[shape] )- identifier[diagVf] * identifier[conj] ( identifier[Yf] * identifier[diagV] ))
identifier[dSt_dVa] = literal[int] *( identifier[conj] ( identifier[diagIt] )*
identifier[csr_matrix] (( identifier[V] [ identifier[t] ],( identifier[il] , identifier[t] )), identifier[shape] )- identifier[diagVt] * identifier[conj] ( identifier[Yt] * identifier[diagV] ))
identifier[dSf_dVm] = identifier[diagVf] * identifier[conj] ( identifier[Yf] * identifier[diagVnorm] )+ identifier[conj] ( identifier[diagIf] )* identifier[csr_matrix] (( identifier[Vnorm] [ identifier[f] ],( identifier[il] , identifier[f] )), identifier[shape] )
identifier[dSt_dVm] = identifier[diagVt] * identifier[conj] ( identifier[Yt] * identifier[diagVnorm] )+ identifier[conj] ( identifier[diagIt] )* identifier[csr_matrix] (( identifier[Vnorm] [ identifier[t] ],( identifier[il] , identifier[t] )), identifier[shape] )
identifier[Sf] = identifier[V] [ identifier[f] ]* identifier[conj] ( identifier[If] )
identifier[St] = identifier[V] [ identifier[t] ]* identifier[conj] ( identifier[It] )
keyword[return] identifier[dSf_dVa] , identifier[dSf_dVm] , identifier[dSt_dVa] , identifier[dSt_dVm] , identifier[Sf] , identifier[St] | def dSbr_dV(self, Yf, Yt, V, buses=None, branches=None):
""" Based on dSbr_dV.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@return: The branch power flow vectors and the partial derivatives of
branch power flow w.r.t voltage magnitude and voltage angle.
@rtype: tuple
"""
buses = self.buses if buses is None else buses
branches = self.branches if branches is None else branches
nl = len(branches)
nb = len(V)
il = range(nl)
ib = range(nb)
f = [l.from_bus._i for l in branches]
t = [l.to_bus._i for l in branches]
# Compute currents.
If = Yf * V
It = Yt * V
Vnorm = V / abs(V)
diagVf = csr_matrix((V[f], (il, il)))
diagIf = csr_matrix((If, (il, il)))
diagVt = csr_matrix((V[t], (il, il)))
diagIt = csr_matrix((It, (il, il)))
diagV = csr_matrix((V, (ib, ib)))
diagVnorm = csr_matrix((Vnorm, (ib, ib)))
shape = (nl, nb)
        # Partial derivative of S w.r.t. voltage phase angle.
dSf_dVa = 1j * (conj(diagIf) * csr_matrix((V[f], (il, f)), shape) - diagVf * conj(Yf * diagV))
dSt_dVa = 1j * (conj(diagIt) * csr_matrix((V[t], (il, t)), shape) - diagVt * conj(Yt * diagV))
# Partial derivative of S w.r.t. voltage amplitude.
dSf_dVm = diagVf * conj(Yf * diagVnorm) + conj(diagIf) * csr_matrix((Vnorm[f], (il, f)), shape)
dSt_dVm = diagVt * conj(Yt * diagVnorm) + conj(diagIt) * csr_matrix((Vnorm[t], (il, t)), shape)
# Compute power flow vectors.
Sf = V[f] * conj(If)
St = V[t] * conj(It)
return (dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St) |
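
The derivatives above lean on one sparse idiom: turning a vector into a diagonal matrix via csr_matrix((data, (rows, cols))). A small three-bus illustration of that building block (the voltages are arbitrary):

import numpy as np
from scipy.sparse import csr_matrix

V = np.array([1.0 + 0.0j, 0.98 - 0.02j, 1.01 + 0.01j])   # arbitrary bus voltages
ib = range(len(V))
diagV = csr_matrix((V, (ib, ib)))                # diag(V), as used above
diagVnorm = csr_matrix((V / abs(V), (ib, ib)))   # diag(V / |V|)
print(diagV.toarray())
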
def p_statement_list_1(self, p):
'''statement_list : statement SEMICOLON statement_list'''
p[0] = p[3]
if p[1] is not None:
p[0].children.insert(0, p[1]) | def function[p_statement_list_1, parameter[self, p]]:
constant[statement_list : statement SEMICOLON statement_list]
call[name[p]][constant[0]] assign[=] call[name[p]][constant[3]]
if compare[call[name[p]][constant[1]] is_not constant[None]] begin[:]
call[call[name[p]][constant[0]].children.insert, parameter[constant[0], call[name[p]][constant[1]]]] | keyword[def] identifier[p_statement_list_1] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]
keyword[if] identifier[p] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
identifier[p] [ literal[int] ]. identifier[children] . identifier[insert] ( literal[int] , identifier[p] [ literal[int] ]) | def p_statement_list_1(self, p):
"""statement_list : statement SEMICOLON statement_list"""
p[0] = p[3]
if p[1] is not None:
p[0].children.insert(0, p[1]) # depends on [control=['if'], data=[]] |
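
The PLY action above threads the list node through the right recursion and prepends non-empty statements. Simulating it with a plain list standing in for the production object p:

class Node:
    def __init__(self, children=None):
        self.children = children or []

def p_statement_list_1(p):
    '''statement_list : statement SEMICOLON statement_list'''
    p[0] = p[3]
    if p[1] is not None:
        p[0].children.insert(0, p[1])

p = [None, 'stmt_a', ';', Node(['stmt_b', 'stmt_c'])]
p_statement_list_1(p)
print(p[0].children)          # ['stmt_a', 'stmt_b', 'stmt_c']
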
def get_rendering_cache_key(placeholder_name, contentitem):
"""
Return a cache key for the content item output.
.. seealso::
The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function
can be used to remove the cache keys of a retrieved object.
"""
if not contentitem.pk:
return None
return "contentitem.@{0}.{1}.{2}".format(
placeholder_name,
contentitem.plugin.type_name, # always returns the upcasted name.
contentitem.pk, # already unique per language_code
) | def function[get_rendering_cache_key, parameter[placeholder_name, contentitem]]:
constant[
Return a cache key for the content item output.
.. seealso::
The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function
can be used to remove the cache keys of a retrieved object.
]
if <ast.UnaryOp object at 0x7da1b1175b40> begin[:]
return[constant[None]]
return[call[constant[contentitem.@{0}.{1}.{2}].format, parameter[name[placeholder_name], name[contentitem].plugin.type_name, name[contentitem].pk]]] | keyword[def] identifier[get_rendering_cache_key] ( identifier[placeholder_name] , identifier[contentitem] ):
literal[string]
keyword[if] keyword[not] identifier[contentitem] . identifier[pk] :
keyword[return] keyword[None]
keyword[return] literal[string] . identifier[format] (
identifier[placeholder_name] ,
identifier[contentitem] . identifier[plugin] . identifier[type_name] ,
identifier[contentitem] . identifier[pk] ,
) | def get_rendering_cache_key(placeholder_name, contentitem):
"""
Return a cache key for the content item output.
.. seealso::
The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function
can be used to remove the cache keys of a retrieved object.
"""
if not contentitem.pk:
return None # depends on [control=['if'], data=[]] # always returns the upcasted name.
# already unique per language_code
return 'contentitem.@{0}.{1}.{2}'.format(placeholder_name, contentitem.plugin.type_name, contentitem.pk) |
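
A shape check of the key format above with stand-in objects (the real arguments are Django model instances; these fakes only mimic the attributes the function reads):

class FakePlugin:
    type_name = 'TextItem'

class FakeItem:
    pk = 42
    plugin = FakePlugin()

def make_key(placeholder_name, contentitem):
    if not contentitem.pk:
        return None
    return "contentitem.@{0}.{1}.{2}".format(
        placeholder_name, contentitem.plugin.type_name, contentitem.pk)

print(make_key('main', FakeItem()))    # contentitem.@main.TextItem.42
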
def ensemble_mean_std_max_min(ens):
"""Calculate ensemble statistics between a results from an ensemble of climate simulations
Returns a dataset containing ensemble mean, standard-deviation,
minimum and maximum for input climate simulations.
Parameters
----------
ens : Ensemble dataset (see xclim.utils.create_ensemble)
Returns
-------
xarray dataset with containing data variables of ensemble statistics
Examples
--------
>>> from xclim import utils
>>> import glob
>>> ncfiles = glob.glob('/*tas*.nc')
Create ensemble dataset
>>> ens = utils.create_ensemble(ncfiles)
Calculate ensemble statistics
    >>> ens_mean_std = utils.ensemble_mean_std_max_min(ens)
>>> print(ens_mean_std['tas_mean'])
"""
dsOut = ens.drop(ens.data_vars)
for v in ens.data_vars:
dsOut[v + '_mean'] = ens[v].mean(dim='realization')
dsOut[v + '_stdev'] = ens[v].std(dim='realization')
dsOut[v + '_max'] = ens[v].max(dim='realization')
dsOut[v + '_min'] = ens[v].min(dim='realization')
for vv in dsOut.data_vars:
dsOut[vv].attrs = ens[v].attrs
if 'description' in dsOut[vv].attrs.keys():
vv.split()
dsOut[vv].attrs['description'] = dsOut[vv].attrs['description'] + ' : ' + vv.split('_')[
-1] + ' of ensemble'
return dsOut | def function[ensemble_mean_std_max_min, parameter[ens]]:
    constant[Calculate ensemble statistics across the members of an ensemble of climate simulations
Returns a dataset containing ensemble mean, standard-deviation,
minimum and maximum for input climate simulations.
Parameters
----------
ens : Ensemble dataset (see xclim.utils.create_ensemble)
Returns
-------
    xarray dataset containing data variables of ensemble statistics
Examples
--------
>>> from xclim import utils
>>> import glob
>>> ncfiles = glob.glob('/*tas*.nc')
Create ensemble dataset
>>> ens = utils.create_ensemble(ncfiles)
Calculate ensemble statistics
    >>> ens_mean_std = utils.ensemble_mean_std_max_min(ens)
>>> print(ens_mean_std['tas_mean'])
]
variable[dsOut] assign[=] call[name[ens].drop, parameter[name[ens].data_vars]]
for taget[name[v]] in starred[name[ens].data_vars] begin[:]
call[name[dsOut]][binary_operation[name[v] + constant[_mean]]] assign[=] call[call[name[ens]][name[v]].mean, parameter[]]
call[name[dsOut]][binary_operation[name[v] + constant[_stdev]]] assign[=] call[call[name[ens]][name[v]].std, parameter[]]
call[name[dsOut]][binary_operation[name[v] + constant[_max]]] assign[=] call[call[name[ens]][name[v]].max, parameter[]]
call[name[dsOut]][binary_operation[name[v] + constant[_min]]] assign[=] call[call[name[ens]][name[v]].min, parameter[]]
for taget[name[vv]] in starred[name[dsOut].data_vars] begin[:]
call[name[dsOut]][name[vv]].attrs assign[=] call[name[ens]][name[v]].attrs
if compare[constant[description] in call[call[name[dsOut]][name[vv]].attrs.keys, parameter[]]] begin[:]
call[name[vv].split, parameter[]]
call[call[name[dsOut]][name[vv]].attrs][constant[description]] assign[=] binary_operation[binary_operation[binary_operation[call[call[name[dsOut]][name[vv]].attrs][constant[description]] + constant[ : ]] + call[call[name[vv].split, parameter[constant[_]]]][<ast.UnaryOp object at 0x7da1b1d4df30>]] + constant[ of ensemble]]
return[name[dsOut]] | keyword[def] identifier[ensemble_mean_std_max_min] ( identifier[ens] ):
literal[string]
identifier[dsOut] = identifier[ens] . identifier[drop] ( identifier[ens] . identifier[data_vars] )
keyword[for] identifier[v] keyword[in] identifier[ens] . identifier[data_vars] :
identifier[dsOut] [ identifier[v] + literal[string] ]= identifier[ens] [ identifier[v] ]. identifier[mean] ( identifier[dim] = literal[string] )
identifier[dsOut] [ identifier[v] + literal[string] ]= identifier[ens] [ identifier[v] ]. identifier[std] ( identifier[dim] = literal[string] )
identifier[dsOut] [ identifier[v] + literal[string] ]= identifier[ens] [ identifier[v] ]. identifier[max] ( identifier[dim] = literal[string] )
identifier[dsOut] [ identifier[v] + literal[string] ]= identifier[ens] [ identifier[v] ]. identifier[min] ( identifier[dim] = literal[string] )
keyword[for] identifier[vv] keyword[in] identifier[dsOut] . identifier[data_vars] :
identifier[dsOut] [ identifier[vv] ]. identifier[attrs] = identifier[ens] [ identifier[v] ]. identifier[attrs]
keyword[if] literal[string] keyword[in] identifier[dsOut] [ identifier[vv] ]. identifier[attrs] . identifier[keys] ():
identifier[vv] . identifier[split] ()
identifier[dsOut] [ identifier[vv] ]. identifier[attrs] [ literal[string] ]= identifier[dsOut] [ identifier[vv] ]. identifier[attrs] [ literal[string] ]+ literal[string] + identifier[vv] . identifier[split] ( literal[string] )[
- literal[int] ]+ literal[string]
keyword[return] identifier[dsOut] | def ensemble_mean_std_max_min(ens):
"""Calculate ensemble statistics between a results from an ensemble of climate simulations
Returns a dataset containing ensemble mean, standard-deviation,
minimum and maximum for input climate simulations.
Parameters
----------
ens : Ensemble dataset (see xclim.utils.create_ensemble)
Returns
-------
    xarray dataset containing data variables of ensemble statistics
Examples
--------
>>> from xclim import utils
>>> import glob
>>> ncfiles = glob.glob('/*tas*.nc')
Create ensemble dataset
>>> ens = utils.create_ensemble(ncfiles)
Calculate ensemble statistics
    >>> ens_mean_std = utils.ensemble_mean_std_max_min(ens)
>>> print(ens_mean_std['tas_mean'])
"""
dsOut = ens.drop(ens.data_vars)
for v in ens.data_vars:
dsOut[v + '_mean'] = ens[v].mean(dim='realization')
dsOut[v + '_stdev'] = ens[v].std(dim='realization')
dsOut[v + '_max'] = ens[v].max(dim='realization')
dsOut[v + '_min'] = ens[v].min(dim='realization')
for vv in dsOut.data_vars:
dsOut[vv].attrs = ens[v].attrs
if 'description' in dsOut[vv].attrs.keys():
vv.split()
dsOut[vv].attrs['description'] = dsOut[vv].attrs['description'] + ' : ' + vv.split('_')[-1] + ' of ensemble' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['vv']] # depends on [control=['for'], data=['v']]
return dsOut |
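A small sketch exercising the helper on a toy xarray ensemble; it assumes the function above is in scope and that the dataset carries a 'realization' dimension, as the function requires.

import numpy as np
import xarray as xr

ens = xr.Dataset(
    {"tas": (("realization", "time"), np.random.rand(3, 4))},
    coords={"realization": [0, 1, 2], "time": range(4)},
)
stats = ensemble_mean_std_max_min(ens)
print(sorted(stats.data_vars))
# ['tas_max', 'tas_mean', 'tas_min', 'tas_stdev']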
def _get_span(self, m):
"""
Gets a tuple that identifies a span for the specific mention class
that m belongs to.
"""
return (m.sentence.id, m.char_start, m.char_end) | def function[_get_span, parameter[self, m]]:
constant[
Gets a tuple that identifies a span for the specific mention class
that m belongs to.
]
return[tuple[[<ast.Attribute object at 0x7da204622bf0>, <ast.Attribute object at 0x7da204621b70>, <ast.Attribute object at 0x7da204623580>]]] | keyword[def] identifier[_get_span] ( identifier[self] , identifier[m] ):
literal[string]
keyword[return] ( identifier[m] . identifier[sentence] . identifier[id] , identifier[m] . identifier[char_start] , identifier[m] . identifier[char_end] ) | def _get_span(self, m):
"""
Gets a tuple that identifies a span for the specific mention class
that m belongs to.
"""
return (m.sentence.id, m.char_start, m.char_end) |
def init_optimizer(self, optimizer):
"""Init the optimizer.
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an
optimizer object or a dict used for constructing the optimizer.
Returns:
:obj:`~torch.optim.Optimizer`: An optimizer object.
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD'>
"""
if isinstance(optimizer, dict):
optimizer = obj_from_dict(
optimizer, torch.optim, dict(params=self.model.parameters()))
elif not isinstance(optimizer, torch.optim.Optimizer):
raise TypeError(
'optimizer must be either an Optimizer object or a dict, '
'but got {}'.format(type(optimizer)))
return optimizer | def function[init_optimizer, parameter[self, optimizer]]:
constant[Init the optimizer.
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an
optimizer object or a dict used for constructing the optimizer.
Returns:
:obj:`~torch.optim.Optimizer`: An optimizer object.
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD'>
]
if call[name[isinstance], parameter[name[optimizer], name[dict]]] begin[:]
variable[optimizer] assign[=] call[name[obj_from_dict], parameter[name[optimizer], name[torch].optim, call[name[dict], parameter[]]]]
return[name[optimizer]] | keyword[def] identifier[init_optimizer] ( identifier[self] , identifier[optimizer] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[optimizer] , identifier[dict] ):
identifier[optimizer] = identifier[obj_from_dict] (
identifier[optimizer] , identifier[torch] . identifier[optim] , identifier[dict] ( identifier[params] = identifier[self] . identifier[model] . identifier[parameters] ()))
keyword[elif] keyword[not] identifier[isinstance] ( identifier[optimizer] , identifier[torch] . identifier[optim] . identifier[Optimizer] ):
keyword[raise] identifier[TypeError] (
literal[string]
literal[string] . identifier[format] ( identifier[type] ( identifier[optimizer] )))
keyword[return] identifier[optimizer] | def init_optimizer(self, optimizer):
"""Init the optimizer.
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an
optimizer object or a dict used for constructing the optimizer.
Returns:
:obj:`~torch.optim.Optimizer`: An optimizer object.
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD'>
"""
if isinstance(optimizer, dict):
optimizer = obj_from_dict(optimizer, torch.optim, dict(params=self.model.parameters())) # depends on [control=['if'], data=[]]
elif not isinstance(optimizer, torch.optim.Optimizer):
raise TypeError('optimizer must be either an Optimizer object or a dict, but got {}'.format(type(optimizer))) # depends on [control=['if'], data=[]]
return optimizer |
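Roughly what obj_from_dict does on the dict path, sketched with the standard torch API; the tiny model here is a hypothetical stand-in for self.model.

import torch

model = torch.nn.Linear(4, 2)
cfg = dict(type='SGD', lr=0.01, momentum=0.9)
opt_cls = getattr(torch.optim, cfg.pop('type'))  # resolve the 'type' key
optimizer = opt_cls(model.parameters(), **cfg)
print(type(optimizer).__name__)  # SGD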
def value(self):
"""
Return the value of this SpinBox.
"""
if self.opts['int']:
return int(self.val)
else:
return float(self.val) | def function[value, parameter[self]]:
constant[
Return the value of this SpinBox.
]
if call[name[self].opts][constant[int]] begin[:]
return[call[name[int], parameter[name[self].val]]] | keyword[def] identifier[value] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[opts] [ literal[string] ]:
keyword[return] identifier[int] ( identifier[self] . identifier[val] )
keyword[else] :
keyword[return] identifier[float] ( identifier[self] . identifier[val] ) | def value(self):
"""
Return the value of this SpinBox.
"""
if self.opts['int']:
return int(self.val) # depends on [control=['if'], data=[]]
else:
return float(self.val) |
def intermediate_cpfs(self) -> List[CPF]:
'''Returns list of intermediate-fluent CPFs in level order.'''
_, cpfs = self.cpfs
interm_cpfs = [cpf for cpf in cpfs if cpf.name in self.intermediate_fluents]
interm_cpfs = sorted(interm_cpfs, key=lambda cpf: (self.intermediate_fluents[cpf.name].level, cpf.name))
return interm_cpfs | def function[intermediate_cpfs, parameter[self]]:
constant[Returns list of intermediate-fluent CPFs in level order.]
<ast.Tuple object at 0x7da1b0926140> assign[=] name[self].cpfs
variable[interm_cpfs] assign[=] <ast.ListComp object at 0x7da1b0925930>
variable[interm_cpfs] assign[=] call[name[sorted], parameter[name[interm_cpfs]]]
return[name[interm_cpfs]] | keyword[def] identifier[intermediate_cpfs] ( identifier[self] )-> identifier[List] [ identifier[CPF] ]:
literal[string]
identifier[_] , identifier[cpfs] = identifier[self] . identifier[cpfs]
identifier[interm_cpfs] =[ identifier[cpf] keyword[for] identifier[cpf] keyword[in] identifier[cpfs] keyword[if] identifier[cpf] . identifier[name] keyword[in] identifier[self] . identifier[intermediate_fluents] ]
identifier[interm_cpfs] = identifier[sorted] ( identifier[interm_cpfs] , identifier[key] = keyword[lambda] identifier[cpf] :( identifier[self] . identifier[intermediate_fluents] [ identifier[cpf] . identifier[name] ]. identifier[level] , identifier[cpf] . identifier[name] ))
keyword[return] identifier[interm_cpfs] | def intermediate_cpfs(self) -> List[CPF]:
"""Returns list of intermediate-fluent CPFs in level order."""
(_, cpfs) = self.cpfs
interm_cpfs = [cpf for cpf in cpfs if cpf.name in self.intermediate_fluents]
interm_cpfs = sorted(interm_cpfs, key=lambda cpf: (self.intermediate_fluents[cpf.name].level, cpf.name))
return interm_cpfs |
def sink(wrapped):
"""Creates an SPL operator with a single input port.
    An SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead.
"""
if not inspect.isfunction(wrapped):
raise TypeError('A function is required')
return _wrapforsplop(_OperatorType.Sink, wrapped, 'position', False) | def function[sink, parameter[wrapped]]:
constant[Creates an SPL operator with a single input port.
    An SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead.
]
if <ast.UnaryOp object at 0x7da1b23455d0> begin[:]
<ast.Raise object at 0x7da1b23460b0>
return[call[name[_wrapforsplop], parameter[name[_OperatorType].Sink, name[wrapped], constant[position], constant[False]]]] | keyword[def] identifier[sink] ( identifier[wrapped] ):
literal[string]
keyword[if] keyword[not] identifier[inspect] . identifier[isfunction] ( identifier[wrapped] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[_wrapforsplop] ( identifier[_OperatorType] . identifier[Sink] , identifier[wrapped] , literal[string] , keyword[False] ) | def sink(wrapped):
"""Creates an SPL operator with a single input port.
    An SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead.
"""
if not inspect.isfunction(wrapped):
raise TypeError('A function is required') # depends on [control=['if'], data=[]]
return _wrapforsplop(_OperatorType.Sink, wrapped, 'position', False) |
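A hedged usage sketch assuming the decorator above is importable; each arriving tuple is unpacked into the function's positional parameters ('position' style). Per the deprecation note, @spl.for_each is the preferred spelling in 1.8+.

@sink
def print_tuple(*attributes):
    print(attributes)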
def _assign_numbers(self):
"""
Assign numbers in preparation for validating these receipts.
WARNING: Don't call the method manually unless you know what you're
doing!
"""
first = self.select_related('point_of_sales', 'receipt_type').first()
next_num = Receipt.objects.fetch_last_receipt_number(
first.point_of_sales,
first.receipt_type,
) + 1
for receipt in self.filter(receipt_number__isnull=True):
# Atomically update receipt number
Receipt.objects.filter(
pk=receipt.id,
receipt_number__isnull=True,
).update(
receipt_number=next_num,
)
next_num += 1 | def function[_assign_numbers, parameter[self]]:
constant[
Assign numbers in preparation for validating these receipts.
WARNING: Don't call the method manually unless you know what you're
doing!
]
variable[first] assign[=] call[call[name[self].select_related, parameter[constant[point_of_sales], constant[receipt_type]]].first, parameter[]]
variable[next_num] assign[=] binary_operation[call[name[Receipt].objects.fetch_last_receipt_number, parameter[name[first].point_of_sales, name[first].receipt_type]] + constant[1]]
for taget[name[receipt]] in starred[call[name[self].filter, parameter[]]] begin[:]
call[call[name[Receipt].objects.filter, parameter[]].update, parameter[]]
<ast.AugAssign object at 0x7da1b1a1c430> | keyword[def] identifier[_assign_numbers] ( identifier[self] ):
literal[string]
identifier[first] = identifier[self] . identifier[select_related] ( literal[string] , literal[string] ). identifier[first] ()
identifier[next_num] = identifier[Receipt] . identifier[objects] . identifier[fetch_last_receipt_number] (
identifier[first] . identifier[point_of_sales] ,
identifier[first] . identifier[receipt_type] ,
)+ literal[int]
keyword[for] identifier[receipt] keyword[in] identifier[self] . identifier[filter] ( identifier[receipt_number__isnull] = keyword[True] ):
identifier[Receipt] . identifier[objects] . identifier[filter] (
identifier[pk] = identifier[receipt] . identifier[id] ,
identifier[receipt_number__isnull] = keyword[True] ,
). identifier[update] (
identifier[receipt_number] = identifier[next_num] ,
)
identifier[next_num] += literal[int] | def _assign_numbers(self):
"""
Assign numbers in preparation for validating these receipts.
WARNING: Don't call the method manually unless you know what you're
doing!
"""
first = self.select_related('point_of_sales', 'receipt_type').first()
next_num = Receipt.objects.fetch_last_receipt_number(first.point_of_sales, first.receipt_type) + 1
for receipt in self.filter(receipt_number__isnull=True):
# Atomically update receipt number
Receipt.objects.filter(pk=receipt.id, receipt_number__isnull=True).update(receipt_number=next_num)
next_num += 1 # depends on [control=['for'], data=['receipt']] |
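The receipt_number__isnull=True guard inside the filter is what makes the numbering safe under concurrency: update() returns the number of rows changed, so a sketch like the following (names assume the model above) can detect that another worker already numbered the receipt.

rows = Receipt.objects.filter(
    pk=receipt.id,
    receipt_number__isnull=True,
).update(receipt_number=next_num)
if rows == 0:
    pass  # another process assigned this receipt's number first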
def open(self, output, opts=None):
"""Use this to set where to write to. output can be a
file object or a string. This code raises IOError on error."""
if isinstance(output, io.TextIOWrapper) or \
isinstance(output, io.StringIO) or \
output == sys.stdout:
pass
elif isinstance(output, 'string'.__class__): # FIXME
output = open(output, 'w')
else:
raise IOError("Invalid output type (%s) for %s" %
(output.__class__.__name__, output))
# raise IOError("Invalid output type (%s) for %s" % (type(output),
# output))
self.output = output
return | def function[open, parameter[self, output, opts]]:
constant[Use this to set where to write to. output can be a
file object or a string. This code raises IOError on error.]
if <ast.BoolOp object at 0x7da1b0373d90> begin[:]
pass
name[self].output assign[=] name[output]
return[None] | keyword[def] identifier[open] ( identifier[self] , identifier[output] , identifier[opts] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[output] , identifier[io] . identifier[TextIOWrapper] ) keyword[or] identifier[isinstance] ( identifier[output] , identifier[io] . identifier[StringIO] ) keyword[or] identifier[output] == identifier[sys] . identifier[stdout] :
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[output] , literal[string] . identifier[__class__] ):
identifier[output] = identifier[open] ( identifier[output] , literal[string] )
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] %
( identifier[output] . identifier[__class__] . identifier[__name__] , identifier[output] ))
identifier[self] . identifier[output] = identifier[output]
keyword[return] | def open(self, output, opts=None):
"""Use this to set where to write to. output can be a
file object or a string. This code raises IOError on error."""
if isinstance(output, io.TextIOWrapper) or isinstance(output, io.StringIO) or output == sys.stdout:
pass # depends on [control=['if'], data=[]]
elif isinstance(output, 'string'.__class__): # FIXME
output = open(output, 'w') # depends on [control=['if'], data=[]]
else:
raise IOError('Invalid output type (%s) for %s' % (output.__class__.__name__, output))
# raise IOError("Invalid output type (%s) for %s" % (type(output),
# output))
self.output = output
return |
def _GetRecord(self, offset, record_size):
"""Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record.
"""
record_header = "<4sLQQL"
get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0]
url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0]
if url_offset in [0xFF, 0xFE]:
return None
data_offset = get4(offset + 68)
data_size = get4(offset + 72)
start_pos = offset + data_offset
data = struct.unpack("{0}s".format(data_size),
self.input_dat[start_pos:start_pos + data_size])[0]
fmt = record_header
unknown_size = url_offset - struct.calcsize(fmt)
fmt += "{0}s".format(unknown_size)
fmt += "{0}s".format(record_size - struct.calcsize(fmt))
dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size])
header, blocks, mtime, ctime, ftime, _, url = dat
url = url.split(b"\x00")[0].decode("utf-8")
if mtime:
mtime = mtime // 10 - WIN_UNIX_DIFF_MSECS
if ctime:
ctime = ctime // 10 - WIN_UNIX_DIFF_MSECS
return {
"header": header, # the header
"blocks": blocks, # number of blocks
"urloffset": url_offset, # offset of URL in file
"data_offset": data_offset, # offset for start of data
"data_size": data_size, # size of data
"data": data, # actual data
"mtime": mtime, # modified time
"ctime": ctime, # created time
"ftime": ftime, # file time
"url": url # the url visited
} | def function[_GetRecord, parameter[self, offset, record_size]]:
constant[Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record.
]
variable[record_header] assign[=] constant[<4sLQQL]
variable[get4] assign[=] <ast.Lambda object at 0x7da1b1b47580>
variable[url_offset] assign[=] call[call[name[struct].unpack, parameter[constant[B], call[name[self].input_dat][<ast.Slice object at 0x7da1b1b463e0>]]]][constant[0]]
if compare[name[url_offset] in list[[<ast.Constant object at 0x7da1b1b442b0>, <ast.Constant object at 0x7da1b1b459f0>]]] begin[:]
return[constant[None]]
variable[data_offset] assign[=] call[name[get4], parameter[binary_operation[name[offset] + constant[68]]]]
variable[data_size] assign[=] call[name[get4], parameter[binary_operation[name[offset] + constant[72]]]]
variable[start_pos] assign[=] binary_operation[name[offset] + name[data_offset]]
variable[data] assign[=] call[call[name[struct].unpack, parameter[call[constant[{0}s].format, parameter[name[data_size]]], call[name[self].input_dat][<ast.Slice object at 0x7da1b1b46860>]]]][constant[0]]
variable[fmt] assign[=] name[record_header]
variable[unknown_size] assign[=] binary_operation[name[url_offset] - call[name[struct].calcsize, parameter[name[fmt]]]]
<ast.AugAssign object at 0x7da1b1b46cb0>
<ast.AugAssign object at 0x7da1b1b451b0>
variable[dat] assign[=] call[name[struct].unpack, parameter[name[fmt], call[name[self].input_dat][<ast.Slice object at 0x7da1b1b44730>]]]
<ast.Tuple object at 0x7da1b1b47c70> assign[=] name[dat]
variable[url] assign[=] call[call[call[name[url].split, parameter[constant[b'\x00']]]][constant[0]].decode, parameter[constant[utf-8]]]
if name[mtime] begin[:]
variable[mtime] assign[=] binary_operation[binary_operation[name[mtime] <ast.FloorDiv object at 0x7da2590d6bc0> constant[10]] - name[WIN_UNIX_DIFF_MSECS]]
if name[ctime] begin[:]
variable[ctime] assign[=] binary_operation[binary_operation[name[ctime] <ast.FloorDiv object at 0x7da2590d6bc0> constant[10]] - name[WIN_UNIX_DIFF_MSECS]]
return[dictionary[[<ast.Constant object at 0x7da1b1b45930>, <ast.Constant object at 0x7da1b1b46fb0>, <ast.Constant object at 0x7da1b1b45b70>, <ast.Constant object at 0x7da1b1b47310>, <ast.Constant object at 0x7da1b1b44970>, <ast.Constant object at 0x7da1b1b47940>, <ast.Constant object at 0x7da1b1b453c0>, <ast.Constant object at 0x7da1b1b479a0>, <ast.Constant object at 0x7da1b1b47b20>, <ast.Constant object at 0x7da1b1b455d0>], [<ast.Name object at 0x7da1b1b44fa0>, <ast.Name object at 0x7da1b1b07b20>, <ast.Name object at 0x7da1b1b06380>, <ast.Name object at 0x7da1b1b05f90>, <ast.Name object at 0x7da1b1b071c0>, <ast.Name object at 0x7da1b1b06500>, <ast.Name object at 0x7da1b1b071f0>, <ast.Name object at 0x7da1b1b04610>, <ast.Name object at 0x7da1b1b05150>, <ast.Name object at 0x7da1b1b065f0>]]] | keyword[def] identifier[_GetRecord] ( identifier[self] , identifier[offset] , identifier[record_size] ):
literal[string]
identifier[record_header] = literal[string]
identifier[get4] = keyword[lambda] identifier[x] : identifier[struct] . identifier[unpack] ( literal[string] , identifier[self] . identifier[input_dat] [ identifier[x] : identifier[x] + literal[int] ])[ literal[int] ]
identifier[url_offset] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[self] . identifier[input_dat] [ identifier[offset] + literal[int] : identifier[offset] + literal[int] ])[ literal[int] ]
keyword[if] identifier[url_offset] keyword[in] [ literal[int] , literal[int] ]:
keyword[return] keyword[None]
identifier[data_offset] = identifier[get4] ( identifier[offset] + literal[int] )
identifier[data_size] = identifier[get4] ( identifier[offset] + literal[int] )
identifier[start_pos] = identifier[offset] + identifier[data_offset]
identifier[data] = identifier[struct] . identifier[unpack] ( literal[string] . identifier[format] ( identifier[data_size] ),
identifier[self] . identifier[input_dat] [ identifier[start_pos] : identifier[start_pos] + identifier[data_size] ])[ literal[int] ]
identifier[fmt] = identifier[record_header]
identifier[unknown_size] = identifier[url_offset] - identifier[struct] . identifier[calcsize] ( identifier[fmt] )
identifier[fmt] += literal[string] . identifier[format] ( identifier[unknown_size] )
identifier[fmt] += literal[string] . identifier[format] ( identifier[record_size] - identifier[struct] . identifier[calcsize] ( identifier[fmt] ))
identifier[dat] = identifier[struct] . identifier[unpack] ( identifier[fmt] , identifier[self] . identifier[input_dat] [ identifier[offset] : identifier[offset] + identifier[record_size] ])
identifier[header] , identifier[blocks] , identifier[mtime] , identifier[ctime] , identifier[ftime] , identifier[_] , identifier[url] = identifier[dat]
identifier[url] = identifier[url] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[decode] ( literal[string] )
keyword[if] identifier[mtime] :
identifier[mtime] = identifier[mtime] // literal[int] - identifier[WIN_UNIX_DIFF_MSECS]
keyword[if] identifier[ctime] :
identifier[ctime] = identifier[ctime] // literal[int] - identifier[WIN_UNIX_DIFF_MSECS]
keyword[return] {
literal[string] : identifier[header] ,
literal[string] : identifier[blocks] ,
literal[string] : identifier[url_offset] ,
literal[string] : identifier[data_offset] ,
literal[string] : identifier[data_size] ,
literal[string] : identifier[data] ,
literal[string] : identifier[mtime] ,
literal[string] : identifier[ctime] ,
literal[string] : identifier[ftime] ,
literal[string] : identifier[url]
} | def _GetRecord(self, offset, record_size):
"""Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record.
"""
record_header = '<4sLQQL'
get4 = lambda x: struct.unpack('<L', self.input_dat[x:x + 4])[0]
url_offset = struct.unpack('B', self.input_dat[offset + 52:offset + 53])[0]
if url_offset in [255, 254]:
return None # depends on [control=['if'], data=[]]
data_offset = get4(offset + 68)
data_size = get4(offset + 72)
start_pos = offset + data_offset
data = struct.unpack('{0}s'.format(data_size), self.input_dat[start_pos:start_pos + data_size])[0]
fmt = record_header
unknown_size = url_offset - struct.calcsize(fmt)
fmt += '{0}s'.format(unknown_size)
fmt += '{0}s'.format(record_size - struct.calcsize(fmt))
dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size])
(header, blocks, mtime, ctime, ftime, _, url) = dat
url = url.split(b'\x00')[0].decode('utf-8')
if mtime:
mtime = mtime // 10 - WIN_UNIX_DIFF_MSECS # depends on [control=['if'], data=[]]
if ctime:
ctime = ctime // 10 - WIN_UNIX_DIFF_MSECS # depends on [control=['if'], data=[]] # the header
# number of blocks
# offset of URL in file
# offset for start of data
# size of data
# actual data
# modified time
# created time
# file time
# the url visited
return {'header': header, 'blocks': blocks, 'urloffset': url_offset, 'data_offset': data_offset, 'data_size': data_size, 'data': data, 'mtime': mtime, 'ctime': ctime, 'ftime': ftime, 'url': url} |
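A worked check of the fixed header layout; the 28-byte size follows from 4s + L + Q + Q + L under little-endian packing, before the variable-length tail is appended to fmt.

import struct

fmt = "<4sLQQL"
assert struct.calcsize(fmt) == 28
raw = struct.pack(fmt, b"URL ", 2, 0, 0, 0)
header, blocks, mtime, ctime, ftime = struct.unpack(fmt, raw)
print(header, blocks)  # b'URL ' 2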
def get_hashes_from_search(self, query, page=None):
""" Get the scan results for a file.
Even if you do not have a Private Mass API key that you can use, you can still automate VirusTotal Intelligence
searches pretty much in the same way that the searching for files api call works.
        :param query: a VirusTotal Intelligence search string in accordance with the file search documentation.
            <https://www.virustotal.com/intelligence/help/file-search/>
        :param page: the next_page property of the results of a previously issued query to this API. This parameter
            should not be provided if it is the very first query to the API, i.e. if we are retrieving the
            first page of results.
        apikey: the API key associated with a VirusTotal Community account with VirusTotal Intelligence privileges.
"""
params = {'query': query, 'apikey': self.api_key, 'page': page}
try:
response = requests.get(self.base + 'search/programmatic/', params=params, proxies=self.proxies)
except requests.RequestException as e:
return dict(error=e.message)
return response.json()['next_page'], response | def function[get_hashes_from_search, parameter[self, query, page]]:
constant[ Get the scan results for a file.
Even if you do not have a Private Mass API key that you can use, you can still automate VirusTotal Intelligence
searches pretty much in the same way that the searching for files api call works.
        :param query: a VirusTotal Intelligence search string in accordance with the file search documentation.
            <https://www.virustotal.com/intelligence/help/file-search/>
        :param page: the next_page property of the results of a previously issued query to this API. This parameter
            should not be provided if it is the very first query to the API, i.e. if we are retrieving the
            first page of results.
        apikey: the API key associated with a VirusTotal Community account with VirusTotal Intelligence privileges.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b17d7f40>, <ast.Constant object at 0x7da1b17d6a70>, <ast.Constant object at 0x7da1b17d4c10>], [<ast.Name object at 0x7da1b17d6110>, <ast.Attribute object at 0x7da1b17d6cb0>, <ast.Name object at 0x7da1b17d4c70>]]
<ast.Try object at 0x7da1b17d71c0>
return[tuple[[<ast.Subscript object at 0x7da1b17d4ca0>, <ast.Name object at 0x7da1b17d7700>]]] | keyword[def] identifier[get_hashes_from_search] ( identifier[self] , identifier[query] , identifier[page] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[query] , literal[string] : identifier[self] . identifier[api_key] , literal[string] : identifier[page] }
keyword[try] :
identifier[response] = identifier[requests] . identifier[get] ( identifier[self] . identifier[base] + literal[string] , identifier[params] = identifier[params] , identifier[proxies] = identifier[self] . identifier[proxies] )
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[e] :
keyword[return] identifier[dict] ( identifier[error] = identifier[e] . identifier[message] )
keyword[return] identifier[response] . identifier[json] ()[ literal[string] ], identifier[response] | def get_hashes_from_search(self, query, page=None):
""" Get the scan results for a file.
Even if you do not have a Private Mass API key that you can use, you can still automate VirusTotal Intelligence
searches pretty much in the same way that the searching for files api call works.
        :param query: a VirusTotal Intelligence search string in accordance with the file search documentation.
            <https://www.virustotal.com/intelligence/help/file-search/>
        :param page: the next_page property of the results of a previously issued query to this API. This parameter
            should not be provided if it is the very first query to the API, i.e. if we are retrieving the
            first page of results.
        apikey: the API key associated with a VirusTotal Community account with VirusTotal Intelligence privileges.
"""
params = {'query': query, 'apikey': self.api_key, 'page': page}
try:
response = requests.get(self.base + 'search/programmatic/', params=params, proxies=self.proxies) # depends on [control=['try'], data=[]]
except requests.RequestException as e:
return dict(error=e.message) # depends on [control=['except'], data=['e']]
return (response.json()['next_page'], response) |
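Hedged pagination sketch; `client` is a hypothetical instance of the class above, constructed elsewhere, and the query string is illustrative only.

page = None
while True:
    page, response = client.get_hashes_from_search('type:peexe positives:5+', page)
    print(response.text)
    if not page:
        break  # the API returned no next_page token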
def write(path, content, encoding="UTF-8", append=False, raw=False):
"""Write *content* to file *path*"""
mode = 'wb' if not append else 'ab'
with OPEN_FUNC(path, mode) as _file:
if raw:
import shutil
shutil.copyfileobj(content, _file)
else:
_file.write(content.encode(encoding)) | def function[write, parameter[path, content, encoding, append, raw]]:
constant[Write *content* to file *path*]
variable[mode] assign[=] <ast.IfExp object at 0x7da18f721ea0>
with call[name[OPEN_FUNC], parameter[name[path], name[mode]]] begin[:]
if name[raw] begin[:]
import module[shutil]
call[name[shutil].copyfileobj, parameter[name[content], name[_file]]] | keyword[def] identifier[write] ( identifier[path] , identifier[content] , identifier[encoding] = literal[string] , identifier[append] = keyword[False] , identifier[raw] = keyword[False] ):
literal[string]
identifier[mode] = literal[string] keyword[if] keyword[not] identifier[append] keyword[else] literal[string]
keyword[with] identifier[OPEN_FUNC] ( identifier[path] , identifier[mode] ) keyword[as] identifier[_file] :
keyword[if] identifier[raw] :
keyword[import] identifier[shutil]
identifier[shutil] . identifier[copyfileobj] ( identifier[content] , identifier[_file] )
keyword[else] :
identifier[_file] . identifier[write] ( identifier[content] . identifier[encode] ( identifier[encoding] )) | def write(path, content, encoding='UTF-8', append=False, raw=False):
"""Write *content* to file *path*"""
mode = 'wb' if not append else 'ab'
with OPEN_FUNC(path, mode) as _file:
if raw:
import shutil
shutil.copyfileobj(content, _file) # depends on [control=['if'], data=[]]
else:
_file.write(content.encode(encoding)) # depends on [control=['with'], data=['_file']] |
def colorize_text(self, text):
"""Adds escape sequences to colorize text and make it
beautiful. To colorize text, prefix the text you want to color
with the color (capitalized) wrapped in double angle brackets
(i.e.: <<GREEN>>). End your string with <<NORMAL>>. If you
don't, it will be done for you (assuming you used a color code
in your string."""
# Take note of where the escape sequences are.
rnormal = text.rfind('<<NORMAL')
rany = text.rfind('<<')
# Put in the escape sequences.
for color, code in self.colors.items():
text = text.replace('<<%s>>' % color, code)
# Make sure that the last sequence is a NORMAL sequence.
if rany > -1 and rnormal < rany:
text += self.colors['NORMAL']
return text | def function[colorize_text, parameter[self, text]]:
constant[Adds escape sequences to colorize text and make it
beautiful. To colorize text, prefix the text you want to color
with the color (capitalized) wrapped in double angle brackets
(i.e.: <<GREEN>>). End your string with <<NORMAL>>. If you
don't, it will be done for you (assuming you used a color code
in your string.]
variable[rnormal] assign[=] call[name[text].rfind, parameter[constant[<<NORMAL]]]
variable[rany] assign[=] call[name[text].rfind, parameter[constant[<<]]]
for taget[tuple[[<ast.Name object at 0x7da18c4cf250>, <ast.Name object at 0x7da18c4cde40>]]] in starred[call[name[self].colors.items, parameter[]]] begin[:]
variable[text] assign[=] call[name[text].replace, parameter[binary_operation[constant[<<%s>>] <ast.Mod object at 0x7da2590d6920> name[color]], name[code]]]
if <ast.BoolOp object at 0x7da18c4cd450> begin[:]
<ast.AugAssign object at 0x7da18c4cc640>
return[name[text]] | keyword[def] identifier[colorize_text] ( identifier[self] , identifier[text] ):
literal[string]
identifier[rnormal] = identifier[text] . identifier[rfind] ( literal[string] )
identifier[rany] = identifier[text] . identifier[rfind] ( literal[string] )
keyword[for] identifier[color] , identifier[code] keyword[in] identifier[self] . identifier[colors] . identifier[items] ():
identifier[text] = identifier[text] . identifier[replace] ( literal[string] % identifier[color] , identifier[code] )
keyword[if] identifier[rany] >- literal[int] keyword[and] identifier[rnormal] < identifier[rany] :
identifier[text] += identifier[self] . identifier[colors] [ literal[string] ]
keyword[return] identifier[text] | def colorize_text(self, text):
"""Adds escape sequences to colorize text and make it
beautiful. To colorize text, prefix the text you want to color
with the color (capitalized) wrapped in double angle brackets
(i.e.: <<GREEN>>). End your string with <<NORMAL>>. If you
don't, it will be done for you (assuming you used a color code
in your string."""
# Take note of where the escape sequences are.
rnormal = text.rfind('<<NORMAL')
rany = text.rfind('<<')
# Put in the escape sequences.
for (color, code) in self.colors.items():
text = text.replace('<<%s>>' % color, code) # depends on [control=['for'], data=[]]
# Make sure that the last sequence is a NORMAL sequence.
if rany > -1 and rnormal < rany:
text += self.colors['NORMAL'] # depends on [control=['if'], data=[]]
return text |
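Standalone replay of the substitution step with a two-entry color table; real instances would populate `colors` with their full ANSI escape map.

colors = {'GREEN': '\033[32m', 'NORMAL': '\033[0m'}
text = '<<GREEN>>ok'
for name, code in colors.items():
    text = text.replace('<<%s>>' % name, code)
text += colors['NORMAL']  # the method appends NORMAL when it is missing
print(repr(text))         # '\x1b[32mok\x1b[0m'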
def hmac_sha256(secret, message):
"""
    Get the HMAC-SHA256 hash of a string under the key ``secret``
    :param:
        * secret: (string) key used by the hash algorithm
        * message: (string) string to be hashed
    :return:
        * hashed_str: SHA-256 hash value
"""
hashed_str = hmac.new(secret.encode('utf-8'),
message.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest()
return hashed_str | def function[hmac_sha256, parameter[secret, message]]:
constant[
    Get the HMAC-SHA256 hash of a string under the key ``secret``
    :param:
        * secret: (string) key used by the hash algorithm
        * message: (string) string to be hashed
    :return:
        * hashed_str: SHA-256 hash value
]
variable[hashed_str] assign[=] call[call[name[hmac].new, parameter[call[name[secret].encode, parameter[constant[utf-8]]], call[name[message].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]
return[name[hashed_str]] | keyword[def] identifier[hmac_sha256] ( identifier[secret] , identifier[message] ):
literal[string]
identifier[hashed_str] = identifier[hmac] . identifier[new] ( identifier[secret] . identifier[encode] ( literal[string] ),
identifier[message] . identifier[encode] ( literal[string] ),
identifier[digestmod] = identifier[hashlib] . identifier[sha256] ). identifier[hexdigest] ()
keyword[return] identifier[hashed_str] | def hmac_sha256(secret, message):
"""
    Get the HMAC-SHA256 hash of a string under the key ``secret``
    :param:
        * secret: (string) key used by the hash algorithm
        * message: (string) string to be hashed
    :return:
        * hashed_str: SHA-256 hash value
"""
hashed_str = hmac.new(secret.encode('utf-8'), message.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()
return hashed_str |
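Sanity check against the standard library, assuming the helper above is in scope; both sides compute the same keyed digest.

import hashlib
import hmac

expected = hmac.new(b'key', b'msg', hashlib.sha256).hexdigest()
assert hmac_sha256('key', 'msg') == expected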
def dict_of(validate_key, validate_item):
"""Returns a validator function that succeeds only if the input is a dict, and each key and value in the dict passes
as input to the provided validators validate_key and validate_item, respectively.
:param callable validate_key: the validator function for keys in the dict
    :param callable validate_item: the validator function for values in the dict
    :returns: a function which returns True if its input is a dict of valid items, and raises TypeError otherwise
:rtype: callable
"""
def validate(value, should_raise=True):
validate_type = is_type(dict)
if not validate_type(value, should_raise=should_raise):
return False
for key, item in value.items():
try:
validate_key(key)
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, "dict contained an invalid key")
raise
return False
try:
validate_item(item)
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, "dict contained an invalid value")
raise
return False
return True
return validate | def function[dict_of, parameter[validate_key, validate_item]]:
constant[Returns a validator function that succeeds only if the input is a dict, and each key and value in the dict passes
as input to the provided validators validate_key and validate_item, respectively.
:param callable validate_key: the validator function for keys in the dict
    :param callable validate_item: the validator function for values in the dict
    :returns: a function which returns True if its input is a dict of valid items, and raises TypeError otherwise
:rtype: callable
]
def function[validate, parameter[value, should_raise]]:
variable[validate_type] assign[=] call[name[is_type], parameter[name[dict]]]
if <ast.UnaryOp object at 0x7da20c7ca3e0> begin[:]
return[constant[False]]
for taget[tuple[[<ast.Name object at 0x7da20e9576a0>, <ast.Name object at 0x7da20e956350>]]] in starred[call[name[value].items, parameter[]]] begin[:]
<ast.Try object at 0x7da20e957310>
<ast.Try object at 0x7da20e9540d0>
return[constant[True]]
return[name[validate]] | keyword[def] identifier[dict_of] ( identifier[validate_key] , identifier[validate_item] ):
literal[string]
keyword[def] identifier[validate] ( identifier[value] , identifier[should_raise] = keyword[True] ):
identifier[validate_type] = identifier[is_type] ( identifier[dict] )
keyword[if] keyword[not] identifier[validate_type] ( identifier[value] , identifier[should_raise] = identifier[should_raise] ):
keyword[return] keyword[False]
keyword[for] identifier[key] , identifier[item] keyword[in] identifier[value] . identifier[items] ():
keyword[try] :
identifier[validate_key] ( identifier[key] )
keyword[except] identifier[TypeError] keyword[as] identifier[e] :
keyword[if] identifier[should_raise] :
identifier[samtranslator] . identifier[model] . identifier[exceptions] . identifier[prepend] ( identifier[e] , literal[string] )
keyword[raise]
keyword[return] keyword[False]
keyword[try] :
identifier[validate_item] ( identifier[item] )
keyword[except] identifier[TypeError] keyword[as] identifier[e] :
keyword[if] identifier[should_raise] :
identifier[samtranslator] . identifier[model] . identifier[exceptions] . identifier[prepend] ( identifier[e] , literal[string] )
keyword[raise]
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[return] identifier[validate] | def dict_of(validate_key, validate_item):
"""Returns a validator function that succeeds only if the input is a dict, and each key and value in the dict passes
as input to the provided validators validate_key and validate_item, respectively.
:param callable validate_key: the validator function for keys in the dict
    :param callable validate_item: the validator function for values in the dict
    :returns: a function which returns True if its input is a dict of valid items, and raises TypeError otherwise
:rtype: callable
"""
def validate(value, should_raise=True):
validate_type = is_type(dict)
if not validate_type(value, should_raise=should_raise):
return False # depends on [control=['if'], data=[]]
for (key, item) in value.items():
try:
validate_key(key) # depends on [control=['try'], data=[]]
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, 'dict contained an invalid key')
raise # depends on [control=['if'], data=[]]
return False # depends on [control=['except'], data=['e']]
try:
validate_item(item) # depends on [control=['try'], data=[]]
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, 'dict contained an invalid value')
raise # depends on [control=['if'], data=[]]
return False # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]]
return True
return validate |
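Usage sketch composing the validator with the module's is_type helper (the same helper dict_of uses internally); the key and value types are illustrative.

validate_counts = dict_of(is_type(str), is_type(int))
validate_counts({'a': 1, 'b': 2})   # True
try:
    validate_counts({'a': 'oops'})  # value fails is_type(int)
except TypeError as e:
    print(e)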
def add_granule(self, data, store, workspace=None):
'''Harvest/add a granule into an existing imagemosaic'''
ext = os.path.splitext(data)[-1]
if ext == ".zip":
type = "file.imagemosaic"
upload_data = open(data, 'rb')
headers = {
"Content-type": "application/zip",
"Accept": "application/xml"
}
else:
type = "external.imagemosaic"
upload_data = data if data.startswith("file:") else "file:{data}".format(data=data)
headers = {
"Content-type": "text/plain",
"Accept": "application/xml"
}
params = dict()
workspace_name = workspace
if isinstance(store, basestring):
store_name = store
else:
store_name = store.name
workspace_name = store.workspace.name
if workspace_name is None:
raise ValueError("Must specify workspace")
url = build_url(
self.service_url,
[
"workspaces",
workspace_name,
"coveragestores",
store_name,
type
],
params
)
try:
resp = self.http_request(url, method='post', data=upload_data, headers=headers)
if resp.status_code != 202:
FailedRequestError('Failed to add granule to mosaic {} : {}, {}'.format(store, resp.status_code, resp.text))
self._cache.clear()
finally:
if hasattr(upload_data, "close"):
upload_data.close()
# maybe return a list of all granules?
return None | def function[add_granule, parameter[self, data, store, workspace]]:
constant[Harvest/add a granule into an existing imagemosaic]
variable[ext] assign[=] call[call[name[os].path.splitext, parameter[name[data]]]][<ast.UnaryOp object at 0x7da1b0123f10>]
if compare[name[ext] equal[==] constant[.zip]] begin[:]
variable[type] assign[=] constant[file.imagemosaic]
variable[upload_data] assign[=] call[name[open], parameter[name[data], constant[rb]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b0121a50>, <ast.Constant object at 0x7da1b01232e0>], [<ast.Constant object at 0x7da1b0122200>, <ast.Constant object at 0x7da1b01225f0>]]
variable[params] assign[=] call[name[dict], parameter[]]
variable[workspace_name] assign[=] name[workspace]
if call[name[isinstance], parameter[name[store], name[basestring]]] begin[:]
variable[store_name] assign[=] name[store]
if compare[name[workspace_name] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0120400>
variable[url] assign[=] call[name[build_url], parameter[name[self].service_url, list[[<ast.Constant object at 0x7da1b0121b70>, <ast.Name object at 0x7da1b01226e0>, <ast.Constant object at 0x7da1b0121f00>, <ast.Name object at 0x7da1b0123fd0>, <ast.Name object at 0x7da1b0123550>]], name[params]]]
<ast.Try object at 0x7da1b0123280>
return[constant[None]] | keyword[def] identifier[add_granule] ( identifier[self] , identifier[data] , identifier[store] , identifier[workspace] = keyword[None] ):
literal[string]
identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[data] )[- literal[int] ]
keyword[if] identifier[ext] == literal[string] :
identifier[type] = literal[string]
identifier[upload_data] = identifier[open] ( identifier[data] , literal[string] )
identifier[headers] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[else] :
identifier[type] = literal[string]
identifier[upload_data] = identifier[data] keyword[if] identifier[data] . identifier[startswith] ( literal[string] ) keyword[else] literal[string] . identifier[format] ( identifier[data] = identifier[data] )
identifier[headers] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[params] = identifier[dict] ()
identifier[workspace_name] = identifier[workspace]
keyword[if] identifier[isinstance] ( identifier[store] , identifier[basestring] ):
identifier[store_name] = identifier[store]
keyword[else] :
identifier[store_name] = identifier[store] . identifier[name]
identifier[workspace_name] = identifier[store] . identifier[workspace] . identifier[name]
keyword[if] identifier[workspace_name] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[url] = identifier[build_url] (
identifier[self] . identifier[service_url] ,
[
literal[string] ,
identifier[workspace_name] ,
literal[string] ,
identifier[store_name] ,
identifier[type]
],
identifier[params]
)
keyword[try] :
identifier[resp] = identifier[self] . identifier[http_request] ( identifier[url] , identifier[method] = literal[string] , identifier[data] = identifier[upload_data] , identifier[headers] = identifier[headers] )
keyword[if] identifier[resp] . identifier[status_code] != literal[int] :
identifier[FailedRequestError] ( literal[string] . identifier[format] ( identifier[store] , identifier[resp] . identifier[status_code] , identifier[resp] . identifier[text] ))
identifier[self] . identifier[_cache] . identifier[clear] ()
keyword[finally] :
keyword[if] identifier[hasattr] ( identifier[upload_data] , literal[string] ):
identifier[upload_data] . identifier[close] ()
keyword[return] keyword[None] | def add_granule(self, data, store, workspace=None):
"""Harvest/add a granule into an existing imagemosaic"""
ext = os.path.splitext(data)[-1]
if ext == '.zip':
type = 'file.imagemosaic'
upload_data = open(data, 'rb')
headers = {'Content-type': 'application/zip', 'Accept': 'application/xml'} # depends on [control=['if'], data=[]]
else:
type = 'external.imagemosaic'
upload_data = data if data.startswith('file:') else 'file:{data}'.format(data=data)
headers = {'Content-type': 'text/plain', 'Accept': 'application/xml'}
params = dict()
workspace_name = workspace
if isinstance(store, basestring):
store_name = store # depends on [control=['if'], data=[]]
else:
store_name = store.name
workspace_name = store.workspace.name
if workspace_name is None:
raise ValueError('Must specify workspace') # depends on [control=['if'], data=[]]
url = build_url(self.service_url, ['workspaces', workspace_name, 'coveragestores', store_name, type], params)
try:
resp = self.http_request(url, method='post', data=upload_data, headers=headers)
if resp.status_code != 202:
FailedRequestError('Failed to add granule to mosaic {} : {}, {}'.format(store, resp.status_code, resp.text)) # depends on [control=['if'], data=[]]
self._cache.clear() # depends on [control=['try'], data=[]]
finally:
if hasattr(upload_data, 'close'):
upload_data.close() # depends on [control=['if'], data=[]]
# maybe return a list of all granules?
return None |
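Hedged call sketch; `cat` is a hypothetical catalog instance. A plain path takes the external.imagemosaic route (text/plain, file: URI), while a .zip is uploaded through file.imagemosaic.

# cat: an instance of the catalog class above, connected to GeoServer
cat.add_granule('/data/mosaic/tile_2019.tif', 'mymosaic', workspace='topp')
cat.add_granule('granules.zip', 'mymosaic', workspace='topp')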
def _wait_for_finishing(self):
"""Observe running state machine and stop engine if execution has finished"""
self.state_machine_running = True
self.__running_state_machine.join()
self.__set_execution_mode_to_finished()
self.state_machine_manager.active_state_machine_id = None
plugins.run_on_state_machine_execution_finished()
# self.__set_execution_mode_to_stopped()
self.state_machine_running = False | def function[_wait_for_finishing, parameter[self]]:
constant[Observe running state machine and stop engine if execution has finished]
name[self].state_machine_running assign[=] constant[True]
call[name[self].__running_state_machine.join, parameter[]]
call[name[self].__set_execution_mode_to_finished, parameter[]]
name[self].state_machine_manager.active_state_machine_id assign[=] constant[None]
call[name[plugins].run_on_state_machine_execution_finished, parameter[]]
name[self].state_machine_running assign[=] constant[False] | keyword[def] identifier[_wait_for_finishing] ( identifier[self] ):
literal[string]
identifier[self] . identifier[state_machine_running] = keyword[True]
identifier[self] . identifier[__running_state_machine] . identifier[join] ()
identifier[self] . identifier[__set_execution_mode_to_finished] ()
identifier[self] . identifier[state_machine_manager] . identifier[active_state_machine_id] = keyword[None]
identifier[plugins] . identifier[run_on_state_machine_execution_finished] ()
identifier[self] . identifier[state_machine_running] = keyword[False] | def _wait_for_finishing(self):
"""Observe running state machine and stop engine if execution has finished"""
self.state_machine_running = True
self.__running_state_machine.join()
self.__set_execution_mode_to_finished()
self.state_machine_manager.active_state_machine_id = None
plugins.run_on_state_machine_execution_finished()
# self.__set_execution_mode_to_stopped()
self.state_machine_running = False |
def read_chunk(self, chunk):
r"""
Works like :meth:`read`\ , but data is stored in the writable
buffer ``chunk`` rather than returned. Reads at most a number of
bytes equal to the size of ``chunk``\ .
:type chunk: buffer
:param chunk: a writable object that supports the buffer protocol
:rtype: int
:return: the number of bytes read
"""
_complain_ifclosed(self.closed)
return self.f.readinto(chunk) | def function[read_chunk, parameter[self, chunk]]:
constant[
Works like :meth:`read`\ , but data is stored in the writable
buffer ``chunk`` rather than returned. Reads at most a number of
bytes equal to the size of ``chunk``\ .
:type chunk: buffer
:param chunk: a writable object that supports the buffer protocol
:rtype: int
:return: the number of bytes read
]
call[name[_complain_ifclosed], parameter[name[self].closed]]
return[call[name[self].f.readinto, parameter[name[chunk]]]] | keyword[def] identifier[read_chunk] ( identifier[self] , identifier[chunk] ):
literal[string]
identifier[_complain_ifclosed] ( identifier[self] . identifier[closed] )
keyword[return] identifier[self] . identifier[f] . identifier[readinto] ( identifier[chunk] ) | def read_chunk(self, chunk):
"""
Works like :meth:`read`\\ , but data is stored in the writable
buffer ``chunk`` rather than returned. Reads at most a number of
bytes equal to the size of ``chunk``\\ .
:type chunk: buffer
:param chunk: a writable object that supports the buffer protocol
:rtype: int
:return: the number of bytes read
"""
_complain_ifclosed(self.closed)
return self.f.readinto(chunk) |
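Sketch of the preallocated-buffer pattern; `f` stands for a hypothetical open instance of the class above, and bytearray satisfies the writable buffer protocol the docstring asks for.

buf = bytearray(4096)
n = f.read_chunk(buf)    # reads at most len(buf) bytes
chunk = bytes(buf[:n])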
def compute_distances_dict(egg):
""" Creates a nested dict of distances """
pres, rec, features, dist_funcs = parse_egg(egg)
pres_list = list(pres)
features_list = list(features)
# initialize dist dict
distances = {}
# for each word in the list
for idx1, item1 in enumerate(pres_list):
distances[item1]={}
# for each word in the list
for idx2, item2 in enumerate(pres_list):
distances[item1][item2]={}
# for each feature in dist_funcs
for feature in dist_funcs:
distances[item1][item2][feature] = builtin_dist_funcs[dist_funcs[feature]](features_list[idx1][feature],features_list[idx2][feature])
return distances | def function[compute_distances_dict, parameter[egg]]:
constant[ Creates a nested dict of distances ]
<ast.Tuple object at 0x7da204344880> assign[=] call[name[parse_egg], parameter[name[egg]]]
variable[pres_list] assign[=] call[name[list], parameter[name[pres]]]
variable[features_list] assign[=] call[name[list], parameter[name[features]]]
variable[distances] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204346ce0>, <ast.Name object at 0x7da204346140>]]] in starred[call[name[enumerate], parameter[name[pres_list]]]] begin[:]
call[name[distances]][name[item1]] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204346da0>, <ast.Name object at 0x7da204346050>]]] in starred[call[name[enumerate], parameter[name[pres_list]]]] begin[:]
call[call[name[distances]][name[item1]]][name[item2]] assign[=] dictionary[[], []]
for taget[name[feature]] in starred[name[dist_funcs]] begin[:]
call[call[call[name[distances]][name[item1]]][name[item2]]][name[feature]] assign[=] call[call[name[builtin_dist_funcs]][call[name[dist_funcs]][name[feature]]], parameter[call[call[name[features_list]][name[idx1]]][name[feature]], call[call[name[features_list]][name[idx2]]][name[feature]]]]
return[name[distances]] | keyword[def] identifier[compute_distances_dict] ( identifier[egg] ):
literal[string]
identifier[pres] , identifier[rec] , identifier[features] , identifier[dist_funcs] = identifier[parse_egg] ( identifier[egg] )
identifier[pres_list] = identifier[list] ( identifier[pres] )
identifier[features_list] = identifier[list] ( identifier[features] )
identifier[distances] ={}
keyword[for] identifier[idx1] , identifier[item1] keyword[in] identifier[enumerate] ( identifier[pres_list] ):
identifier[distances] [ identifier[item1] ]={}
keyword[for] identifier[idx2] , identifier[item2] keyword[in] identifier[enumerate] ( identifier[pres_list] ):
identifier[distances] [ identifier[item1] ][ identifier[item2] ]={}
keyword[for] identifier[feature] keyword[in] identifier[dist_funcs] :
identifier[distances] [ identifier[item1] ][ identifier[item2] ][ identifier[feature] ]= identifier[builtin_dist_funcs] [ identifier[dist_funcs] [ identifier[feature] ]]( identifier[features_list] [ identifier[idx1] ][ identifier[feature] ], identifier[features_list] [ identifier[idx2] ][ identifier[feature] ])
keyword[return] identifier[distances] | def compute_distances_dict(egg):
""" Creates a nested dict of distances """
(pres, rec, features, dist_funcs) = parse_egg(egg)
pres_list = list(pres)
features_list = list(features)
# initialize dist dict
distances = {}
# for each word in the list
for (idx1, item1) in enumerate(pres_list):
distances[item1] = {}
# for each word in the list
for (idx2, item2) in enumerate(pres_list):
distances[item1][item2] = {}
# for each feature in dist_funcs
for feature in dist_funcs:
distances[item1][item2][feature] = builtin_dist_funcs[dist_funcs[feature]](features_list[idx1][feature], features_list[idx2][feature]) # depends on [control=['for'], data=['feature']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return distances |
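Shape of the returned structure for two hypothetical items and one feature; the actual values come from the builtin_dist_funcs entry registered for that feature in dist_funcs.

distances = {
    'cat': {'cat': {'size': 0.0}, 'dog': {'size': 1.0}},
    'dog': {'cat': {'size': 1.0}, 'dog': {'size': 0.0}},
}
print(distances['cat']['dog']['size'])  # 1.0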
def add_item_metadata(self, handle, key, value):
"""Store the given key:value pair for the item associated with handle.
:param handle: handle for accessing an item before the dataset is
frozen
:param key: metadata key
:param value: metadata value
"""
identifier = generate_identifier(handle)
metadata_blob_suffix = "{}.{}.json".format(identifier, key)
metadata_blob_name = self.fragments_key_prefix + metadata_blob_suffix
self._blobservice.create_blob_from_text(
self.uuid,
metadata_blob_name,
json.dumps(value)
)
self._blobservice.set_blob_metadata(
container_name=self.uuid,
blob_name=metadata_blob_name,
metadata={
"type": "item_metadata"
}
) | def function[add_item_metadata, parameter[self, handle, key, value]]:
constant[Store the given key:value pair for the item associated with handle.
:param handle: handle for accessing an item before the dataset is
frozen
:param key: metadata key
:param value: metadata value
]
variable[identifier] assign[=] call[name[generate_identifier], parameter[name[handle]]]
variable[metadata_blob_suffix] assign[=] call[constant[{}.{}.json].format, parameter[name[identifier], name[key]]]
variable[metadata_blob_name] assign[=] binary_operation[name[self].fragments_key_prefix + name[metadata_blob_suffix]]
call[name[self]._blobservice.create_blob_from_text, parameter[name[self].uuid, name[metadata_blob_name], call[name[json].dumps, parameter[name[value]]]]]
call[name[self]._blobservice.set_blob_metadata, parameter[]] | keyword[def] identifier[add_item_metadata] ( identifier[self] , identifier[handle] , identifier[key] , identifier[value] ):
literal[string]
identifier[identifier] = identifier[generate_identifier] ( identifier[handle] )
identifier[metadata_blob_suffix] = literal[string] . identifier[format] ( identifier[identifier] , identifier[key] )
identifier[metadata_blob_name] = identifier[self] . identifier[fragments_key_prefix] + identifier[metadata_blob_suffix]
identifier[self] . identifier[_blobservice] . identifier[create_blob_from_text] (
identifier[self] . identifier[uuid] ,
identifier[metadata_blob_name] ,
identifier[json] . identifier[dumps] ( identifier[value] )
)
identifier[self] . identifier[_blobservice] . identifier[set_blob_metadata] (
identifier[container_name] = identifier[self] . identifier[uuid] ,
identifier[blob_name] = identifier[metadata_blob_name] ,
identifier[metadata] ={
literal[string] : literal[string]
}
) | def add_item_metadata(self, handle, key, value):
"""Store the given key:value pair for the item associated with handle.
:param handle: handle for accessing an item before the dataset is
frozen
:param key: metadata key
:param value: metadata value
"""
identifier = generate_identifier(handle)
metadata_blob_suffix = '{}.{}.json'.format(identifier, key)
metadata_blob_name = self.fragments_key_prefix + metadata_blob_suffix
self._blobservice.create_blob_from_text(self.uuid, metadata_blob_name, json.dumps(value))
self._blobservice.set_blob_metadata(container_name=self.uuid, blob_name=metadata_blob_name, metadata={'type': 'item_metadata'}) |
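For illustration, how the metadata blob name is composed. `generate_identifier` is not defined above, so a SHA-1 of the handle is assumed here purely for the sketch.

import hashlib

def generate_identifier(handle):  # assumed implementation, illustration only
    return hashlib.sha1(handle.encode('utf-8')).hexdigest()

fragments_key_prefix = 'fragments/'  # assumed value of self.fragments_key_prefix
handle, key = 'data/item.csv', 'mimetype'
blob_name = fragments_key_prefix + '{}.{}.json'.format(generate_identifier(handle), key)
print(blob_name)  # fragments/<40-hex-char identifier>.mimetype.json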
def update_primary(hdu_in, hdu=None):
""" 'Update' a primary HDU
    This checks whether hdu exists and creates it from hdu_in if it does not.
If hdu does exist, this adds the data in hdu_in to hdu
"""
if hdu is None:
hdu = fits.PrimaryHDU(data=hdu_in.data, header=hdu_in.header)
else:
hdu.data += hdu_in.data
return hdu | def function[update_primary, parameter[hdu_in, hdu]]:
constant[ 'Update' a primary HDU
    This checks whether hdu exists and creates it from hdu_in if it does not.
If hdu does exist, this adds the data in hdu_in to hdu
]
if compare[name[hdu] is constant[None]] begin[:]
variable[hdu] assign[=] call[name[fits].PrimaryHDU, parameter[]]
return[name[hdu]] | keyword[def] identifier[update_primary] ( identifier[hdu_in] , identifier[hdu] = keyword[None] ):
literal[string]
keyword[if] identifier[hdu] keyword[is] keyword[None] :
identifier[hdu] = identifier[fits] . identifier[PrimaryHDU] ( identifier[data] = identifier[hdu_in] . identifier[data] , identifier[header] = identifier[hdu_in] . identifier[header] )
keyword[else] :
identifier[hdu] . identifier[data] += identifier[hdu_in] . identifier[data]
keyword[return] identifier[hdu] | def update_primary(hdu_in, hdu=None):
""" 'Update' a primary HDU
This checks hdu exists and creates it from hdu_in if it does not.
If hdu does exist, this adds the data in hdu_in to hdu
"""
if hdu is None:
hdu = fits.PrimaryHDU(data=hdu_in.data, header=hdu_in.header) # depends on [control=['if'], data=['hdu']]
else:
hdu.data += hdu_in.data
return hdu |
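A hedged usage sketch, assuming `fits` is astropy.io.fits and `update_primary` above is in scope: repeated calls accumulate the data arrays into one primary HDU.

import numpy as np
from astropy.io import fits

hdu = None
for arr in (np.ones((2, 2)), 2 * np.ones((2, 2))):
    hdu = update_primary(fits.PrimaryHDU(data=arr), hdu)
print(hdu.data)  # element-wise sum: [[3. 3.] [3. 3.]]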
def ReadMostRecentClientGraphSeries(self, client_label,
report_type
):
"""See db.Database."""
series_with_timestamps = self.ReadAllClientGraphSeries(
client_label, report_type)
if not series_with_timestamps:
return None
_, latest_series = list(sorted(iteritems(series_with_timestamps)))[-1]
return latest_series | def function[ReadMostRecentClientGraphSeries, parameter[self, client_label, report_type]]:
constant[See db.Database.]
variable[series_with_timestamps] assign[=] call[name[self].ReadAllClientGraphSeries, parameter[name[client_label], name[report_type]]]
if <ast.UnaryOp object at 0x7da1b1b6e4a0> begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da1b1b6f6d0> assign[=] call[call[name[list], parameter[call[name[sorted], parameter[call[name[iteritems], parameter[name[series_with_timestamps]]]]]]]][<ast.UnaryOp object at 0x7da1b1b6f2e0>]
return[name[latest_series]] | keyword[def] identifier[ReadMostRecentClientGraphSeries] ( identifier[self] , identifier[client_label] ,
identifier[report_type]
):
literal[string]
identifier[series_with_timestamps] = identifier[self] . identifier[ReadAllClientGraphSeries] (
identifier[client_label] , identifier[report_type] )
keyword[if] keyword[not] identifier[series_with_timestamps] :
keyword[return] keyword[None]
identifier[_] , identifier[latest_series] = identifier[list] ( identifier[sorted] ( identifier[iteritems] ( identifier[series_with_timestamps] )))[- literal[int] ]
keyword[return] identifier[latest_series] | def ReadMostRecentClientGraphSeries(self, client_label, report_type):
"""See db.Database."""
series_with_timestamps = self.ReadAllClientGraphSeries(client_label, report_type)
if not series_with_timestamps:
return None # depends on [control=['if'], data=[]]
(_, latest_series) = list(sorted(iteritems(series_with_timestamps)))[-1]
return latest_series |
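The selection step in isolation (`iteritems` suggests a Python 2 codebase; plain `.items()` is used in this sketch): sort the timestamp-to-series mapping and keep the last pair.

series_with_timestamps = {100: 'old', 300: 'newest', 200: 'mid'}  # made-up data
_, latest_series = sorted(series_with_timestamps.items())[-1]
print(latest_series)  # 'newest'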
def random_orthonormal(normal):
"""Return a random normalized vector orthogonal to the given vector"""
u = normal_fns[np.argmin(np.fabs(normal))](normal)
u /= np.linalg.norm(u)
v = np.cross(normal, u)
v /= np.linalg.norm(v)
alpha = np.random.uniform(0.0, np.pi*2)
return np.cos(alpha)*u + np.sin(alpha)*v | def function[random_orthonormal, parameter[normal]]:
constant[Return a random normalized vector orthogonal to the given vector]
variable[u] assign[=] call[call[name[normal_fns]][call[name[np].argmin, parameter[call[name[np].fabs, parameter[name[normal]]]]]], parameter[name[normal]]]
<ast.AugAssign object at 0x7da207f03910>
variable[v] assign[=] call[name[np].cross, parameter[name[normal], name[u]]]
<ast.AugAssign object at 0x7da207f01d20>
variable[alpha] assign[=] call[name[np].random.uniform, parameter[constant[0.0], binary_operation[name[np].pi * constant[2]]]]
return[binary_operation[binary_operation[call[name[np].cos, parameter[name[alpha]]] * name[u]] + binary_operation[call[name[np].sin, parameter[name[alpha]]] * name[v]]]] | keyword[def] identifier[random_orthonormal] ( identifier[normal] ):
literal[string]
identifier[u] = identifier[normal_fns] [ identifier[np] . identifier[argmin] ( identifier[np] . identifier[fabs] ( identifier[normal] ))]( identifier[normal] )
identifier[u] /= identifier[np] . identifier[linalg] . identifier[norm] ( identifier[u] )
identifier[v] = identifier[np] . identifier[cross] ( identifier[normal] , identifier[u] )
identifier[v] /= identifier[np] . identifier[linalg] . identifier[norm] ( identifier[v] )
identifier[alpha] = identifier[np] . identifier[random] . identifier[uniform] ( literal[int] , identifier[np] . identifier[pi] * literal[int] )
keyword[return] identifier[np] . identifier[cos] ( identifier[alpha] )* identifier[u] + identifier[np] . identifier[sin] ( identifier[alpha] )* identifier[v] | def random_orthonormal(normal):
"""Return a random normalized vector orthogonal to the given vector"""
u = normal_fns[np.argmin(np.fabs(normal))](normal)
u /= np.linalg.norm(u)
v = np.cross(normal, u)
v /= np.linalg.norm(v)
alpha = np.random.uniform(0.0, np.pi * 2)
return np.cos(alpha) * u + np.sin(alpha) * v |
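`normal_fns` is never defined in this snippet; one common construction (an assumption) crosses the normal with the basis vector along its smallest component, which keeps the cross product safely away from zero for any nonzero normal.

import numpy as np

normal_fns = [
    lambda n: np.array([0.0, -n[2], n[1]]),  # e_x x n
    lambda n: np.array([n[2], 0.0, -n[0]]),  # e_y x n
    lambda n: np.array([-n[1], n[0], 0.0]),  # e_z x n
]

normal = np.array([0.0, 0.0, 1.0])
r = random_orthonormal(normal)  # assuming the function above is in scope
print(abs(np.dot(r, normal)) < 1e-12)       # True: orthogonal
print(abs(np.linalg.norm(r) - 1.0) < 1e-12) # True: unit length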
def anonymous_required(func=None, url=None):
"""Required that the user is not logged in."""
url = url or "/"
def _dec(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
return redirect(url)
else:
return view_func(request, *args, **kwargs)
return _wrapped_view
if func is None:
return _dec
else:
return _dec(func) | def function[anonymous_required, parameter[func, url]]:
    constant[Require that the user is not logged in.]
variable[url] assign[=] <ast.BoolOp object at 0x7da2041d9de0>
def function[_dec, parameter[view_func]]:
def function[_wrapped_view, parameter[request]]:
if call[name[request].user.is_authenticated, parameter[]] begin[:]
return[call[name[redirect], parameter[name[url]]]]
return[name[_wrapped_view]]
if compare[name[func] is constant[None]] begin[:]
return[name[_dec]] | keyword[def] identifier[anonymous_required] ( identifier[func] = keyword[None] , identifier[url] = keyword[None] ):
literal[string]
identifier[url] = identifier[url] keyword[or] literal[string]
keyword[def] identifier[_dec] ( identifier[view_func] ):
@ identifier[wraps] ( identifier[view_func] , identifier[assigned] = identifier[available_attrs] ( identifier[view_func] ))
keyword[def] identifier[_wrapped_view] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[request] . identifier[user] . identifier[is_authenticated] ():
keyword[return] identifier[redirect] ( identifier[url] )
keyword[else] :
keyword[return] identifier[view_func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[_wrapped_view]
keyword[if] identifier[func] keyword[is] keyword[None] :
keyword[return] identifier[_dec]
keyword[else] :
keyword[return] identifier[_dec] ( identifier[func] ) | def anonymous_required(func=None, url=None):
"""Required that the user is not logged in."""
url = url or '/'
def _dec(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
return redirect(url) # depends on [control=['if'], data=[]]
else:
return view_func(request, *args, **kwargs)
return _wrapped_view
if func is None:
return _dec # depends on [control=['if'], data=[]]
else:
return _dec(func) |
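Hedged usage in a Django project (view names and URLs are illustrative); the `func=None` branch is what lets the decorator be applied both bare and with arguments. Note that `request.user.is_authenticated()` is the pre-Django-1.10 callable form; newer Django exposes it as a property.

@anonymous_required(url='/profile/')
def login_view(request):
    ...  # authenticated users are redirected to /profile/ before reaching this body

@anonymous_required
def signup_view(request):  # bare form: authenticated users are redirected to "/"
    ...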
def process_credentials_elements(cred_tree):
""" Receive an XML object with the credentials to run
a scan against a given target.
@param:
<credentials>
<credential type="up" service="ssh" port="22">
<username>scanuser</username>
<password>mypass</password>
</credential>
<credential type="up" service="smb">
<username>smbuser</username>
<password>mypass</password>
</credential>
</credentials>
@return: Dictionary containing the credentials for a given target.
Example form:
{'ssh': {'type': type,
'port': port,
'username': username,
'password': pass,
},
'smb': {'type': type,
'username': username,
'password': pass,
},
}
"""
credentials = {}
for credential in cred_tree:
service = credential.attrib.get('service')
credentials[service] = {}
credentials[service]['type'] = credential.attrib.get('type')
if service == 'ssh':
credentials[service]['port'] = credential.attrib.get('port')
for param in credential:
credentials[service][param.tag] = param.text
return credentials | def function[process_credentials_elements, parameter[cred_tree]]:
constant[ Receive an XML object with the credentials to run
a scan against a given target.
@param:
<credentials>
<credential type="up" service="ssh" port="22">
<username>scanuser</username>
<password>mypass</password>
</credential>
<credential type="up" service="smb">
<username>smbuser</username>
<password>mypass</password>
</credential>
</credentials>
@return: Dictionary containing the credentials for a given target.
Example form:
{'ssh': {'type': type,
'port': port,
'username': username,
'password': pass,
},
'smb': {'type': type,
'username': username,
'password': pass,
},
}
]
variable[credentials] assign[=] dictionary[[], []]
for taget[name[credential]] in starred[name[cred_tree]] begin[:]
variable[service] assign[=] call[name[credential].attrib.get, parameter[constant[service]]]
call[name[credentials]][name[service]] assign[=] dictionary[[], []]
call[call[name[credentials]][name[service]]][constant[type]] assign[=] call[name[credential].attrib.get, parameter[constant[type]]]
if compare[name[service] equal[==] constant[ssh]] begin[:]
call[call[name[credentials]][name[service]]][constant[port]] assign[=] call[name[credential].attrib.get, parameter[constant[port]]]
for taget[name[param]] in starred[name[credential]] begin[:]
call[call[name[credentials]][name[service]]][name[param].tag] assign[=] name[param].text
return[name[credentials]] | keyword[def] identifier[process_credentials_elements] ( identifier[cred_tree] ):
literal[string]
identifier[credentials] ={}
keyword[for] identifier[credential] keyword[in] identifier[cred_tree] :
identifier[service] = identifier[credential] . identifier[attrib] . identifier[get] ( literal[string] )
identifier[credentials] [ identifier[service] ]={}
identifier[credentials] [ identifier[service] ][ literal[string] ]= identifier[credential] . identifier[attrib] . identifier[get] ( literal[string] )
keyword[if] identifier[service] == literal[string] :
identifier[credentials] [ identifier[service] ][ literal[string] ]= identifier[credential] . identifier[attrib] . identifier[get] ( literal[string] )
keyword[for] identifier[param] keyword[in] identifier[credential] :
identifier[credentials] [ identifier[service] ][ identifier[param] . identifier[tag] ]= identifier[param] . identifier[text]
keyword[return] identifier[credentials] | def process_credentials_elements(cred_tree):
""" Receive an XML object with the credentials to run
a scan against a given target.
@param:
<credentials>
<credential type="up" service="ssh" port="22">
<username>scanuser</username>
<password>mypass</password>
</credential>
<credential type="up" service="smb">
<username>smbuser</username>
<password>mypass</password>
</credential>
</credentials>
@return: Dictionary containing the credentials for a given target.
Example form:
{'ssh': {'type': type,
'port': port,
'username': username,
'password': pass,
},
'smb': {'type': type,
'username': username,
'password': pass,
},
}
"""
credentials = {}
for credential in cred_tree:
service = credential.attrib.get('service')
credentials[service] = {}
credentials[service]['type'] = credential.attrib.get('type')
if service == 'ssh':
credentials[service]['port'] = credential.attrib.get('port') # depends on [control=['if'], data=['service']]
for param in credential:
credentials[service][param.tag] = param.text # depends on [control=['for'], data=['param']] # depends on [control=['for'], data=['credential']]
return credentials |
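A runnable sketch that feeds the function the docstring's own XML through xml.etree.ElementTree (assuming the function above is in scope).

import xml.etree.ElementTree as ET

cred_tree = ET.fromstring(
    '<credentials>'
    '<credential type="up" service="ssh" port="22">'
    '<username>scanuser</username><password>mypass</password>'
    '</credential>'
    '</credentials>')
print(process_credentials_elements(cred_tree))
# {'ssh': {'type': 'up', 'port': '22', 'username': 'scanuser', 'password': 'mypass'}}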
def get_reserved_resources(role=None):
""" resource types from state summary include: reserved_resources
    :param role: the name of the role whose reservations are counted; if None, count all reserved resources
:type role: str
:return: resources(cpu,mem)
:rtype: Resources
"""
rtype = 'reserved_resources'
cpus = 0.0
mem = 0.0
summary = DCOSClient().get_state_summary()
if 'slaves' in summary:
agents = summary.get('slaves')
for agent in agents:
resource_reservations = agent.get(rtype)
reservations = []
if role is None or '*' in role:
reservations = resource_reservations.values()
elif role in resource_reservations:
reservations = [resource_reservations.get(role)]
for reservation in reservations:
if reservation.get('cpus') is not None:
cpus += reservation.get('cpus')
if reservation.get('mem') is not None:
mem += reservation.get('mem')
return Resources(cpus, mem) | def function[get_reserved_resources, parameter[role]]:
constant[ resource types from state summary include: reserved_resources
    :param role: the name of the role whose reservations are counted; if None, count all reserved resources
:type role: str
:return: resources(cpu,mem)
:rtype: Resources
]
variable[rtype] assign[=] constant[reserved_resources]
variable[cpus] assign[=] constant[0.0]
variable[mem] assign[=] constant[0.0]
variable[summary] assign[=] call[call[name[DCOSClient], parameter[]].get_state_summary, parameter[]]
if compare[constant[slaves] in name[summary]] begin[:]
variable[agents] assign[=] call[name[summary].get, parameter[constant[slaves]]]
for taget[name[agent]] in starred[name[agents]] begin[:]
variable[resource_reservations] assign[=] call[name[agent].get, parameter[name[rtype]]]
variable[reservations] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b192acb0> begin[:]
variable[reservations] assign[=] call[name[resource_reservations].values, parameter[]]
for taget[name[reservation]] in starred[name[reservations]] begin[:]
if compare[call[name[reservation].get, parameter[constant[cpus]]] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b192b9a0>
if compare[call[name[reservation].get, parameter[constant[mem]]] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b192ba00>
return[call[name[Resources], parameter[name[cpus], name[mem]]]] | keyword[def] identifier[get_reserved_resources] ( identifier[role] = keyword[None] ):
literal[string]
identifier[rtype] = literal[string]
identifier[cpus] = literal[int]
identifier[mem] = literal[int]
identifier[summary] = identifier[DCOSClient] (). identifier[get_state_summary] ()
keyword[if] literal[string] keyword[in] identifier[summary] :
identifier[agents] = identifier[summary] . identifier[get] ( literal[string] )
keyword[for] identifier[agent] keyword[in] identifier[agents] :
identifier[resource_reservations] = identifier[agent] . identifier[get] ( identifier[rtype] )
identifier[reservations] =[]
keyword[if] identifier[role] keyword[is] keyword[None] keyword[or] literal[string] keyword[in] identifier[role] :
identifier[reservations] = identifier[resource_reservations] . identifier[values] ()
keyword[elif] identifier[role] keyword[in] identifier[resource_reservations] :
identifier[reservations] =[ identifier[resource_reservations] . identifier[get] ( identifier[role] )]
keyword[for] identifier[reservation] keyword[in] identifier[reservations] :
keyword[if] identifier[reservation] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[cpus] += identifier[reservation] . identifier[get] ( literal[string] )
keyword[if] identifier[reservation] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[mem] += identifier[reservation] . identifier[get] ( literal[string] )
keyword[return] identifier[Resources] ( identifier[cpus] , identifier[mem] ) | def get_reserved_resources(role=None):
""" resource types from state summary include: reserved_resources
    :param role: the name of the role whose reservations are counted; if None, count all reserved resources
:type role: str
:return: resources(cpu,mem)
:rtype: Resources
"""
rtype = 'reserved_resources'
cpus = 0.0
mem = 0.0
summary = DCOSClient().get_state_summary()
if 'slaves' in summary:
agents = summary.get('slaves')
for agent in agents:
resource_reservations = agent.get(rtype)
reservations = []
if role is None or '*' in role:
reservations = resource_reservations.values() # depends on [control=['if'], data=[]]
elif role in resource_reservations:
reservations = [resource_reservations.get(role)] # depends on [control=['if'], data=['role', 'resource_reservations']]
for reservation in reservations:
if reservation.get('cpus') is not None:
cpus += reservation.get('cpus') # depends on [control=['if'], data=[]]
if reservation.get('mem') is not None:
mem += reservation.get('mem') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['reservation']] # depends on [control=['for'], data=['agent']] # depends on [control=['if'], data=['summary']]
return Resources(cpus, mem) |
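The aggregation logic alone, run on a hand-written state summary whose shape is inferred from the code above (so the layout is an assumption):

summary = {'slaves': [
    {'reserved_resources': {'slave_public': {'cpus': 4.0, 'mem': 2048.0},
                            'kafka': {'cpus': 2.0, 'mem': 1024.0}}},
]}
role, cpus, mem = 'kafka', 0.0, 0.0
for agent in summary['slaves']:
    for res in ([agent['reserved_resources'][role]]
                if role in agent['reserved_resources'] else []):
        cpus += res.get('cpus') or 0.0
        mem += res.get('mem') or 0.0
print(cpus, mem)  # 2.0 1024.0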
def homepage_view(request, message=None):
''' The view of the homepage. '''
userProfile = UserProfile.objects.get(user=request.user)
request_types = RequestType.objects.filter(enabled=True)
# List of request types for which the user is a relevant manager
manager_request_types = list()
for request_type in request_types:
for position in request_type.managers.filter(active=True):
if userProfile == position.incumbent:
manager_request_types.append(request_type)
break
# Pseudo-dictionary, list with items of form (request_type, (request,
# [list_of_request_responses], response_form))
requests_dict = list()
# Generate a dict of open requests for each request_type for which the user
# is a relevant manager:
if manager_request_types:
for request_type in manager_request_types:
# Items of form (request, [list_of_request_responses],
# response_form, upvote, vote_form)
requests_list = list()
# Select only open requests of type request_type:
requests = Request.objects.filter(
request_type=request_type, status=Request.OPEN,
)
for req in requests:
response_form = ManagerResponseForm(
request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
initial={'action': Response.NONE},
profile=userProfile,
request=req,
prefix="{}-response".format(req.pk),
)
vote_form = VoteForm(
request.POST if "vote-{0}".format(req.pk) in request.POST else None,
profile=userProfile,
request=req,
prefix="vote",
)
if response_form.is_valid():
response = response_form.save()
if response.request.closed:
messages.add_message(request, messages.SUCCESS,
MESSAGES['REQ_CLOSED'])
if response.request.filled:
messages.add_message(request, messages.SUCCESS,
MESSAGES['REQ_FILLED'])
return HttpResponseRedirect(reverse('homepage'))
if vote_form.is_valid():
vote_form.save()
return HttpResponseRedirect(reverse('homepage'))
response_list = Response.objects.filter(request=req)
upvote = userProfile in req.upvotes.all()
requests_list.append(
(req, response_list, response_form, upvote, vote_form)
)
requests_dict.append((request_type, requests_list))
### Announcements
# Pseudo-dictionary, list with items of form (announcement,
# announcement_unpin_form)
announcements_dict = list()
    # Earliest post date an unpinned announcement may have and still be displayed.
within_life = now() - timedelta(hours=settings.ANNOUNCEMENT_LIFE)
announcements = \
list(Announcement.objects.filter(pinned=True)) + \
list(Announcement.objects.filter(pinned=False, post_date__gte=within_life))
for a in announcements:
pin_form = None
if request.user.is_superuser or a.manager.incumbent == userProfile:
pin_form = PinForm(
request.POST if "pin-{0}".format(a.pk) in request.POST else None,
instance=a,
prefix="pin",
)
if pin_form.is_valid():
pin_form.save()
return HttpResponseRedirect(reverse('homepage'))
announcements_dict.append((a, pin_form))
if Manager.objects.filter(incumbent=userProfile, active=True).count():
announcement_form = AnnouncementForm(
request.POST if "post_announcement" in request.POST else None,
profile=userProfile,
prefix="announce",
)
else:
announcement_form = None
if announcement_form and announcement_form.is_valid():
announcement_form.save(request)
return HttpResponseRedirect(reverse('homepage'))
### Events
week_from_now = now() + timedelta(days=7)
# Get only next 7 days of events:
events_list = Event.objects.exclude(
start_time__gte=week_from_now
).exclude(
end_time__lte=now(),
)
# Pseudo-dictionary, list with items of form (event, ongoing, rsvpd, rsvp_form)
events_dict = list()
for event in events_list:
ongoing = ((event.start_time <= now()) and (event.end_time >= now()))
rsvpd = (userProfile in event.rsvps.all())
rsvp_form = RsvpForm(
request.POST if "rsvp-{0}".format(event.pk) in request.POST else None,
profile=userProfile,
instance=event,
prefix="rsvp",
)
if rsvp_form.is_valid():
rsvpd = rsvp_form.save()
if not rsvpd:
message = MESSAGES['RSVP_REMOVE'].format(event=event.title)
messages.add_message(request, messages.SUCCESS, message)
else:
message = MESSAGES['RSVP_ADD'].format(event=event.title)
messages.add_message(request, messages.SUCCESS, message)
return HttpResponseRedirect(reverse('homepage'))
events_dict.append((event, ongoing, rsvpd, rsvp_form))
### Threads
thread_form = ThreadForm(
request.POST if "submit_thread_form" in request.POST else None,
profile=userProfile,
prefix="thread",
)
if thread_form.is_valid():
thread_form.save()
return HttpResponseRedirect(reverse('homepage'))
# List of with items of form (thread, most_recent_message_in_thread)
thread_set = []
for thread in Thread.objects.all()[:settings.HOME_MAX_THREADS]:
try:
latest_message = Message.objects.filter(thread=thread).latest('post_date')
except Message.DoesNotExist:
latest_message = None
thread_set.append((thread, latest_message))
return render_to_response('homepage.html', {
'page_name': "Home",
'requests_dict': requests_dict,
'announcements_dict': announcements_dict,
'announcement_form': announcement_form,
'events_dict': events_dict,
'thread_set': thread_set,
'thread_form': thread_form,
}, context_instance=RequestContext(request)) | def function[homepage_view, parameter[request, message]]:
constant[ The view of the homepage. ]
variable[userProfile] assign[=] call[name[UserProfile].objects.get, parameter[]]
variable[request_types] assign[=] call[name[RequestType].objects.filter, parameter[]]
variable[manager_request_types] assign[=] call[name[list], parameter[]]
for taget[name[request_type]] in starred[name[request_types]] begin[:]
for taget[name[position]] in starred[call[name[request_type].managers.filter, parameter[]]] begin[:]
if compare[name[userProfile] equal[==] name[position].incumbent] begin[:]
call[name[manager_request_types].append, parameter[name[request_type]]]
break
variable[requests_dict] assign[=] call[name[list], parameter[]]
if name[manager_request_types] begin[:]
for taget[name[request_type]] in starred[name[manager_request_types]] begin[:]
variable[requests_list] assign[=] call[name[list], parameter[]]
variable[requests] assign[=] call[name[Request].objects.filter, parameter[]]
for taget[name[req]] in starred[name[requests]] begin[:]
variable[response_form] assign[=] call[name[ManagerResponseForm], parameter[<ast.IfExp object at 0x7da18bcc9780>]]
variable[vote_form] assign[=] call[name[VoteForm], parameter[<ast.IfExp object at 0x7da18bcc8850>]]
if call[name[response_form].is_valid, parameter[]] begin[:]
variable[response] assign[=] call[name[response_form].save, parameter[]]
if name[response].request.closed begin[:]
call[name[messages].add_message, parameter[name[request], name[messages].SUCCESS, call[name[MESSAGES]][constant[REQ_CLOSED]]]]
if name[response].request.filled begin[:]
call[name[messages].add_message, parameter[name[request], name[messages].SUCCESS, call[name[MESSAGES]][constant[REQ_FILLED]]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[homepage]]]]]]
if call[name[vote_form].is_valid, parameter[]] begin[:]
call[name[vote_form].save, parameter[]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[homepage]]]]]]
variable[response_list] assign[=] call[name[Response].objects.filter, parameter[]]
variable[upvote] assign[=] compare[name[userProfile] in call[name[req].upvotes.all, parameter[]]]
call[name[requests_list].append, parameter[tuple[[<ast.Name object at 0x7da18bccb970>, <ast.Name object at 0x7da18bcca080>, <ast.Name object at 0x7da18bcca980>, <ast.Name object at 0x7da18bccbb50>, <ast.Name object at 0x7da18bcc9c60>]]]]
call[name[requests_dict].append, parameter[tuple[[<ast.Name object at 0x7da18bccac80>, <ast.Name object at 0x7da18bcc91e0>]]]]
variable[announcements_dict] assign[=] call[name[list], parameter[]]
variable[within_life] assign[=] binary_operation[call[name[now], parameter[]] - call[name[timedelta], parameter[]]]
variable[announcements] assign[=] binary_operation[call[name[list], parameter[call[name[Announcement].objects.filter, parameter[]]]] + call[name[list], parameter[call[name[Announcement].objects.filter, parameter[]]]]]
for taget[name[a]] in starred[name[announcements]] begin[:]
variable[pin_form] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18bcc9b10> begin[:]
variable[pin_form] assign[=] call[name[PinForm], parameter[<ast.IfExp object at 0x7da18bcc9a20>]]
if call[name[pin_form].is_valid, parameter[]] begin[:]
call[name[pin_form].save, parameter[]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[homepage]]]]]]
call[name[announcements_dict].append, parameter[tuple[[<ast.Name object at 0x7da18bcca4a0>, <ast.Name object at 0x7da18bccb400>]]]]
if call[call[name[Manager].objects.filter, parameter[]].count, parameter[]] begin[:]
variable[announcement_form] assign[=] call[name[AnnouncementForm], parameter[<ast.IfExp object at 0x7da18ede7d90>]]
if <ast.BoolOp object at 0x7da18ede65f0> begin[:]
call[name[announcement_form].save, parameter[name[request]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[homepage]]]]]]
variable[week_from_now] assign[=] binary_operation[call[name[now], parameter[]] + call[name[timedelta], parameter[]]]
variable[events_list] assign[=] call[call[name[Event].objects.exclude, parameter[]].exclude, parameter[]]
variable[events_dict] assign[=] call[name[list], parameter[]]
for taget[name[event]] in starred[name[events_list]] begin[:]
variable[ongoing] assign[=] <ast.BoolOp object at 0x7da18ede5150>
variable[rsvpd] assign[=] compare[name[userProfile] in call[name[event].rsvps.all, parameter[]]]
variable[rsvp_form] assign[=] call[name[RsvpForm], parameter[<ast.IfExp object at 0x7da18ede6290>]]
if call[name[rsvp_form].is_valid, parameter[]] begin[:]
variable[rsvpd] assign[=] call[name[rsvp_form].save, parameter[]]
if <ast.UnaryOp object at 0x7da18ede7070> begin[:]
variable[message] assign[=] call[call[name[MESSAGES]][constant[RSVP_REMOVE]].format, parameter[]]
call[name[messages].add_message, parameter[name[request], name[messages].SUCCESS, name[message]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[homepage]]]]]]
call[name[events_dict].append, parameter[tuple[[<ast.Name object at 0x7da18ede7ac0>, <ast.Name object at 0x7da18ede6680>, <ast.Name object at 0x7da18ede73a0>, <ast.Name object at 0x7da18ede4ac0>]]]]
variable[thread_form] assign[=] call[name[ThreadForm], parameter[<ast.IfExp object at 0x7da18ede6860>]]
if call[name[thread_form].is_valid, parameter[]] begin[:]
call[name[thread_form].save, parameter[]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[homepage]]]]]]
variable[thread_set] assign[=] list[[]]
for taget[name[thread]] in starred[call[call[name[Thread].objects.all, parameter[]]][<ast.Slice object at 0x7da18ede4610>]] begin[:]
<ast.Try object at 0x7da18ede7190>
call[name[thread_set].append, parameter[tuple[[<ast.Name object at 0x7da18ede7460>, <ast.Name object at 0x7da18ede7160>]]]]
return[call[name[render_to_response], parameter[constant[homepage.html], dictionary[[<ast.Constant object at 0x7da18ede5a20>, <ast.Constant object at 0x7da18ede5870>, <ast.Constant object at 0x7da18ede7a90>, <ast.Constant object at 0x7da18ede5960>, <ast.Constant object at 0x7da18ede44f0>, <ast.Constant object at 0x7da18ede5f60>, <ast.Constant object at 0x7da18ede62c0>], [<ast.Constant object at 0x7da18ede4220>, <ast.Name object at 0x7da18ede4f40>, <ast.Name object at 0x7da18ede77c0>, <ast.Name object at 0x7da18ede60b0>, <ast.Name object at 0x7da18ede4c40>, <ast.Name object at 0x7da18ede5cf0>, <ast.Name object at 0x7da18ede7340>]]]]] | keyword[def] identifier[homepage_view] ( identifier[request] , identifier[message] = keyword[None] ):
literal[string]
identifier[userProfile] = identifier[UserProfile] . identifier[objects] . identifier[get] ( identifier[user] = identifier[request] . identifier[user] )
identifier[request_types] = identifier[RequestType] . identifier[objects] . identifier[filter] ( identifier[enabled] = keyword[True] )
identifier[manager_request_types] = identifier[list] ()
keyword[for] identifier[request_type] keyword[in] identifier[request_types] :
keyword[for] identifier[position] keyword[in] identifier[request_type] . identifier[managers] . identifier[filter] ( identifier[active] = keyword[True] ):
keyword[if] identifier[userProfile] == identifier[position] . identifier[incumbent] :
identifier[manager_request_types] . identifier[append] ( identifier[request_type] )
keyword[break]
identifier[requests_dict] = identifier[list] ()
keyword[if] identifier[manager_request_types] :
keyword[for] identifier[request_type] keyword[in] identifier[manager_request_types] :
identifier[requests_list] = identifier[list] ()
identifier[requests] = identifier[Request] . identifier[objects] . identifier[filter] (
identifier[request_type] = identifier[request_type] , identifier[status] = identifier[Request] . identifier[OPEN] ,
)
keyword[for] identifier[req] keyword[in] identifier[requests] :
identifier[response_form] = identifier[ManagerResponseForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] . identifier[format] ( identifier[req] . identifier[pk] ) keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[initial] ={ literal[string] : identifier[Response] . identifier[NONE] },
identifier[profile] = identifier[userProfile] ,
identifier[request] = identifier[req] ,
identifier[prefix] = literal[string] . identifier[format] ( identifier[req] . identifier[pk] ),
)
identifier[vote_form] = identifier[VoteForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] . identifier[format] ( identifier[req] . identifier[pk] ) keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[profile] = identifier[userProfile] ,
identifier[request] = identifier[req] ,
identifier[prefix] = literal[string] ,
)
keyword[if] identifier[response_form] . identifier[is_valid] ():
identifier[response] = identifier[response_form] . identifier[save] ()
keyword[if] identifier[response] . identifier[request] . identifier[closed] :
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[SUCCESS] ,
identifier[MESSAGES] [ literal[string] ])
keyword[if] identifier[response] . identifier[request] . identifier[filled] :
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[SUCCESS] ,
identifier[MESSAGES] [ literal[string] ])
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
keyword[if] identifier[vote_form] . identifier[is_valid] ():
identifier[vote_form] . identifier[save] ()
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
identifier[response_list] = identifier[Response] . identifier[objects] . identifier[filter] ( identifier[request] = identifier[req] )
identifier[upvote] = identifier[userProfile] keyword[in] identifier[req] . identifier[upvotes] . identifier[all] ()
identifier[requests_list] . identifier[append] (
( identifier[req] , identifier[response_list] , identifier[response_form] , identifier[upvote] , identifier[vote_form] )
)
identifier[requests_dict] . identifier[append] (( identifier[request_type] , identifier[requests_list] ))
identifier[announcements_dict] = identifier[list] ()
identifier[within_life] = identifier[now] ()- identifier[timedelta] ( identifier[hours] = identifier[settings] . identifier[ANNOUNCEMENT_LIFE] )
identifier[announcements] = identifier[list] ( identifier[Announcement] . identifier[objects] . identifier[filter] ( identifier[pinned] = keyword[True] ))+ identifier[list] ( identifier[Announcement] . identifier[objects] . identifier[filter] ( identifier[pinned] = keyword[False] , identifier[post_date__gte] = identifier[within_life] ))
keyword[for] identifier[a] keyword[in] identifier[announcements] :
identifier[pin_form] = keyword[None]
keyword[if] identifier[request] . identifier[user] . identifier[is_superuser] keyword[or] identifier[a] . identifier[manager] . identifier[incumbent] == identifier[userProfile] :
identifier[pin_form] = identifier[PinForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] . identifier[format] ( identifier[a] . identifier[pk] ) keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[instance] = identifier[a] ,
identifier[prefix] = literal[string] ,
)
keyword[if] identifier[pin_form] . identifier[is_valid] ():
identifier[pin_form] . identifier[save] ()
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
identifier[announcements_dict] . identifier[append] (( identifier[a] , identifier[pin_form] ))
keyword[if] identifier[Manager] . identifier[objects] . identifier[filter] ( identifier[incumbent] = identifier[userProfile] , identifier[active] = keyword[True] ). identifier[count] ():
identifier[announcement_form] = identifier[AnnouncementForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[profile] = identifier[userProfile] ,
identifier[prefix] = literal[string] ,
)
keyword[else] :
identifier[announcement_form] = keyword[None]
keyword[if] identifier[announcement_form] keyword[and] identifier[announcement_form] . identifier[is_valid] ():
identifier[announcement_form] . identifier[save] ( identifier[request] )
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
identifier[week_from_now] = identifier[now] ()+ identifier[timedelta] ( identifier[days] = literal[int] )
identifier[events_list] = identifier[Event] . identifier[objects] . identifier[exclude] (
identifier[start_time__gte] = identifier[week_from_now]
). identifier[exclude] (
identifier[end_time__lte] = identifier[now] (),
)
identifier[events_dict] = identifier[list] ()
keyword[for] identifier[event] keyword[in] identifier[events_list] :
identifier[ongoing] =(( identifier[event] . identifier[start_time] <= identifier[now] ()) keyword[and] ( identifier[event] . identifier[end_time] >= identifier[now] ()))
identifier[rsvpd] =( identifier[userProfile] keyword[in] identifier[event] . identifier[rsvps] . identifier[all] ())
identifier[rsvp_form] = identifier[RsvpForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] . identifier[format] ( identifier[event] . identifier[pk] ) keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[profile] = identifier[userProfile] ,
identifier[instance] = identifier[event] ,
identifier[prefix] = literal[string] ,
)
keyword[if] identifier[rsvp_form] . identifier[is_valid] ():
identifier[rsvpd] = identifier[rsvp_form] . identifier[save] ()
keyword[if] keyword[not] identifier[rsvpd] :
identifier[message] = identifier[MESSAGES] [ literal[string] ]. identifier[format] ( identifier[event] = identifier[event] . identifier[title] )
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[SUCCESS] , identifier[message] )
keyword[else] :
identifier[message] = identifier[MESSAGES] [ literal[string] ]. identifier[format] ( identifier[event] = identifier[event] . identifier[title] )
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[SUCCESS] , identifier[message] )
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
identifier[events_dict] . identifier[append] (( identifier[event] , identifier[ongoing] , identifier[rsvpd] , identifier[rsvp_form] ))
identifier[thread_form] = identifier[ThreadForm] (
identifier[request] . identifier[POST] keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[profile] = identifier[userProfile] ,
identifier[prefix] = literal[string] ,
)
keyword[if] identifier[thread_form] . identifier[is_valid] ():
identifier[thread_form] . identifier[save] ()
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
identifier[thread_set] =[]
keyword[for] identifier[thread] keyword[in] identifier[Thread] . identifier[objects] . identifier[all] ()[: identifier[settings] . identifier[HOME_MAX_THREADS] ]:
keyword[try] :
identifier[latest_message] = identifier[Message] . identifier[objects] . identifier[filter] ( identifier[thread] = identifier[thread] ). identifier[latest] ( literal[string] )
keyword[except] identifier[Message] . identifier[DoesNotExist] :
identifier[latest_message] = keyword[None]
identifier[thread_set] . identifier[append] (( identifier[thread] , identifier[latest_message] ))
keyword[return] identifier[render_to_response] ( literal[string] ,{
literal[string] : literal[string] ,
literal[string] : identifier[requests_dict] ,
literal[string] : identifier[announcements_dict] ,
literal[string] : identifier[announcement_form] ,
literal[string] : identifier[events_dict] ,
literal[string] : identifier[thread_set] ,
literal[string] : identifier[thread_form] ,
}, identifier[context_instance] = identifier[RequestContext] ( identifier[request] )) | def homepage_view(request, message=None):
""" The view of the homepage. """
userProfile = UserProfile.objects.get(user=request.user)
request_types = RequestType.objects.filter(enabled=True)
# List of request types for which the user is a relevant manager
manager_request_types = list()
for request_type in request_types:
for position in request_type.managers.filter(active=True):
if userProfile == position.incumbent:
manager_request_types.append(request_type)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['position']] # depends on [control=['for'], data=['request_type']]
# Pseudo-dictionary, list with items of form (request_type, (request,
# [list_of_request_responses], response_form))
requests_dict = list()
# Generate a dict of open requests for each request_type for which the user
# is a relevant manager:
if manager_request_types:
for request_type in manager_request_types:
# Items of form (request, [list_of_request_responses],
# response_form, upvote, vote_form)
requests_list = list()
# Select only open requests of type request_type:
requests = Request.objects.filter(request_type=request_type, status=Request.OPEN)
for req in requests:
response_form = ManagerResponseForm(request.POST if 'add_response-{0}'.format(req.pk) in request.POST else None, initial={'action': Response.NONE}, profile=userProfile, request=req, prefix='{}-response'.format(req.pk))
vote_form = VoteForm(request.POST if 'vote-{0}'.format(req.pk) in request.POST else None, profile=userProfile, request=req, prefix='vote')
if response_form.is_valid():
response = response_form.save()
if response.request.closed:
messages.add_message(request, messages.SUCCESS, MESSAGES['REQ_CLOSED']) # depends on [control=['if'], data=[]]
if response.request.filled:
messages.add_message(request, messages.SUCCESS, MESSAGES['REQ_FILLED']) # depends on [control=['if'], data=[]]
return HttpResponseRedirect(reverse('homepage')) # depends on [control=['if'], data=[]]
if vote_form.is_valid():
vote_form.save()
return HttpResponseRedirect(reverse('homepage')) # depends on [control=['if'], data=[]]
response_list = Response.objects.filter(request=req)
upvote = userProfile in req.upvotes.all()
requests_list.append((req, response_list, response_form, upvote, vote_form)) # depends on [control=['for'], data=['req']]
requests_dict.append((request_type, requests_list)) # depends on [control=['for'], data=['request_type']] # depends on [control=['if'], data=[]]
### Announcements
# Pseudo-dictionary, list with items of form (announcement,
# announcement_unpin_form)
announcements_dict = list()
    # Earliest post date an unpinned announcement may have and still be displayed.
within_life = now() - timedelta(hours=settings.ANNOUNCEMENT_LIFE)
announcements = list(Announcement.objects.filter(pinned=True)) + list(Announcement.objects.filter(pinned=False, post_date__gte=within_life))
for a in announcements:
pin_form = None
if request.user.is_superuser or a.manager.incumbent == userProfile:
pin_form = PinForm(request.POST if 'pin-{0}'.format(a.pk) in request.POST else None, instance=a, prefix='pin')
if pin_form.is_valid():
pin_form.save()
return HttpResponseRedirect(reverse('homepage')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
announcements_dict.append((a, pin_form)) # depends on [control=['for'], data=['a']]
if Manager.objects.filter(incumbent=userProfile, active=True).count():
announcement_form = AnnouncementForm(request.POST if 'post_announcement' in request.POST else None, profile=userProfile, prefix='announce') # depends on [control=['if'], data=[]]
else:
announcement_form = None
if announcement_form and announcement_form.is_valid():
announcement_form.save(request)
return HttpResponseRedirect(reverse('homepage')) # depends on [control=['if'], data=[]]
### Events
week_from_now = now() + timedelta(days=7)
# Get only next 7 days of events:
events_list = Event.objects.exclude(start_time__gte=week_from_now).exclude(end_time__lte=now())
# Pseudo-dictionary, list with items of form (event, ongoing, rsvpd, rsvp_form)
events_dict = list()
for event in events_list:
ongoing = event.start_time <= now() and event.end_time >= now()
rsvpd = userProfile in event.rsvps.all()
rsvp_form = RsvpForm(request.POST if 'rsvp-{0}'.format(event.pk) in request.POST else None, profile=userProfile, instance=event, prefix='rsvp')
if rsvp_form.is_valid():
rsvpd = rsvp_form.save()
if not rsvpd:
message = MESSAGES['RSVP_REMOVE'].format(event=event.title)
messages.add_message(request, messages.SUCCESS, message) # depends on [control=['if'], data=[]]
else:
message = MESSAGES['RSVP_ADD'].format(event=event.title)
messages.add_message(request, messages.SUCCESS, message)
return HttpResponseRedirect(reverse('homepage')) # depends on [control=['if'], data=[]]
events_dict.append((event, ongoing, rsvpd, rsvp_form)) # depends on [control=['for'], data=['event']]
### Threads
thread_form = ThreadForm(request.POST if 'submit_thread_form' in request.POST else None, profile=userProfile, prefix='thread')
if thread_form.is_valid():
thread_form.save()
return HttpResponseRedirect(reverse('homepage')) # depends on [control=['if'], data=[]]
# List of with items of form (thread, most_recent_message_in_thread)
thread_set = []
for thread in Thread.objects.all()[:settings.HOME_MAX_THREADS]:
try:
latest_message = Message.objects.filter(thread=thread).latest('post_date') # depends on [control=['try'], data=[]]
except Message.DoesNotExist:
latest_message = None # depends on [control=['except'], data=[]]
thread_set.append((thread, latest_message)) # depends on [control=['for'], data=['thread']]
return render_to_response('homepage.html', {'page_name': 'Home', 'requests_dict': requests_dict, 'announcements_dict': announcements_dict, 'announcement_form': announcement_form, 'events_dict': events_dict, 'thread_set': thread_set, 'thread_form': thread_form}, context_instance=RequestContext(request)) |
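Of the many steps above, the announcement-visibility cutoff is the most self-contained; here it is with plain datetimes and no ORM (ANNOUNCEMENT_LIFE's real value lives in Django settings, so 96 hours is an assumption):

from datetime import datetime, timedelta

ANNOUNCEMENT_LIFE = 96  # hours, assumed
within_life = datetime.now() - timedelta(hours=ANNOUNCEMENT_LIFE)
posts = [('pinned notice', True, datetime(2014, 1, 1)),
         ('fresh notice', False, datetime.now()),
         ('stale notice', False, datetime(2014, 1, 1))]
visible = [title for title, pinned, when in posts if pinned or when >= within_life]
print(visible)  # ['pinned notice', 'fresh notice']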
def list(self, service, per_page=20, page=1):
""" Get a list of limits for the given service
:param service: The service that the limit is linked to
:param per_page: The number of results per page returned
:param page: The page number of the results
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
"""
params = {'per_page': per_page, 'page': page}
return self.request.get('limit/' + service, params) | def function[list, parameter[self, service, per_page, page]]:
constant[ Get a list of limits for the given service
:param service: The service that the limit is linked to
:param per_page: The number of results per page returned
:param page: The page number of the results
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da204566c80>, <ast.Constant object at 0x7da204565b40>], [<ast.Name object at 0x7da204567580>, <ast.Name object at 0x7da204565690>]]
return[call[name[self].request.get, parameter[binary_operation[constant[limit/] + name[service]], name[params]]]] | keyword[def] identifier[list] ( identifier[self] , identifier[service] , identifier[per_page] = literal[int] , identifier[page] = literal[int] ):
literal[string]
identifier[params] ={ literal[string] : identifier[per_page] , literal[string] : identifier[page] }
keyword[return] identifier[self] . identifier[request] . identifier[get] ( literal[string] + identifier[service] , identifier[params] ) | def list(self, service, per_page=20, page=1):
""" Get a list of limits for the given service
:param service: The service that the limit is linked to
:param per_page: The number of results per page returned
:param page: The page number of the results
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
"""
params = {'per_page': per_page, 'page': page}
return self.request.get('limit/' + service, params) |
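Hedged usage sketch: it assumes the enclosing DataSift client exposes this endpoint object as `client.limit`, and the response key is a guess; the call itself maps to GET limit/<service>?per_page=...&page=....

response = client.limit.list('facebook', per_page=50, page=2)  # client assumed configured
for limit in response['limits']:  # response shape is an assumption
    print(limit)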
def dilute_ionic_conductivity(ionic_conductivities, zs, rhom):
r'''This function handles the calculation of the electrical conductivity of
a dilute electrolytic aqueous solution. Requires the mole fractions of
each ion, the molar density of the whole mixture, and ionic conductivity
coefficients for each ion.
.. math::
\lambda = \sum_i \lambda_i^\circ z_i \rho_m
Parameters
----------
ionic_conductivities : list[float]
Ionic conductivity coefficients of each ion in the mixture [m^2*S/mol]
zs : list[float]
Mole fractions of each ion in the mixture, [-]
rhom : float
Overall molar density of the solution, [mol/m^3]
Returns
-------
kappa : float
Electrical conductivity of the fluid, [S/m]
Notes
-----
The ionic conductivity coefficients should not be `equivalent` coefficients;
for example, 0.0053 m^2*S/mol is the equivalent conductivity coefficient of
Mg+2, but this method expects twice its value - 0.0106. Both are reported
commonly in literature.
    Water can be included in this calculation by specifying a coefficient of
    0. The conductivity of any electrolyte eclipses that of water by
many orders of magnitude. Any other solvents present will affect the
conductivity extensively and there are few good methods to predict this
effect.
Examples
--------
Complex mixture of electrolytes ['Cl-', 'HCO3-', 'SO4-2', 'Na+', 'K+',
'Ca+2', 'Mg+2']:
>>> ionic_conductivities = [0.00764, 0.00445, 0.016, 0.00501, 0.00735, 0.0119, 0.01061]
>>> zs = [0.03104, 0.00039, 0.00022, 0.02413, 0.0009, 0.0024, 0.00103]
>>> dilute_ionic_conductivity(ionic_conductivities=ionic_conductivities, zs=zs, rhom=53865.9)
22.05246783663
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
'''
return sum([ci*(zi*rhom) for zi, ci in zip(zs, ionic_conductivities)]) | def function[dilute_ionic_conductivity, parameter[ionic_conductivities, zs, rhom]]:
constant[This function handles the calculation of the electrical conductivity of
a dilute electrolytic aqueous solution. Requires the mole fractions of
each ion, the molar density of the whole mixture, and ionic conductivity
coefficients for each ion.
.. math::
\lambda = \sum_i \lambda_i^\circ z_i \rho_m
Parameters
----------
ionic_conductivities : list[float]
Ionic conductivity coefficients of each ion in the mixture [m^2*S/mol]
zs : list[float]
Mole fractions of each ion in the mixture, [-]
rhom : float
Overall molar density of the solution, [mol/m^3]
Returns
-------
kappa : float
Electrical conductivity of the fluid, [S/m]
Notes
-----
The ionic conductivity coefficients should not be `equivalent` coefficients;
for example, 0.0053 m^2*S/mol is the equivalent conductivity coefficient of
Mg+2, but this method expects twice its value - 0.0106. Both are reported
commonly in literature.
    Water can be included in this calculation by specifying a coefficient of
    0. The conductivity of any electrolyte eclipses that of water by
many orders of magnitude. Any other solvents present will affect the
conductivity extensively and there are few good methods to predict this
effect.
Examples
--------
Complex mixture of electrolytes ['Cl-', 'HCO3-', 'SO4-2', 'Na+', 'K+',
'Ca+2', 'Mg+2']:
>>> ionic_conductivities = [0.00764, 0.00445, 0.016, 0.00501, 0.00735, 0.0119, 0.01061]
>>> zs = [0.03104, 0.00039, 0.00022, 0.02413, 0.0009, 0.0024, 0.00103]
>>> dilute_ionic_conductivity(ionic_conductivities=ionic_conductivities, zs=zs, rhom=53865.9)
22.05246783663
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
]
return[call[name[sum], parameter[<ast.ListComp object at 0x7da18c4cd870>]]] | keyword[def] identifier[dilute_ionic_conductivity] ( identifier[ionic_conductivities] , identifier[zs] , identifier[rhom] ):
literal[string]
keyword[return] identifier[sum] ([ identifier[ci] *( identifier[zi] * identifier[rhom] ) keyword[for] identifier[zi] , identifier[ci] keyword[in] identifier[zip] ( identifier[zs] , identifier[ionic_conductivities] )]) | def dilute_ionic_conductivity(ionic_conductivities, zs, rhom):
"""This function handles the calculation of the electrical conductivity of
a dilute electrolytic aqueous solution. Requires the mole fractions of
each ion, the molar density of the whole mixture, and ionic conductivity
coefficients for each ion.
.. math::
\\lambda = \\sum_i \\lambda_i^\\circ z_i \\rho_m
Parameters
----------
ionic_conductivities : list[float]
Ionic conductivity coefficients of each ion in the mixture [m^2*S/mol]
zs : list[float]
Mole fractions of each ion in the mixture, [-]
rhom : float
Overall molar density of the solution, [mol/m^3]
Returns
-------
kappa : float
Electrical conductivity of the fluid, [S/m]
Notes
-----
The ionic conductivity coefficients should not be `equivalent` coefficients;
for example, 0.0053 m^2*S/mol is the equivalent conductivity coefficient of
Mg+2, but this method expects twice its value - 0.0106. Both are reported
commonly in literature.
    Water can be included in this calculation by specifying a coefficient of
    0. The conductivity of any electrolyte eclipses that of water by
many orders of magnitude. Any other solvents present will affect the
conductivity extensively and there are few good methods to predict this
effect.
Examples
--------
Complex mixture of electrolytes ['Cl-', 'HCO3-', 'SO4-2', 'Na+', 'K+',
'Ca+2', 'Mg+2']:
>>> ionic_conductivities = [0.00764, 0.00445, 0.016, 0.00501, 0.00735, 0.0119, 0.01061]
>>> zs = [0.03104, 0.00039, 0.00022, 0.02413, 0.0009, 0.0024, 0.00103]
>>> dilute_ionic_conductivity(ionic_conductivities=ionic_conductivities, zs=zs, rhom=53865.9)
22.05246783663
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
"""
return sum([ci * (zi * rhom) for (zi, ci) in zip(zs, ionic_conductivities)]) |
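The docstring's own example, runnable once the function above is in scope; kappa is the plain sum of lambda_i * z_i * rho_m:

ionic_conductivities = [0.00764, 0.00445, 0.016, 0.00501, 0.00735, 0.0119, 0.01061]
zs = [0.03104, 0.00039, 0.00022, 0.02413, 0.0009, 0.0024, 0.00103]
kappa = dilute_ionic_conductivity(ionic_conductivities, zs, rhom=53865.9)
print(round(kappa, 6))  # 22.052468 S/m, matching the docstring example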
def put(self, path, args, wait=False): # pragma: no cover, apparently never used!
        # todo: remove this because it appears never to be used anywhere...
"""PUT and HTTP request to a daemon
:param path: path to do the request
:type path: str
:param args: data to send in the request
:type args:
:return: Content of the HTTP response if server returned 200
:rtype: str
"""
uri = self.make_uri(path)
timeout = self.make_timeout(wait)
try:
logger.debug("put: %s, timeout: %s, params: %s", uri, timeout, args)
rsp = self._requests_con.put(uri, args, timeout=timeout, verify=self.strong_ssl)
logger.debug("got: %d - %s", rsp.status_code, rsp.text)
if rsp.status_code != 200:
raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
return rsp.content
except (requests.Timeout, requests.ConnectTimeout):
raise HTTPClientTimeoutException(timeout, uri)
except requests.ConnectionError as exp:
raise HTTPClientConnectionException(uri, exp.args[0])
except Exception as exp:
raise HTTPClientException('Request error to %s: %s' % (uri, exp)) | def function[put, parameter[self, path, args, wait]]:
    constant[PUT an HTTP request to a daemon
:param path: path to do the request
:type path: str
:param args: data to send in the request
:type args:
:return: Content of the HTTP response if server returned 200
:rtype: str
]
variable[uri] assign[=] call[name[self].make_uri, parameter[name[path]]]
variable[timeout] assign[=] call[name[self].make_timeout, parameter[name[wait]]]
<ast.Try object at 0x7da18bc70b80> | keyword[def] identifier[put] ( identifier[self] , identifier[path] , identifier[args] , identifier[wait] = keyword[False] ):
literal[string]
identifier[uri] = identifier[self] . identifier[make_uri] ( identifier[path] )
identifier[timeout] = identifier[self] . identifier[make_timeout] ( identifier[wait] )
keyword[try] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[uri] , identifier[timeout] , identifier[args] )
identifier[rsp] = identifier[self] . identifier[_requests_con] . identifier[put] ( identifier[uri] , identifier[args] , identifier[timeout] = identifier[timeout] , identifier[verify] = identifier[self] . identifier[strong_ssl] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[rsp] . identifier[status_code] , identifier[rsp] . identifier[text] )
keyword[if] identifier[rsp] . identifier[status_code] != literal[int] :
keyword[raise] identifier[HTTPClientDataException] ( identifier[rsp] . identifier[status_code] , identifier[rsp] . identifier[text] , identifier[uri] )
keyword[return] identifier[rsp] . identifier[content]
keyword[except] ( identifier[requests] . identifier[Timeout] , identifier[requests] . identifier[ConnectTimeout] ):
keyword[raise] identifier[HTTPClientTimeoutException] ( identifier[timeout] , identifier[uri] )
keyword[except] identifier[requests] . identifier[ConnectionError] keyword[as] identifier[exp] :
keyword[raise] identifier[HTTPClientConnectionException] ( identifier[uri] , identifier[exp] . identifier[args] [ literal[int] ])
keyword[except] identifier[Exception] keyword[as] identifier[exp] :
keyword[raise] identifier[HTTPClientException] ( literal[string] %( identifier[uri] , identifier[exp] )) | def put(self, path, args, wait=False): # pragma: no cover, looks never used!
# todo: remove this because it looks never used anywhere...
'PUT an HTTP request to a daemon\n\n    :param path: path to do the request\n    :type path: str\n    :param args: data to send in the request\n    :type args:\n    :return: Content of the HTTP response if server returned 200\n    :rtype: str\n    '
uri = self.make_uri(path)
timeout = self.make_timeout(wait)
try:
logger.debug('put: %s, timeout: %s, params: %s', uri, timeout, args)
rsp = self._requests_con.put(uri, args, timeout=timeout, verify=self.strong_ssl)
logger.debug('got: %d - %s', rsp.status_code, rsp.text)
if rsp.status_code != 200:
raise HTTPClientDataException(rsp.status_code, rsp.text, uri) # depends on [control=['if'], data=[]]
return rsp.content # depends on [control=['try'], data=[]]
except (requests.Timeout, requests.ConnectTimeout):
raise HTTPClientTimeoutException(timeout, uri) # depends on [control=['except'], data=[]]
except requests.ConnectionError as exp:
raise HTTPClientConnectionException(uri, exp.args[0]) # depends on [control=['except'], data=['exp']]
except Exception as exp:
raise HTTPClientException('Request error to %s: %s' % (uri, exp)) # depends on [control=['except'], data=['exp']] |
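A self-contained sketch of the exception-translation pattern `put` uses, with stand-in exception classes so the snippet runs outside the client class (the class names mirror the ones above, but the hierarchy here is an assumption):

import requests

class HTTPClientException(Exception):  # stand-in base class
    pass

class HTTPClientTimeoutException(HTTPClientException):  # stand-in subclass
    pass

def put_data(uri, args, timeout=10):
    """PUT `args` to `uri`, normalizing requests' errors into client errors."""
    try:
        rsp = requests.put(uri, data=args, timeout=timeout)
        if rsp.status_code != 200:
            raise HTTPClientException('%d - %s' % (rsp.status_code, rsp.text))
        return rsp.content
    except requests.Timeout:
        raise HTTPClientTimeoutException('timeout after %ss on %s' % (timeout, uri))
    except requests.ConnectionError as exp:
        raise HTTPClientException('connection error to %s: %s' % (uri, exp))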
def child_(self, ctx):
"""
If the root resource is requested, return the primary
application's front page, if a primary application has been
chosen. Otherwise return 'self', since this page can render a
simple index.
"""
if self.frontPageItem.defaultApplication is None:
return self.webViewer.wrapModel(
_OfferingsFragment(self.frontPageItem))
else:
return SharingIndex(self.frontPageItem.defaultApplication.open(),
self.webViewer).locateChild(ctx, [''])[0] | def function[child_, parameter[self, ctx]]:
constant[
If the root resource is requested, return the primary
application's front page, if a primary application has been
chosen. Otherwise return 'self', since this page can render a
simple index.
]
if compare[name[self].frontPageItem.defaultApplication is constant[None]] begin[:]
return[call[name[self].webViewer.wrapModel, parameter[call[name[_OfferingsFragment], parameter[name[self].frontPageItem]]]]] | keyword[def] identifier[child_] ( identifier[self] , identifier[ctx] ):
literal[string]
keyword[if] identifier[self] . identifier[frontPageItem] . identifier[defaultApplication] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[webViewer] . identifier[wrapModel] (
identifier[_OfferingsFragment] ( identifier[self] . identifier[frontPageItem] ))
keyword[else] :
keyword[return] identifier[SharingIndex] ( identifier[self] . identifier[frontPageItem] . identifier[defaultApplication] . identifier[open] (),
identifier[self] . identifier[webViewer] ). identifier[locateChild] ( identifier[ctx] ,[ literal[string] ])[ literal[int] ] | def child_(self, ctx):
"""
If the root resource is requested, return the primary
application's front page, if a primary application has been
chosen. Otherwise return 'self', since this page can render a
simple index.
"""
if self.frontPageItem.defaultApplication is None:
return self.webViewer.wrapModel(_OfferingsFragment(self.frontPageItem)) # depends on [control=['if'], data=[]]
else:
return SharingIndex(self.frontPageItem.defaultApplication.open(), self.webViewer).locateChild(ctx, [''])[0] |
def calculate_splits(sdf_file, split_size):
"""Retrieve
"""
counts = _sdfstats(sdf_file)["counts"]
splits = []
cur = 0
for i in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
cur += split_size
return splits | def function[calculate_splits, parameter[sdf_file, split_size]]:
constant[Retrieve
]
variable[counts] assign[=] call[call[name[_sdfstats], parameter[name[sdf_file]]]][constant[counts]]
variable[splits] assign[=] list[[]]
variable[cur] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[binary_operation[name[counts] <ast.FloorDiv object at 0x7da2590d6bc0> name[split_size]] + <ast.IfExp object at 0x7da1b26aeec0>]]]] begin[:]
call[name[splits].append, parameter[binary_operation[constant[%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26acfa0>, <ast.Call object at 0x7da1b26af340>]]]]]
<ast.AugAssign object at 0x7da1b26afc40>
return[name[splits]] | keyword[def] identifier[calculate_splits] ( identifier[sdf_file] , identifier[split_size] ):
literal[string]
identifier[counts] = identifier[_sdfstats] ( identifier[sdf_file] )[ literal[string] ]
identifier[splits] =[]
identifier[cur] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[counts] // identifier[split_size] +( literal[int] keyword[if] identifier[counts] % identifier[split_size] == literal[int] keyword[else] literal[int] )):
identifier[splits] . identifier[append] ( literal[string] %( identifier[cur] , identifier[min] ( identifier[counts] , identifier[cur] + identifier[split_size] )))
identifier[cur] += identifier[split_size]
keyword[return] identifier[splits] | def calculate_splits(sdf_file, split_size):
"""Retrieve
"""
counts = _sdfstats(sdf_file)['counts']
splits = []
cur = 0
for i in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
splits.append('%s-%s' % (cur, min(counts, cur + split_size)))
cur += split_size # depends on [control=['for'], data=[]]
return splits |
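A worked example of the ceiling-style split arithmetic in `calculate_splits`, with the record count passed in directly since `_sdfstats` is not shown here:

def split_ranges(counts, split_size):
    # Same loop as above: one extra split when counts is not a multiple
    # of split_size, with the last range clamped to the total count.
    splits = []
    cur = 0
    for _ in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
        splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
        cur += split_size
    return splits

print(split_ranges(25, 10))  # ['0-10', '10-20', '20-25']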
def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1] | def function[previous_session_label, parameter[self, session_label]]:
constant[
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
]
variable[idx] assign[=] call[name[self].schedule.index.get_loc, parameter[name[session_label]]]
if compare[name[idx] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18fe93ca0>
return[call[name[self].schedule.index][binary_operation[name[idx] - constant[1]]]] | keyword[def] identifier[previous_session_label] ( identifier[self] , identifier[session_label] ):
literal[string]
identifier[idx] = identifier[self] . identifier[schedule] . identifier[index] . identifier[get_loc] ( identifier[session_label] )
keyword[if] identifier[idx] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[return] identifier[self] . identifier[schedule] . identifier[index] [ identifier[idx] - literal[int] ] | def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError('There is no previous session as this is the beginning of the exchange calendar.') # depends on [control=['if'], data=[]]
return self.schedule.index[idx - 1] |
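The lookup above is plain positional indexing on the schedule's DatetimeIndex; a self-contained sketch with a toy three-session index standing in for the calendar's schedule:

import pandas as pd

sessions = pd.DatetimeIndex(['2018-01-02', '2018-01-03', '2018-01-04'], tz='UTC')

def previous_session(sessions, label):
    idx = sessions.get_loc(label)
    if idx == 0:
        raise ValueError("There is no previous session as this is the"
                         " beginning of the exchange calendar.")
    return sessions[idx - 1]

print(previous_session(sessions, pd.Timestamp('2018-01-03', tz='UTC')))
# -> Timestamp('2018-01-02 00:00:00+0000', tz='UTC')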
def get_all_requisite_objectives(self, objective_id=None):
"""Gets a list of Objectives that are the requisites for the given
Objective including the requisites of the requisites, and so on.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an Objective is not found or
inaccessible. Otherwise, inaccessible Objectives may be omitted
from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): Id of the Objective
return: (osid.learning.ObjectiveList) - the returned Objective
list
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
# This should be re-implemented if and when handcar supports
# getting all requisites directly
requisites = list()
requisite_ids = list()
all_requisites = self._get_requisites_recursively(objective_id, requisites, requisite_ids)
return objects.ObjectiveList(all_requisites) | def function[get_all_requisite_objectives, parameter[self, objective_id]]:
constant[Gets a list of Objectives that are the requisites for the given
Objective including the requisites of the requisites, and so on.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an Objective is not found or
inaccessible. Otherwise, inaccessible Objectives may be omitted
from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): Id of the Objective
return: (osid.learning.ObjectiveList) - the returned Objective
list
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
]
variable[requisites] assign[=] call[name[list], parameter[]]
variable[requisite_ids] assign[=] call[name[list], parameter[]]
variable[all_requisites] assign[=] call[name[self]._get_requisites_recursively, parameter[name[objective_id], name[requisites], name[requisite_ids]]]
return[call[name[objects].ObjectiveList, parameter[name[all_requisites]]]] | keyword[def] identifier[get_all_requisite_objectives] ( identifier[self] , identifier[objective_id] = keyword[None] ):
literal[string]
identifier[requisites] = identifier[list] ()
identifier[requisite_ids] = identifier[list] ()
identifier[all_requisites] = identifier[self] . identifier[_get_requisites_recursively] ( identifier[objective_id] , identifier[requisites] , identifier[requisite_ids] )
keyword[return] identifier[objects] . identifier[ObjectiveList] ( identifier[all_requisites] ) | def get_all_requisite_objectives(self, objective_id=None):
"""Gets a list of Objectives that are the requisites for the given
Objective including the requisites of the requisites, and so on.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an Objective is not found or
inaccessible. Otherwise, inaccessible Objectives may be omitted
from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): Id of the Objective
return: (osid.learning.ObjectiveList) - the returned Objective
list
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
# This should be re-implemented if and when handcar supports
# getting all requisites directly
requisites = list()
requisite_ids = list()
all_requisites = self._get_requisites_recursively(objective_id, requisites, requisite_ids)
return objects.ObjectiveList(all_requisites) |
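`_get_requisites_recursively` is not shown here; a minimal sketch of the transitive walk it presumably performs, over a plain dict mapping objective ids to their immediate requisite ids (the graph below is made up):

def all_requisites(graph, objective_id, seen=None):
    seen = set() if seen is None else seen
    result = []
    for req in graph.get(objective_id, []):
        if req not in seen:  # guards against duplicates and cycles
            seen.add(req)
            result.append(req)
            result.extend(all_requisites(graph, req, seen))
    return result

graph = {'calculus': ['algebra'], 'algebra': ['arithmetic'], 'arithmetic': []}
print(all_requisites(graph, 'calculus'))  # ['algebra', 'arithmetic']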
def pca_to_mapping(pca,**extra_props):
"""
A helper to return a mapping of a PCA result set suitable for
reconstructing a planar error surface in other software packages
kwargs: method (defaults to sampling axes)
"""
from .axes import sampling_axes
method = extra_props.pop('method',sampling_axes)
return dict(
axes=pca.axes.tolist(),
covariance=method(pca).tolist(),
**extra_props) | def function[pca_to_mapping, parameter[pca]]:
constant[
A helper to return a mapping of a PCA result set suitable for
reconstructing a planar error surface in other software packages
kwargs: method (defaults to sampling axes)
]
from relative_module[axes] import module[sampling_axes]
variable[method] assign[=] call[name[extra_props].pop, parameter[constant[method], name[sampling_axes]]]
return[call[name[dict], parameter[]]] | keyword[def] identifier[pca_to_mapping] ( identifier[pca] ,** identifier[extra_props] ):
literal[string]
keyword[from] . identifier[axes] keyword[import] identifier[sampling_axes]
identifier[method] = identifier[extra_props] . identifier[pop] ( literal[string] , identifier[sampling_axes] )
keyword[return] identifier[dict] (
identifier[axes] = identifier[pca] . identifier[axes] . identifier[tolist] (),
identifier[covariance] = identifier[method] ( identifier[pca] ). identifier[tolist] (),
** identifier[extra_props] ) | def pca_to_mapping(pca, **extra_props):
"""
A helper to return a mapping of a PCA result set suitable for
reconstructing a planar error surface in other software packages
kwargs: method (defaults to sampling axes)
"""
from .axes import sampling_axes
method = extra_props.pop('method', sampling_axes)
return dict(axes=pca.axes.tolist(), covariance=method(pca).tolist(), **extra_props) |
def git_root(self):
"""
Find the root git folder
"""
if not getattr(self, "_git_folder", None):
root_folder = os.path.abspath(self.parent_dir)
while not os.path.exists(os.path.join(root_folder, '.git')):
if root_folder == '/':
raise HarpoonError("Couldn't find a .git folder", start_at=self.parent_dir)
root_folder = os.path.dirname(root_folder)
self._git_folder = root_folder
return self._git_folder | def function[git_root, parameter[self]]:
constant[
Find the root git folder
]
if <ast.UnaryOp object at 0x7da18ede5990> begin[:]
variable[root_folder] assign[=] call[name[os].path.abspath, parameter[name[self].parent_dir]]
while <ast.UnaryOp object at 0x7da18ede6860> begin[:]
if compare[name[root_folder] equal[==] constant[/]] begin[:]
<ast.Raise object at 0x7da18ede79a0>
variable[root_folder] assign[=] call[name[os].path.dirname, parameter[name[root_folder]]]
name[self]._git_folder assign[=] name[root_folder]
return[name[self]._git_folder] | keyword[def] identifier[git_root] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ):
identifier[root_folder] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[self] . identifier[parent_dir] )
keyword[while] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root_folder] , literal[string] )):
keyword[if] identifier[root_folder] == literal[string] :
keyword[raise] identifier[HarpoonError] ( literal[string] , identifier[start_at] = identifier[self] . identifier[parent_dir] )
identifier[root_folder] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[root_folder] )
identifier[self] . identifier[_git_folder] = identifier[root_folder]
keyword[return] identifier[self] . identifier[_git_folder] | def git_root(self):
"""
Find the root git folder
"""
if not getattr(self, '_git_folder', None):
root_folder = os.path.abspath(self.parent_dir)
while not os.path.exists(os.path.join(root_folder, '.git')):
if root_folder == '/':
raise HarpoonError("Couldn't find a .git folder", start_at=self.parent_dir) # depends on [control=['if'], data=[]]
root_folder = os.path.dirname(root_folder) # depends on [control=['while'], data=[]]
self._git_folder = root_folder # depends on [control=['if'], data=[]]
return self._git_folder |
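The same upward search, as a self-contained function without the class plumbing (HarpoonError replaced with a plain exception):

import os

def find_git_root(start_dir):
    folder = os.path.abspath(start_dir)
    while not os.path.exists(os.path.join(folder, '.git')):
        if folder == os.path.dirname(folder):  # reached the filesystem root
            raise RuntimeError("Couldn't find a .git folder above %s" % start_dir)
        folder = os.path.dirname(folder)
    return folder

Comparing against os.path.dirname(folder) instead of the literal '/' keeps the loop terminating on Windows drive roots as well as on POSIX.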
def t_NAMESPACE(self, t):
r"([0-9a-zA-Z_])+(?=::)"
t.endlexpos = t.lexpos + len(t.value)
return t | def function[t_NAMESPACE, parameter[self, t]]:
constant[([0-9a-zA-Z_])+(?=::)]
name[t].endlexpos assign[=] binary_operation[name[t].lexpos + call[name[len], parameter[name[t].value]]]
return[name[t]] | keyword[def] identifier[t_NAMESPACE] ( identifier[self] , identifier[t] ):
literal[string]
identifier[t] . identifier[endlexpos] = identifier[t] . identifier[lexpos] + identifier[len] ( identifier[t] . identifier[value] )
keyword[return] identifier[t] | def t_NAMESPACE(self, t):
"""([0-9a-zA-Z_])+(?=::)"""
t.endlexpos = t.lexpos + len(t.value)
return t |
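This looks like a PLY lexer rule, where the docstring of a t_ function is the token's regular expression. The (?=::) lookahead matches the namespace name without consuming the '::' separator, leaving it for the next token rule; a quick demonstration with re alone:

import re

pattern = re.compile(r"([0-9a-zA-Z_])+(?=::)")
m = pattern.match("std::vector")
print(m.group(0), m.end())  # 'std' 3 -- the '::' is left unconsumed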
def check_version_info(redis_client):
"""Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
different versions of Python, pyarrow, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch.
"""
redis_reply = redis_client.get("VERSION_INFO")
# Don't do the check if there is no version information in Redis. This
# is to make it easier to do things like start the processes by hand.
if redis_reply is None:
return
true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
version_info = _compute_version_info()
if version_info != true_version_info:
node_ip_address = ray.services.get_node_ip_address()
error_message = ("Version mismatch: The cluster was started with:\n"
" Ray: " + true_version_info[0] + "\n"
" Python: " + true_version_info[1] + "\n"
" Pyarrow: " + str(true_version_info[2]) + "\n"
"This process on node " + node_ip_address +
" was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n"
" Pyarrow: " + str(version_info[2]))
if version_info[:2] != true_version_info[:2]:
raise Exception(error_message)
else:
logger.warning(error_message) | def function[check_version_info, parameter[redis_client]]:
constant[Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
different versions of Python, pyarrow, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch.
]
variable[redis_reply] assign[=] call[name[redis_client].get, parameter[constant[VERSION_INFO]]]
if compare[name[redis_reply] is constant[None]] begin[:]
return[None]
variable[true_version_info] assign[=] call[name[tuple], parameter[call[name[json].loads, parameter[call[name[ray].utils.decode, parameter[name[redis_reply]]]]]]]
variable[version_info] assign[=] call[name[_compute_version_info], parameter[]]
if compare[name[version_info] not_equal[!=] name[true_version_info]] begin[:]
variable[node_ip_address] assign[=] call[name[ray].services.get_node_ip_address, parameter[]]
variable[error_message] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Version mismatch: The cluster was started with:
Ray: ] + call[name[true_version_info]][constant[0]]] + constant[
Python: ]] + call[name[true_version_info]][constant[1]]] + constant[
Pyarrow: ]] + call[name[str], parameter[call[name[true_version_info]][constant[2]]]]] + constant[
This process on node ]] + name[node_ip_address]] + constant[ was started with:]] + constant[
Ray: ]] + call[name[version_info]][constant[0]]] + constant[
Python: ]] + call[name[version_info]][constant[1]]] + constant[
Pyarrow: ]] + call[name[str], parameter[call[name[version_info]][constant[2]]]]]
if compare[call[name[version_info]][<ast.Slice object at 0x7da18fe92d70>] not_equal[!=] call[name[true_version_info]][<ast.Slice object at 0x7da18fe93490>]] begin[:]
<ast.Raise object at 0x7da18fe906a0> | keyword[def] identifier[check_version_info] ( identifier[redis_client] ):
literal[string]
identifier[redis_reply] = identifier[redis_client] . identifier[get] ( literal[string] )
keyword[if] identifier[redis_reply] keyword[is] keyword[None] :
keyword[return]
identifier[true_version_info] = identifier[tuple] ( identifier[json] . identifier[loads] ( identifier[ray] . identifier[utils] . identifier[decode] ( identifier[redis_reply] )))
identifier[version_info] = identifier[_compute_version_info] ()
keyword[if] identifier[version_info] != identifier[true_version_info] :
identifier[node_ip_address] = identifier[ray] . identifier[services] . identifier[get_node_ip_address] ()
identifier[error_message] =( literal[string]
literal[string] + identifier[true_version_info] [ literal[int] ]+ literal[string]
literal[string] + identifier[true_version_info] [ literal[int] ]+ literal[string]
literal[string] + identifier[str] ( identifier[true_version_info] [ literal[int] ])+ literal[string]
literal[string] + identifier[node_ip_address] +
literal[string] + literal[string]
literal[string] + identifier[version_info] [ literal[int] ]+ literal[string]
literal[string] + identifier[version_info] [ literal[int] ]+ literal[string]
literal[string] + identifier[str] ( identifier[version_info] [ literal[int] ]))
keyword[if] identifier[version_info] [: literal[int] ]!= identifier[true_version_info] [: literal[int] ]:
keyword[raise] identifier[Exception] ( identifier[error_message] )
keyword[else] :
identifier[logger] . identifier[warning] ( identifier[error_message] ) | def check_version_info(redis_client):
"""Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
different versions of Python, pyarrow, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch.
"""
redis_reply = redis_client.get('VERSION_INFO')
# Don't do the check if there is no version information in Redis. This
# is to make it easier to do things like start the processes by hand.
if redis_reply is None:
return # depends on [control=['if'], data=[]]
true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
version_info = _compute_version_info()
if version_info != true_version_info:
node_ip_address = ray.services.get_node_ip_address()
error_message = 'Version mismatch: The cluster was started with:\n Ray: ' + true_version_info[0] + '\n Python: ' + true_version_info[1] + '\n Pyarrow: ' + str(true_version_info[2]) + '\nThis process on node ' + node_ip_address + ' was started with:' + '\n Ray: ' + version_info[0] + '\n Python: ' + version_info[1] + '\n Pyarrow: ' + str(version_info[2])
if version_info[:2] != true_version_info[:2]:
raise Exception(error_message) # depends on [control=['if'], data=[]]
else:
logger.warning(error_message) # depends on [control=['if'], data=['version_info', 'true_version_info']] |
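The mismatch policy above: differences in the Ray or Python versions (the first two tuple slots) are fatal, while a pyarrow difference only logs a warning. A tiny self-contained illustration with made-up version strings:

true_info = ('0.7.0', '3.6.8', '0.13.0')
local_info = ('0.7.0', '3.6.8', '0.14.0')

if local_info != true_info:
    if local_info[:2] != true_info[:2]:
        raise Exception('fatal version mismatch')
    else:
        print('warning: pyarrow versions differ, continuing')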
def _step(self):
"""
A single step in the loop.
        Basically gets an input bag, sends it to the node, and interprets the results.
"""
# Pull and check data
input_bag = self._get()
# Sent through the stack
results = self._stack(input_bag)
# self._exec_time += timer.duration
# Put data onto output channels
if isinstance(results, GeneratorType):
while True:
try:
                    # if the kill flag was set, stop iterating.
if self._killed:
break
result = next(results)
except StopIteration:
# That's not an error, we're just done.
break
else:
# Push data (in case of an iterator)
self._put(self._cast(input_bag, result))
elif results:
# Push data (returned value)
self._put(self._cast(input_bag, results))
else:
            # no result was produced, but the execution still went through; useful for stats.
# self._exec_count += 1
pass | def function[_step, parameter[self]]:
constant[
A single step in the loop.
Basically gets an input bag, send it to the node, interpret the results.
]
variable[input_bag] assign[=] call[name[self]._get, parameter[]]
variable[results] assign[=] call[name[self]._stack, parameter[name[input_bag]]]
if call[name[isinstance], parameter[name[results], name[GeneratorType]]] begin[:]
while constant[True] begin[:]
<ast.Try object at 0x7da20e9b1990> | keyword[def] identifier[_step] ( identifier[self] ):
literal[string]
identifier[input_bag] = identifier[self] . identifier[_get] ()
identifier[results] = identifier[self] . identifier[_stack] ( identifier[input_bag] )
keyword[if] identifier[isinstance] ( identifier[results] , identifier[GeneratorType] ):
keyword[while] keyword[True] :
keyword[try] :
keyword[if] identifier[self] . identifier[_killed] :
keyword[break]
identifier[result] = identifier[next] ( identifier[results] )
keyword[except] identifier[StopIteration] :
keyword[break]
keyword[else] :
identifier[self] . identifier[_put] ( identifier[self] . identifier[_cast] ( identifier[input_bag] , identifier[result] ))
keyword[elif] identifier[results] :
identifier[self] . identifier[_put] ( identifier[self] . identifier[_cast] ( identifier[input_bag] , identifier[results] ))
keyword[else] :
keyword[pass] | def _step(self):
"""
A single step in the loop.
Basically gets an input bag, send it to the node, interpret the results.
"""
# Pull and check data
input_bag = self._get()
# Sent through the stack
results = self._stack(input_bag)
# self._exec_time += timer.duration
# Put data onto output channels
if isinstance(results, GeneratorType):
while True:
try:
# if kill flag was step, stop iterating.
if self._killed:
break # depends on [control=['if'], data=[]]
result = next(results) # depends on [control=['try'], data=[]]
except StopIteration:
# That's not an error, we're just done.
break # depends on [control=['except'], data=[]]
else:
# Push data (in case of an iterator)
self._put(self._cast(input_bag, result)) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
elif results:
# Push data (returned value)
self._put(self._cast(input_bag, results)) # depends on [control=['if'], data=[]]
else:
# case with no result, an execution went through anyway, use for stats.
# self._exec_count += 1
pass |
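The stack may hand back either a single value or a generator; a stripped-down, runnable illustration of that dispatch (the per-item kill-flag check and the input/output casting are omitted here):

from types import GeneratorType

def run_step(stack, input_bag, put):
    results = stack(input_bag)
    if isinstance(results, GeneratorType):
        for result in results:  # drain the generator, one output per item
            put(result)
    elif results:
        put(results)  # single returned value

def explode(bag):
    for item in bag:
        yield item * 2

out = []
run_step(explode, [1, 2, 3], out.append)
print(out)  # [2, 4, 6]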
def xmatch_search(lcc_server,
file_to_upload,
xmatch_dist_arcsec=3.0,
result_visibility='unlisted',
email_when_done=False,
collections=None,
columns=None,
filters=None,
sortspec=None,
limitspec=None,
samplespec=None,
download_data=True,
outdir=None,
maxtimeout=300.0,
refresh=15.0):
'''This runs a cross-match search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
file_to_upload : str
This is the path to a text file containing objectid, RA, declination
rows for the objects to cross-match against the LCC-Server
collections. This should follow the format of the following example::
# example object and coordinate list
# objectid ra dec
aaa 289.99698 44.99839
bbb 293.358 -23.206
ccc 294.197 +23.181
ddd 19 25 27.9129 +42 47 03.693
eee 19:25:27 -42:47:03.21
# .
# .
# .
# etc. lines starting with '#' will be ignored
# (max 5000 objects)
xmatch_dist_arcsec : float
This is the maximum distance in arcseconds to consider when
cross-matching objects in the uploaded file to the LCC-Server's
collections. The maximum allowed distance is 30 arcseconds. Multiple
matches to an uploaded object are possible and will be returned in order
of increasing distance grouped by input `objectid`.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
This sets the column to sort the results by. For cone_search, the
default column and sort order are 'dist_arcsec' and 'asc', meaning the
distance from the search center in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
'''
with open(file_to_upload) as infd:
xmq = infd.read()
# check the number of lines in the input
xmqlines = len(xmq.split('\n')[:-1])
if xmqlines > 5000:
LOGERROR('you have more than 5000 lines in the file to upload: %s' %
file_to_upload)
return None, None, None
# turn the input into a param dict
params = {'xmq':xmq,
'xmd':xmatch_dist_arcsec}
if collections:
params['collections'] = collections
if columns:
params['columns'] = columns
if filters:
params['filters'] = filters
if sortspec:
params['sortspec'] = json.dumps([sortspec])
if samplespec:
params['samplespec'] = int(samplespec)
if limitspec:
params['limitspec'] = int(limitspec)
params['visibility'] = result_visibility
params['emailwhendone'] = email_when_done
# we won't wait for the LC ZIP to complete if email_when_done = True
if email_when_done:
download_data = False
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# hit the server
api_url = '%s/api/xmatch' % lcc_server
searchresult = submit_post_searchquery(api_url, params, apikey)
# check the status of the search
status = searchresult[0]
# now we'll check if we want to download the data
if download_data:
if status == 'ok':
LOGINFO('query complete, downloading associated data...')
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if pkl:
return searchresult[1], csv, lczip, pkl
else:
return searchresult[1], csv, lczip
elif status == 'background':
LOGINFO('query is not yet complete, '
'waiting up to %.1f minutes, '
'updates every %s seconds (hit Ctrl+C to cancel)...' %
(maxtimeout/60.0, refresh))
timewaited = 0.0
while timewaited < maxtimeout:
try:
time.sleep(refresh)
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if (csv and os.path.exists(csv) and
lczip and os.path.exists(lczip)):
LOGINFO('all dataset products collected')
return searchresult[1], csv, lczip
timewaited = timewaited + refresh
except KeyboardInterrupt:
LOGWARNING('abandoned wait for downloading data')
return searchresult[1], None, None
LOGERROR('wait timed out.')
return searchresult[1], None, None
else:
LOGERROR('could not download the data for this query result')
return searchresult[1], None, None
else:
return searchresult[1], None, None | def function[xmatch_search, parameter[lcc_server, file_to_upload, xmatch_dist_arcsec, result_visibility, email_when_done, collections, columns, filters, sortspec, limitspec, samplespec, download_data, outdir, maxtimeout, refresh]]:
constant[This runs a cross-match search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
file_to_upload : str
This is the path to a text file containing objectid, RA, declination
rows for the objects to cross-match against the LCC-Server
collections. This should follow the format of the following example::
# example object and coordinate list
# objectid ra dec
aaa 289.99698 44.99839
bbb 293.358 -23.206
ccc 294.197 +23.181
ddd 19 25 27.9129 +42 47 03.693
eee 19:25:27 -42:47:03.21
# .
# .
# .
# etc. lines starting with '#' will be ignored
# (max 5000 objects)
xmatch_dist_arcsec : float
This is the maximum distance in arcseconds to consider when
cross-matching objects in the uploaded file to the LCC-Server's
collections. The maximum allowed distance is 30 arcseconds. Multiple
matches to an uploaded object are possible and will be returned in order
of increasing distance grouped by input `objectid`.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
This sets the column to sort the results by. For cone_search, the
default column and sort order are 'dist_arcsec' and 'asc', meaning the
distance from the search center in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
]
with call[name[open], parameter[name[file_to_upload]]] begin[:]
variable[xmq] assign[=] call[name[infd].read, parameter[]]
variable[xmqlines] assign[=] call[name[len], parameter[call[call[name[xmq].split, parameter[constant[
]]]][<ast.Slice object at 0x7da1b007c4f0>]]]
if compare[name[xmqlines] greater[>] constant[5000]] begin[:]
call[name[LOGERROR], parameter[binary_operation[constant[you have more than 5000 lines in the file to upload: %s] <ast.Mod object at 0x7da2590d6920> name[file_to_upload]]]]
return[tuple[[<ast.Constant object at 0x7da1b007cdf0>, <ast.Constant object at 0x7da1b007c790>, <ast.Constant object at 0x7da1b007c0a0>]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b007eb00>, <ast.Constant object at 0x7da1b007ca30>], [<ast.Name object at 0x7da1b007ca00>, <ast.Name object at 0x7da1b007ca90>]]
if name[collections] begin[:]
call[name[params]][constant[collections]] assign[=] name[collections]
if name[columns] begin[:]
call[name[params]][constant[columns]] assign[=] name[columns]
if name[filters] begin[:]
call[name[params]][constant[filters]] assign[=] name[filters]
if name[sortspec] begin[:]
call[name[params]][constant[sortspec]] assign[=] call[name[json].dumps, parameter[list[[<ast.Name object at 0x7da1b007d540>]]]]
if name[samplespec] begin[:]
call[name[params]][constant[samplespec]] assign[=] call[name[int], parameter[name[samplespec]]]
if name[limitspec] begin[:]
call[name[params]][constant[limitspec]] assign[=] call[name[int], parameter[name[limitspec]]]
call[name[params]][constant[visibility]] assign[=] name[result_visibility]
call[name[params]][constant[emailwhendone]] assign[=] name[email_when_done]
if name[email_when_done] begin[:]
variable[download_data] assign[=] constant[False]
<ast.Tuple object at 0x7da1b007cb50> assign[=] call[name[check_existing_apikey], parameter[name[lcc_server]]]
if <ast.UnaryOp object at 0x7da1b007c8b0> begin[:]
<ast.Tuple object at 0x7da1b007c160> assign[=] call[name[get_new_apikey], parameter[name[lcc_server]]]
variable[api_url] assign[=] binary_operation[constant[%s/api/xmatch] <ast.Mod object at 0x7da2590d6920> name[lcc_server]]
variable[searchresult] assign[=] call[name[submit_post_searchquery], parameter[name[api_url], name[params], name[apikey]]]
variable[status] assign[=] call[name[searchresult]][constant[0]]
if name[download_data] begin[:]
if compare[name[status] equal[==] constant[ok]] begin[:]
call[name[LOGINFO], parameter[constant[query complete, downloading associated data...]]]
<ast.Tuple object at 0x7da1b007f9d0> assign[=] call[name[retrieve_dataset_files], parameter[name[searchresult]]]
if name[pkl] begin[:]
return[tuple[[<ast.Subscript object at 0x7da1b00dac20>, <ast.Name object at 0x7da1b00d96f0>, <ast.Name object at 0x7da1b00dba00>, <ast.Name object at 0x7da1b00d9300>]]] | keyword[def] identifier[xmatch_search] ( identifier[lcc_server] ,
identifier[file_to_upload] ,
identifier[xmatch_dist_arcsec] = literal[int] ,
identifier[result_visibility] = literal[string] ,
identifier[email_when_done] = keyword[False] ,
identifier[collections] = keyword[None] ,
identifier[columns] = keyword[None] ,
identifier[filters] = keyword[None] ,
identifier[sortspec] = keyword[None] ,
identifier[limitspec] = keyword[None] ,
identifier[samplespec] = keyword[None] ,
identifier[download_data] = keyword[True] ,
identifier[outdir] = keyword[None] ,
identifier[maxtimeout] = literal[int] ,
identifier[refresh] = literal[int] ):
literal[string]
keyword[with] identifier[open] ( identifier[file_to_upload] ) keyword[as] identifier[infd] :
identifier[xmq] = identifier[infd] . identifier[read] ()
identifier[xmqlines] = identifier[len] ( identifier[xmq] . identifier[split] ( literal[string] )[:- literal[int] ])
keyword[if] identifier[xmqlines] > literal[int] :
identifier[LOGERROR] ( literal[string] %
identifier[file_to_upload] )
keyword[return] keyword[None] , keyword[None] , keyword[None]
identifier[params] ={ literal[string] : identifier[xmq] ,
literal[string] : identifier[xmatch_dist_arcsec] }
keyword[if] identifier[collections] :
identifier[params] [ literal[string] ]= identifier[collections]
keyword[if] identifier[columns] :
identifier[params] [ literal[string] ]= identifier[columns]
keyword[if] identifier[filters] :
identifier[params] [ literal[string] ]= identifier[filters]
keyword[if] identifier[sortspec] :
identifier[params] [ literal[string] ]= identifier[json] . identifier[dumps] ([ identifier[sortspec] ])
keyword[if] identifier[samplespec] :
identifier[params] [ literal[string] ]= identifier[int] ( identifier[samplespec] )
keyword[if] identifier[limitspec] :
identifier[params] [ literal[string] ]= identifier[int] ( identifier[limitspec] )
identifier[params] [ literal[string] ]= identifier[result_visibility]
identifier[params] [ literal[string] ]= identifier[email_when_done]
keyword[if] identifier[email_when_done] :
identifier[download_data] = keyword[False]
identifier[have_apikey] , identifier[apikey] , identifier[expires] = identifier[check_existing_apikey] ( identifier[lcc_server] )
keyword[if] keyword[not] identifier[have_apikey] :
identifier[apikey] , identifier[expires] = identifier[get_new_apikey] ( identifier[lcc_server] )
identifier[api_url] = literal[string] % identifier[lcc_server]
identifier[searchresult] = identifier[submit_post_searchquery] ( identifier[api_url] , identifier[params] , identifier[apikey] )
identifier[status] = identifier[searchresult] [ literal[int] ]
keyword[if] identifier[download_data] :
keyword[if] identifier[status] == literal[string] :
identifier[LOGINFO] ( literal[string] )
identifier[csv] , identifier[lczip] , identifier[pkl] = identifier[retrieve_dataset_files] ( identifier[searchresult] ,
identifier[outdir] = identifier[outdir] ,
identifier[apikey] = identifier[apikey] )
keyword[if] identifier[pkl] :
keyword[return] identifier[searchresult] [ literal[int] ], identifier[csv] , identifier[lczip] , identifier[pkl]
keyword[else] :
keyword[return] identifier[searchresult] [ literal[int] ], identifier[csv] , identifier[lczip]
keyword[elif] identifier[status] == literal[string] :
identifier[LOGINFO] ( literal[string]
literal[string]
literal[string] %
( identifier[maxtimeout] / literal[int] , identifier[refresh] ))
identifier[timewaited] = literal[int]
keyword[while] identifier[timewaited] < identifier[maxtimeout] :
keyword[try] :
identifier[time] . identifier[sleep] ( identifier[refresh] )
identifier[csv] , identifier[lczip] , identifier[pkl] = identifier[retrieve_dataset_files] ( identifier[searchresult] ,
identifier[outdir] = identifier[outdir] ,
identifier[apikey] = identifier[apikey] )
keyword[if] ( identifier[csv] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[csv] ) keyword[and]
identifier[lczip] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[lczip] )):
identifier[LOGINFO] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], identifier[csv] , identifier[lczip]
identifier[timewaited] = identifier[timewaited] + identifier[refresh]
keyword[except] identifier[KeyboardInterrupt] :
identifier[LOGWARNING] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None]
identifier[LOGERROR] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None]
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None]
keyword[else] :
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None] | def xmatch_search(lcc_server, file_to_upload, xmatch_dist_arcsec=3.0, result_visibility='unlisted', email_when_done=False, collections=None, columns=None, filters=None, sortspec=None, limitspec=None, samplespec=None, download_data=True, outdir=None, maxtimeout=300.0, refresh=15.0):
"""This runs a cross-match search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
file_to_upload : str
This is the path to a text file containing objectid, RA, declination
rows for the objects to cross-match against the LCC-Server
collections. This should follow the format of the following example::
# example object and coordinate list
# objectid ra dec
aaa 289.99698 44.99839
bbb 293.358 -23.206
ccc 294.197 +23.181
ddd 19 25 27.9129 +42 47 03.693
eee 19:25:27 -42:47:03.21
# .
# .
# .
# etc. lines starting with '#' will be ignored
# (max 5000 objects)
xmatch_dist_arcsec : float
This is the maximum distance in arcseconds to consider when
cross-matching objects in the uploaded file to the LCC-Server's
collections. The maximum allowed distance is 30 arcseconds. Multiple
matches to an uploaded object are possible and will be returned in order
of increasing distance grouped by input `objectid`.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
This sets the column to sort the results by. For cone_search, the
default column and sort order are 'dist_arcsec' and 'asc', meaning the
distance from the search center in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
"""
with open(file_to_upload) as infd:
xmq = infd.read() # depends on [control=['with'], data=['infd']]
# check the number of lines in the input
xmqlines = len(xmq.split('\n')[:-1])
if xmqlines > 5000:
LOGERROR('you have more than 5000 lines in the file to upload: %s' % file_to_upload)
return (None, None, None) # depends on [control=['if'], data=[]]
# turn the input into a param dict
params = {'xmq': xmq, 'xmd': xmatch_dist_arcsec}
if collections:
params['collections'] = collections # depends on [control=['if'], data=[]]
if columns:
params['columns'] = columns # depends on [control=['if'], data=[]]
if filters:
params['filters'] = filters # depends on [control=['if'], data=[]]
if sortspec:
params['sortspec'] = json.dumps([sortspec]) # depends on [control=['if'], data=[]]
if samplespec:
params['samplespec'] = int(samplespec) # depends on [control=['if'], data=[]]
if limitspec:
params['limitspec'] = int(limitspec) # depends on [control=['if'], data=[]]
params['visibility'] = result_visibility
params['emailwhendone'] = email_when_done
# we won't wait for the LC ZIP to complete if email_when_done = True
if email_when_done:
download_data = False # depends on [control=['if'], data=[]]
# check if we have an API key already
(have_apikey, apikey, expires) = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
(apikey, expires) = get_new_apikey(lcc_server) # depends on [control=['if'], data=[]]
# hit the server
api_url = '%s/api/xmatch' % lcc_server
searchresult = submit_post_searchquery(api_url, params, apikey)
# check the status of the search
status = searchresult[0]
# now we'll check if we want to download the data
if download_data:
if status == 'ok':
LOGINFO('query complete, downloading associated data...')
(csv, lczip, pkl) = retrieve_dataset_files(searchresult, outdir=outdir, apikey=apikey)
if pkl:
return (searchresult[1], csv, lczip, pkl) # depends on [control=['if'], data=[]]
else:
return (searchresult[1], csv, lczip) # depends on [control=['if'], data=[]]
elif status == 'background':
LOGINFO('query is not yet complete, waiting up to %.1f minutes, updates every %s seconds (hit Ctrl+C to cancel)...' % (maxtimeout / 60.0, refresh))
timewaited = 0.0
while timewaited < maxtimeout:
try:
time.sleep(refresh)
(csv, lczip, pkl) = retrieve_dataset_files(searchresult, outdir=outdir, apikey=apikey)
if csv and os.path.exists(csv) and lczip and os.path.exists(lczip):
LOGINFO('all dataset products collected')
return (searchresult[1], csv, lczip) # depends on [control=['if'], data=[]]
timewaited = timewaited + refresh # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
LOGWARNING('abandoned wait for downloading data')
return (searchresult[1], None, None) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=['timewaited']]
LOGERROR('wait timed out.')
return (searchresult[1], None, None) # depends on [control=['if'], data=[]]
else:
LOGERROR('could not download the data for this query result')
return (searchresult[1], None, None) # depends on [control=['if'], data=[]]
else:
return (searchresult[1], None, None) |
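A hypothetical end-to-end call, using the example server URL from the docstring and a small upload file written on the fly (running this performs real network requests and needs an LCC-Server API key; the column and filter names are taken from the docstring's examples):

with open('targets.txt', 'w') as outfd:
    outfd.write('# objectid ra dec\n')
    outfd.write('aaa 289.99698 44.99839\n')
    outfd.write('bbb 293.358 -23.206\n')

result = xmatch_search(
    'https://data.hatsurveys.org',
    'targets.txt',
    xmatch_dist_arcsec=5.0,
    columns=['sdssr', 'propermotion'],
    filters="(sdssr lt 13.0)",
)
# result -> (status dict, CSV path, LC ZIP path[, pickle path])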
def capture_dash_in_url_name(self, node):
"""
Capture dash in URL name
"""
for keyword in node.keywords:
if keyword.arg == 'name' and '-' in keyword.value.s:
return DJ04(
lineno=node.lineno,
col=node.col_offset,
) | def function[capture_dash_in_url_name, parameter[self, node]]:
constant[
Capture dash in URL name
]
for taget[name[keyword]] in starred[name[node].keywords] begin[:]
if <ast.BoolOp object at 0x7da1b0780bb0> begin[:]
return[call[name[DJ04], parameter[]]] | keyword[def] identifier[capture_dash_in_url_name] ( identifier[self] , identifier[node] ):
literal[string]
keyword[for] identifier[keyword] keyword[in] identifier[node] . identifier[keywords] :
keyword[if] identifier[keyword] . identifier[arg] == literal[string] keyword[and] literal[string] keyword[in] identifier[keyword] . identifier[value] . identifier[s] :
keyword[return] identifier[DJ04] (
identifier[lineno] = identifier[node] . identifier[lineno] ,
identifier[col] = identifier[node] . identifier[col_offset] ,
) | def capture_dash_in_url_name(self, node):
"""
Capture dash in URL name
"""
for keyword in node.keywords:
if keyword.arg == 'name' and '-' in keyword.value.s:
return DJ04(lineno=node.lineno, col=node.col_offset) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['keyword']] |
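To see what this checker inspects, one can parse a Django-style url() call with the standard-library ast module and walk its keywords; the source string below is illustrative:

import ast

src = "url(r'^about/$', about_view, name='about-page')"
call = ast.parse(src, mode='eval').body  # an ast.Call node
for kw in call.keywords:
    # kw.value is an ast.Constant on Python 3.8+ (the older .s attribute
    # used above still works as a deprecated alias)
    if kw.arg == 'name' and '-' in kw.value.value:
        print('dash found in URL name at line', call.lineno)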
def tmpdir(prefix='npythy_tempdir_', delete=True):
'''
tmpdir() creates a temporary directory and yields its path. At python exit, the directory and
all of its contents are recursively deleted (so long as the the normal python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
'''
path = tempfile.mkdtemp(prefix=prefix)
if not os.path.isdir(path): raise ValueError('Could not find or create temp directory')
if delete: atexit.register(shutil.rmtree, path)
return path | def function[tmpdir, parameter[prefix, delete]]:
constant[
    tmpdir() creates a temporary directory and returns its path. At Python exit, the directory and
    all of its contents are recursively deleted (so long as the normal Python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
]
variable[path] assign[=] call[name[tempfile].mkdtemp, parameter[]]
if <ast.UnaryOp object at 0x7da18eb54190> begin[:]
<ast.Raise object at 0x7da18eb56620>
if name[delete] begin[:]
call[name[atexit].register, parameter[name[shutil].rmtree, name[path]]]
return[name[path]] | keyword[def] identifier[tmpdir] ( identifier[prefix] = literal[string] , identifier[delete] = keyword[True] ):
literal[string]
identifier[path] = identifier[tempfile] . identifier[mkdtemp] ( identifier[prefix] = identifier[prefix] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ): keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[delete] : identifier[atexit] . identifier[register] ( identifier[shutil] . identifier[rmtree] , identifier[path] )
keyword[return] identifier[path] | def tmpdir(prefix='npythy_tempdir_', delete=True):
"""
    tmpdir() creates a temporary directory and returns its path. At Python exit, the directory and
    all of its contents are recursively deleted (so long as the normal Python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
"""
path = tempfile.mkdtemp(prefix=prefix)
if not os.path.isdir(path):
raise ValueError('Could not find or create temp directory') # depends on [control=['if'], data=[]]
if delete:
atexit.register(shutil.rmtree, path) # depends on [control=['if'], data=[]]
return path |
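A short usage sketch for the helper above, assuming it is in scope (the prefix and file name are illustrative):

import os

work = tmpdir(prefix='scratch_')  # created now, removed at interpreter exit
with open(os.path.join(work, 'partial.txt'), 'w') as f:
    f.write('intermediate results')
# no manual cleanup needed: atexit runs shutil.rmtree(work) on normal exit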
def sample_stats_prior_to_xarray(self):
"""Extract sample_stats_prior from prior."""
prior = self.prior
prior_model = self.prior_model
data = get_sample_stats_stan3(prior, model=prior_model)
return dict_to_dataset(data, library=self.stan, coords=self.coords, dims=self.dims) | def function[sample_stats_prior_to_xarray, parameter[self]]:
constant[Extract sample_stats_prior from prior.]
variable[prior] assign[=] name[self].prior
variable[prior_model] assign[=] name[self].prior_model
variable[data] assign[=] call[name[get_sample_stats_stan3], parameter[name[prior]]]
return[call[name[dict_to_dataset], parameter[name[data]]]] | keyword[def] identifier[sample_stats_prior_to_xarray] ( identifier[self] ):
literal[string]
identifier[prior] = identifier[self] . identifier[prior]
identifier[prior_model] = identifier[self] . identifier[prior_model]
identifier[data] = identifier[get_sample_stats_stan3] ( identifier[prior] , identifier[model] = identifier[prior_model] )
keyword[return] identifier[dict_to_dataset] ( identifier[data] , identifier[library] = identifier[self] . identifier[stan] , identifier[coords] = identifier[self] . identifier[coords] , identifier[dims] = identifier[self] . identifier[dims] ) | def sample_stats_prior_to_xarray(self):
"""Extract sample_stats_prior from prior."""
prior = self.prior
prior_model = self.prior_model
data = get_sample_stats_stan3(prior, model=prior_model)
return dict_to_dataset(data, library=self.stan, coords=self.coords, dims=self.dims) |
def sub_bytes(state):
"""
Transformation in the Cipher that processes the State using a nonlinear
byte substitution table (S-box) that operates on each of the State bytes
independently.
"""
state = state.reshape(4, 32)
return fcat(
subword(state[0]),
subword(state[1]),
subword(state[2]),
subword(state[3]),
) | def function[sub_bytes, parameter[state]]:
constant[
Transformation in the Cipher that processes the State using a nonlinear
byte substitution table (S-box) that operates on each of the State bytes
independently.
]
variable[state] assign[=] call[name[state].reshape, parameter[constant[4], constant[32]]]
return[call[name[fcat], parameter[call[name[subword], parameter[call[name[state]][constant[0]]]], call[name[subword], parameter[call[name[state]][constant[1]]]], call[name[subword], parameter[call[name[state]][constant[2]]]], call[name[subword], parameter[call[name[state]][constant[3]]]]]]] | keyword[def] identifier[sub_bytes] ( identifier[state] ):
literal[string]
identifier[state] = identifier[state] . identifier[reshape] ( literal[int] , literal[int] )
keyword[return] identifier[fcat] (
identifier[subword] ( identifier[state] [ literal[int] ]),
identifier[subword] ( identifier[state] [ literal[int] ]),
identifier[subword] ( identifier[state] [ literal[int] ]),
identifier[subword] ( identifier[state] [ literal[int] ]),
) | def sub_bytes(state):
"""
    Transformation in the Cipher that processes the State using a nonlinear
byte substitution table (S-box) that operates on each of the State bytes
independently.
"""
state = state.reshape(4, 32)
return fcat(subword(state[0]), subword(state[1]), subword(state[2]), subword(state[3])) |
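For contrast with the row-wise, bit-level form above, the textbook SubBytes is one table lookup per state byte. A sketch, with the standard 256-entry AES S-box assumed to be supplied by the caller rather than reproduced here:

def sub_bytes_bytewise(state, sbox):
    # state: the 16 bytes of an AES state; sbox: the 256-entry AES table
    return bytes(sbox[b] for b in state)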
def rollback(self):
"""
Rolls back changes to this database.
"""
with self.native(writeAccess=True) as conn:
return self._rollback(conn) | def function[rollback, parameter[self]]:
constant[
Rolls back changes to this database.
]
with call[name[self].native, parameter[]] begin[:]
return[call[name[self]._rollback, parameter[name[conn]]]] | keyword[def] identifier[rollback] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[native] ( identifier[writeAccess] = keyword[True] ) keyword[as] identifier[conn] :
keyword[return] identifier[self] . identifier[_rollback] ( identifier[conn] ) | def rollback(self):
"""
Rolls back changes to this database.
"""
with self.native(writeAccess=True) as conn:
return self._rollback(conn) # depends on [control=['with'], data=['conn']] |
def plot_normal(x=None, mean_x=None,std_x=None,color='red',linewidth=2,alpha=1,bins=20,xlim=False,plot_mean=True,plot_std=False,plot_2std=True,figure=None,annotate=True,histogram=True):
"""
plot a fit of a normal distribution to the data in x.
"""
import pylab
if figure is None:
figure=pylab.figure()
if mean_x is None:
#fit maximum likelihood Normal distribution mean to samples X
mean_x = x.mean() #sample mean
if std_x is None:
#fit maximum likelihood Normal distribution standard deviation to samples X
std_x = x.std() #sample standard deviation
xvals=np.arange(mean_x-5*std_x,mean_x+5*std_x,.001)
yvals=st.norm.pdf(xvals,mean_x,std_x)
#plot normal distribution:
ax = pylab.plot(xvals,yvals,color=color,linewidth=linewidth,alpha=alpha)
if x is not None and histogram:
#plot histogram of x-values
pylab.hist(x,bins,normed=True)
if plot_mean:
#evaluate distribution at the mean:
max_cdf=st.norm.pdf(mean_x,mean_x,std_x)
pylab.plot([mean_x,mean_x],[0,max_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu$', xy=(mean_x+0.6*std_x, 1.0*max_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
    if plot_std:#plot mean +- 1*standard deviation (68% interval)
std_cdf=st.norm.pdf(mean_x+std_x,mean_x,std_x)
pylab.plot([mean_x+std_x,mean_x+std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
pylab.plot([mean_x-std_x,mean_x-std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu+\sigma$', xy=(mean_x+1.6*std_x, 1.5*std_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
if plot_2std:#plot mean +- 2*standard deviations (95% interval)
std2_cdf=st.norm.pdf(mean_x+2*std_x,mean_x,std_x)
pylab.plot([mean_x+2*std_x,mean_x+2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
pylab.plot([mean_x-2*std_x,mean_x-2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu+2\sigma$', xy=(mean_x+2.6*std_x, 1.5*std2_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
    if xlim: #cut off unused space on the x-axis
pylab.xlim([mean_x-4*std_x,mean_x+4*std_x])
return figure | def function[plot_normal, parameter[x, mean_x, std_x, color, linewidth, alpha, bins, xlim, plot_mean, plot_std, plot_2std, figure, annotate, histogram]]:
constant[
plot a fit of a normal distribution to the data in x.
]
import module[pylab]
if compare[name[figure] is constant[None]] begin[:]
variable[figure] assign[=] call[name[pylab].figure, parameter[]]
if compare[name[mean_x] is constant[None]] begin[:]
variable[mean_x] assign[=] call[name[x].mean, parameter[]]
if compare[name[std_x] is constant[None]] begin[:]
variable[std_x] assign[=] call[name[x].std, parameter[]]
variable[xvals] assign[=] call[name[np].arange, parameter[binary_operation[name[mean_x] - binary_operation[constant[5] * name[std_x]]], binary_operation[name[mean_x] + binary_operation[constant[5] * name[std_x]]], constant[0.001]]]
variable[yvals] assign[=] call[name[st].norm.pdf, parameter[name[xvals], name[mean_x], name[std_x]]]
variable[ax] assign[=] call[name[pylab].plot, parameter[name[xvals], name[yvals]]]
if <ast.BoolOp object at 0x7da18f813430> begin[:]
call[name[pylab].hist, parameter[name[x], name[bins]]]
if name[plot_mean] begin[:]
variable[max_cdf] assign[=] call[name[st].norm.pdf, parameter[name[mean_x], name[mean_x], name[std_x]]]
call[name[pylab].plot, parameter[list[[<ast.Name object at 0x7da18f813070>, <ast.Name object at 0x7da18f813d30>]], list[[<ast.Constant object at 0x7da18f813280>, <ast.Name object at 0x7da18f8123e0>]]]]
if name[annotate] begin[:]
call[name[pylab].annotate, parameter[constant[$\mu$]]]
if name[plot_std] begin[:]
variable[std_cdf] assign[=] call[name[st].norm.pdf, parameter[binary_operation[name[mean_x] + name[std_x]], name[mean_x], name[std_x]]]
call[name[pylab].plot, parameter[list[[<ast.BinOp object at 0x7da18f810370>, <ast.BinOp object at 0x7da18f812b00>]], list[[<ast.Constant object at 0x7da18f811780>, <ast.Name object at 0x7da18f811cc0>]]]]
call[name[pylab].plot, parameter[list[[<ast.BinOp object at 0x7da18f812530>, <ast.BinOp object at 0x7da18f812650>]], list[[<ast.Constant object at 0x7da18f8128f0>, <ast.Name object at 0x7da18f811a20>]]]]
if name[annotate] begin[:]
call[name[pylab].annotate, parameter[constant[$\mu+\sigma$]]]
if name[plot_2std] begin[:]
variable[std2_cdf] assign[=] call[name[st].norm.pdf, parameter[binary_operation[name[mean_x] + binary_operation[constant[2] * name[std_x]]], name[mean_x], name[std_x]]]
call[name[pylab].plot, parameter[list[[<ast.BinOp object at 0x7da18f812050>, <ast.BinOp object at 0x7da18f813f40>]], list[[<ast.Constant object at 0x7da18f810610>, <ast.Name object at 0x7da18f8117e0>]]]]
call[name[pylab].plot, parameter[list[[<ast.BinOp object at 0x7da18f811480>, <ast.BinOp object at 0x7da18f811000>]], list[[<ast.Constant object at 0x7da18f812080>, <ast.Name object at 0x7da18f8107f0>]]]]
if name[annotate] begin[:]
call[name[pylab].annotate, parameter[constant[$\mu+2\sigma$]]]
if name[xlim] begin[:]
call[name[pylab].xlim, parameter[list[[<ast.BinOp object at 0x7da20c7c9e40>, <ast.BinOp object at 0x7da20c7cada0>]]]]
return[name[figure]] | keyword[def] identifier[plot_normal] ( identifier[x] = keyword[None] , identifier[mean_x] = keyword[None] , identifier[std_x] = keyword[None] , identifier[color] = literal[string] , identifier[linewidth] = literal[int] , identifier[alpha] = literal[int] , identifier[bins] = literal[int] , identifier[xlim] = keyword[False] , identifier[plot_mean] = keyword[True] , identifier[plot_std] = keyword[False] , identifier[plot_2std] = keyword[True] , identifier[figure] = keyword[None] , identifier[annotate] = keyword[True] , identifier[histogram] = keyword[True] ):
literal[string]
keyword[import] identifier[pylab]
keyword[if] identifier[figure] keyword[is] keyword[None] :
identifier[figure] = identifier[pylab] . identifier[figure] ()
keyword[if] identifier[mean_x] keyword[is] keyword[None] :
identifier[mean_x] = identifier[x] . identifier[mean] ()
keyword[if] identifier[std_x] keyword[is] keyword[None] :
identifier[std_x] = identifier[x] . identifier[std] ()
identifier[xvals] = identifier[np] . identifier[arange] ( identifier[mean_x] - literal[int] * identifier[std_x] , identifier[mean_x] + literal[int] * identifier[std_x] , literal[int] )
identifier[yvals] = identifier[st] . identifier[norm] . identifier[pdf] ( identifier[xvals] , identifier[mean_x] , identifier[std_x] )
identifier[ax] = identifier[pylab] . identifier[plot] ( identifier[xvals] , identifier[yvals] , identifier[color] = identifier[color] , identifier[linewidth] = identifier[linewidth] , identifier[alpha] = identifier[alpha] )
keyword[if] identifier[x] keyword[is] keyword[not] keyword[None] keyword[and] identifier[histogram] :
identifier[pylab] . identifier[hist] ( identifier[x] , identifier[bins] , identifier[normed] = keyword[True] )
keyword[if] identifier[plot_mean] :
identifier[max_cdf] = identifier[st] . identifier[norm] . identifier[pdf] ( identifier[mean_x] , identifier[mean_x] , identifier[std_x] )
identifier[pylab] . identifier[plot] ([ identifier[mean_x] , identifier[mean_x] ],[ literal[int] , identifier[max_cdf] ], identifier[color] = identifier[color] , identifier[linewidth] = identifier[linewidth] , identifier[alpha] = identifier[alpha] , identifier[linestyle] = literal[string] )
keyword[if] identifier[annotate] :
identifier[pylab] . identifier[annotate] ( literal[string] , identifier[xy] =( identifier[mean_x] + literal[int] * identifier[std_x] , literal[int] * identifier[max_cdf] ),
identifier[horizontalalignment] = literal[string] , identifier[verticalalignment] = literal[string] , identifier[fontsize] = literal[int] , identifier[color] = identifier[color] )
keyword[if] identifier[plot_std] :
identifier[std_cdf] = identifier[st] . identifier[norm] . identifier[pdf] ( identifier[mean_x] + identifier[std_x] , identifier[mean_x] , identifier[std_x] )
identifier[pylab] . identifier[plot] ([ identifier[mean_x] + identifier[std_x] , identifier[mean_x] + identifier[std_x] ],[ literal[int] , identifier[std_cdf] ], identifier[color] = identifier[color] , identifier[linewidth] = identifier[linewidth] , identifier[alpha] = identifier[alpha] , identifier[linestyle] = literal[string] )
identifier[pylab] . identifier[plot] ([ identifier[mean_x] - identifier[std_x] , identifier[mean_x] - identifier[std_x] ],[ literal[int] , identifier[std_cdf] ], identifier[color] = identifier[color] , identifier[linewidth] = identifier[linewidth] , identifier[alpha] = identifier[alpha] , identifier[linestyle] = literal[string] )
keyword[if] identifier[annotate] :
identifier[pylab] . identifier[annotate] ( literal[string] , identifier[xy] =( identifier[mean_x] + literal[int] * identifier[std_x] , literal[int] * identifier[std_cdf] ),
identifier[horizontalalignment] = literal[string] , identifier[verticalalignment] = literal[string] , identifier[fontsize] = literal[int] , identifier[color] = identifier[color] )
keyword[if] identifier[plot_2std] :
identifier[std2_cdf] = identifier[st] . identifier[norm] . identifier[pdf] ( identifier[mean_x] + literal[int] * identifier[std_x] , identifier[mean_x] , identifier[std_x] )
identifier[pylab] . identifier[plot] ([ identifier[mean_x] + literal[int] * identifier[std_x] , identifier[mean_x] + literal[int] * identifier[std_x] ],[ literal[int] , identifier[std2_cdf] ], identifier[color] = identifier[color] , identifier[linewidth] = identifier[linewidth] , identifier[alpha] = identifier[alpha] , identifier[linestyle] = literal[string] )
identifier[pylab] . identifier[plot] ([ identifier[mean_x] - literal[int] * identifier[std_x] , identifier[mean_x] - literal[int] * identifier[std_x] ],[ literal[int] , identifier[std2_cdf] ], identifier[color] = identifier[color] , identifier[linewidth] = identifier[linewidth] , identifier[alpha] = identifier[alpha] , identifier[linestyle] = literal[string] )
keyword[if] identifier[annotate] :
identifier[pylab] . identifier[annotate] ( literal[string] , identifier[xy] =( identifier[mean_x] + literal[int] * identifier[std_x] , literal[int] * identifier[std2_cdf] ),
identifier[horizontalalignment] = literal[string] , identifier[verticalalignment] = literal[string] , identifier[fontsize] = literal[int] , identifier[color] = identifier[color] )
keyword[if] identifier[xlim] :
identifier[pylab] . identifier[xlim] ([ identifier[mean_x] - literal[int] * identifier[std_x] , identifier[mean_x] + literal[int] * identifier[std_x] ])
keyword[return] identifier[figure] | def plot_normal(x=None, mean_x=None, std_x=None, color='red', linewidth=2, alpha=1, bins=20, xlim=False, plot_mean=True, plot_std=False, plot_2std=True, figure=None, annotate=True, histogram=True):
"""
plot a fit of a normal distribution to the data in x.
"""
import pylab
if figure is None:
figure = pylab.figure() # depends on [control=['if'], data=['figure']]
if mean_x is None:
#fit maximum likelihood Normal distribution mean to samples X
mean_x = x.mean() #sample mean # depends on [control=['if'], data=['mean_x']]
if std_x is None:
#fit maximum likelihood Normal distribution standard deviation to samples X
std_x = x.std() #sample standard deviation # depends on [control=['if'], data=['std_x']]
xvals = np.arange(mean_x - 5 * std_x, mean_x + 5 * std_x, 0.001)
yvals = st.norm.pdf(xvals, mean_x, std_x)
#plot normal distribution:
ax = pylab.plot(xvals, yvals, color=color, linewidth=linewidth, alpha=alpha)
if x is not None and histogram:
#plot histogram of x-values
pylab.hist(x, bins, normed=True) # depends on [control=['if'], data=[]]
if plot_mean:
#evaluate distribution at the mean:
max_cdf = st.norm.pdf(mean_x, mean_x, std_x)
pylab.plot([mean_x, mean_x], [0, max_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle='--')
if annotate:
pylab.annotate('$\\mu$', xy=(mean_x + 0.6 * std_x, 1.0 * max_cdf), horizontalalignment='center', verticalalignment='center', fontsize=15, color=color) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    if plot_std: #plot mean +- 1*standard deviation (68% interval)
std_cdf = st.norm.pdf(mean_x + std_x, mean_x, std_x)
pylab.plot([mean_x + std_x, mean_x + std_x], [0, std_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle='--')
pylab.plot([mean_x - std_x, mean_x - std_x], [0, std_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle='--')
if annotate:
pylab.annotate('$\\mu+\\sigma$', xy=(mean_x + 1.6 * std_x, 1.5 * std_cdf), horizontalalignment='center', verticalalignment='center', fontsize=15, color=color) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if plot_2std: #plot mean +- 2*standard deviations (95% interval)
std2_cdf = st.norm.pdf(mean_x + 2 * std_x, mean_x, std_x)
pylab.plot([mean_x + 2 * std_x, mean_x + 2 * std_x], [0, std2_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle='--')
pylab.plot([mean_x - 2 * std_x, mean_x - 2 * std_x], [0, std2_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle='--')
if annotate:
pylab.annotate('$\\mu+2\\sigma$', xy=(mean_x + 2.6 * std_x, 1.5 * std2_cdf), horizontalalignment='center', verticalalignment='center', fontsize=15, color=color) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    if xlim: #cut off unused space on the x-axis
pylab.xlim([mean_x - 4 * std_x, mean_x + 4 * std_x]) # depends on [control=['if'], data=[]]
return figure |
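A stripped-down version of what the helper draws, for reference; it assumes numpy, scipy and matplotlib are available and fits the maximum-likelihood mean and standard deviation the same way:

import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt

x = np.random.normal(loc=2.0, scale=0.5, size=1000)
mu, sigma = x.mean(), x.std()
grid = np.linspace(mu - 4 * sigma, mu + 4 * sigma, 400)
plt.hist(x, bins=20, density=True)            # density replaces the deprecated normed
plt.plot(grid, st.norm.pdf(grid, mu, sigma), color='red', linewidth=2)
plt.axvline(mu, color='red', linestyle='--')  # mark the sample mean
plt.show()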
def batch_map_mean(func, batch_iter, progress_iter_func=None, sum_axis=None,
n_batches=None, prepend_args=None):
"""
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the across-samples mean of the results returned by `func`
    The `sum_axis` argument tells `batch_map_mean` how to process the
    results of `func` before accumulating them:
    - If `sum_axis` is `None`, `func` should return the
    across-samples SUM of the results of operating on the mini-batch,
    e.g. for loss and error it should
    return `(sum([loss0, loss1, ... lossN]), sum([err0, err1, ... errN]))`
    - Otherwise, `sum_axis` should specify the axis or axes over which
    the batch results should be summed, e.g. if `func` returns a
    per-sample loss and error in two arrays
    `[[loss0, loss1, ... lossN], [err0, err1, ... errN]]`, give `sum_axis`
a value of `0` to sum over axis 0 to get the per-batch loss and error.
These results will be accumulated and divided by the number of samples
at the end to get the mean.
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
sum_axis: (default=`None`) int, tuple of ints or None
If an integer or a tuple of integers, the results returned by `func`
will be summed across this axis / these axes before being accumulated;
e.g. if `func` returns an array of per-sample losses, with axis 0
being the sample dimension, passing a value of `0` as `sum_axis`
will cause these results to be summed along axis 0 to get the
per-batch sum before accumulating the losses. The total summed loss
will be divided by the number of samples at the end in order to
compute the mean loss.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The sum of the results of the function `fn` divided by the number of
samples processed, e.g.
`(sum(outA_per_batch) / n_samples,
sum(outB_per_batch) / n_samples,
...)`
Examples
--------
    The following examples will demonstrate the use of `batch_map_mean`
to compute binary cross entropy loss over a data set.
A few variants will be demonstrated:
- the default behaviour in which the function being applied should
return the sum over the batch sample axis
    - having the function return per-sample results and having
`mean_batch_map` perform the sum operation. This is easier to
understand but less efficient as a Theano function would have to
move more data back from the GPU.
- limiting the number of batches that will be processed in order to get
partial results when dealing with a large data set
Define a function to compute the per-sample binary cross entropy
loss:
>>> def binary_crossentropy_loss(pred, target):
... e = -target * np.log(pred) - (1 - target) * np.log(1 - pred)
... return e.mean(axis=1)
Now define a function that computes the *SUM* of the binary cross
entropy losses over the sample axis (axis 0), as the default
    behaviour of `batch_map_mean` will sum them up and divide by the
number of samples at the end:
>>> def binary_crossentropy_loss_sum(pred, target):
... return binary_crossentropy_loss(pred, target).sum()
Construct prediction and target data
>>> pred = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> tgt = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> ds = ArrayDataSource([pred, tgt])
Apply the loss sum function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss_sum, batch_iter)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
    Have `batch_map_mean` sum over axis 0:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss, batch_iter,
... sum_axis=0)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
    Construct a large data set and use `batch_map_mean` with `n_batches` to compute partial results:
>>> pred_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> tgt_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> ds_large = ArrayDataSource([pred_large, tgt_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_loss = batch_map_mean(binary_crossentropy_loss_sum,
... iter_large, n_batches=2)
... j = i * 10
... assert np.allclose(
... partial_loss, binary_crossentropy_loss(
... pred_large[j:j + 10], tgt_large[j:j + 10]).mean())
"""
# Accumulator for results and number of samples
results_accum = None
n_samples_accum = 0
# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
batch_iter = progress_iter_func(batch_iter, total=n_batches,
leave=False)
# Train on each batch
n_processed = 0
for batch in batch_iter:
# Get number of samples in batch; can vary
batch_n = _length_of_batch(batch)
# Apply on batch and check the type of the results
if prepend_args is not None:
batch_results = func(*(prepend_args + tuple(batch)))
else:
batch_results = func(*batch)
if batch_results is None:
pass
elif isinstance(batch_results, (np.ndarray, float)):
batch_results = (batch_results,)
elif isinstance(batch_results, tuple):
pass
else:
raise TypeError(
'Batch function should return a tuple of results, a '
'single result as a NumPy array or float, or None, '
'not {}'.format(type(batch_results)))
# Accumulate results and number of samples
if results_accum is None:
# Initialise the accumulator to the batch results if `func`
# returns summed results or if it returned None;
# don't attempt to iterate over None and sum each item
if batch_results is None:
pass
elif sum_axis is None:
results_accum = list(batch_results)
else:
results_accum = [br.sum(axis=sum_axis) for br in batch_results]
else:
if batch_results is not None:
for i in range(len(results_accum)):
br = batch_results[i]
if sum_axis is not None:
br = br.sum(axis=sum_axis)
results_accum[i] += br
n_samples_accum += batch_n
n_processed += 1
if n_batches is not None and n_processed >= n_batches:
break
# Divide by the number of training examples used to compute mean
if results_accum is not None:
results_accum = tuple([np.array(r).astype(float) / n_samples_accum
for r in results_accum])
return results_accum | def function[batch_map_mean, parameter[func, batch_iter, progress_iter_func, sum_axis, n_batches, prepend_args]]:
constant[
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the across-samples mean of the results returned by `func`
    The `sum_axis` argument tells `batch_map_mean` how to process the
    results of `func` before accumulating them:
    - If `sum_axis` is `None`, `func` should return the
    across-samples SUM of the results of operating on the mini-batch,
    e.g. for loss and error it should
    return `(sum([loss0, loss1, ... lossN]), sum([err0, err1, ... errN]))`
    - Otherwise, `sum_axis` should specify the axis or axes over which
    the batch results should be summed, e.g. if `func` returns a
    per-sample loss and error in two arrays
    `[[loss0, loss1, ... lossN], [err0, err1, ... errN]]`, give `sum_axis`
a value of `0` to sum over axis 0 to get the per-batch loss and error.
These results will be accumulated and divided by the number of samples
at the end to get the mean.
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
sum_axis: (default=`None`) int, tuple of ints or None
If an integer or a tuple of integers, the results returned by `func`
will be summed across this axis / these axes before being accumulated;
e.g. if `func` returns an array of per-sample losses, with axis 0
being the sample dimension, passing a value of `0` as `sum_axis`
will cause these results to be summed along axis 0 to get the
per-batch sum before accumulating the losses. The total summed loss
will be divided by the number of samples at the end in order to
compute the mean loss.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The sum of the results of the function `fn` divided by the number of
samples processed, e.g.
`(sum(outA_per_batch) / n_samples,
sum(outB_per_batch) / n_samples,
...)`
Examples
--------
    The following examples will demonstrate the use of `batch_map_mean`
to compute binary cross entropy loss over a data set.
A few variants will be demonstrated:
- the default behaviour in which the function being applied should
return the sum over the batch sample axis
    - having the function return per-sample results and having
`mean_batch_map` perform the sum operation. This is easier to
understand but less efficient as a Theano function would have to
move more data back from the GPU.
- limiting the number of batches that will be processed in order to get
partial results when dealing with a large data set
Define a function to compute the per-sample binary cross entropy
loss:
>>> def binary_crossentropy_loss(pred, target):
... e = -target * np.log(pred) - (1 - target) * np.log(1 - pred)
... return e.mean(axis=1)
Now define a function that computes the *SUM* of the binary cross
entropy losses over the sample axis (axis 0), as the default
    behaviour of `batch_map_mean` will sum them up and divide by the
number of samples at the end:
>>> def binary_crossentropy_loss_sum(pred, target):
... return binary_crossentropy_loss(pred, target).sum()
Construct prediction and target data
>>> pred = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> tgt = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> ds = ArrayDataSource([pred, tgt])
Apply the loss sum function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss_sum, batch_iter)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
    Have `batch_map_mean` sum over axis 0:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss, batch_iter,
... sum_axis=0)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
    Construct a large data set and use `batch_map_mean` with `n_batches` to compute partial results:
>>> pred_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> tgt_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> ds_large = ArrayDataSource([pred_large, tgt_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_loss = batch_map_mean(binary_crossentropy_loss_sum,
... iter_large, n_batches=2)
... j = i * 10
... assert np.allclose(
... partial_loss, binary_crossentropy_loss(
... pred_large[j:j + 10], tgt_large[j:j + 10]).mean())
]
variable[results_accum] assign[=] constant[None]
variable[n_samples_accum] assign[=] constant[0]
if compare[name[progress_iter_func] is_not constant[None]] begin[:]
variable[batch_iter] assign[=] call[name[progress_iter_func], parameter[name[batch_iter]]]
variable[n_processed] assign[=] constant[0]
for taget[name[batch]] in starred[name[batch_iter]] begin[:]
variable[batch_n] assign[=] call[name[_length_of_batch], parameter[name[batch]]]
if compare[name[prepend_args] is_not constant[None]] begin[:]
variable[batch_results] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da18ede5240>]]
if compare[name[batch_results] is constant[None]] begin[:]
pass
if compare[name[results_accum] is constant[None]] begin[:]
if compare[name[batch_results] is constant[None]] begin[:]
pass
<ast.AugAssign object at 0x7da18ede59c0>
<ast.AugAssign object at 0x7da18ede4640>
if <ast.BoolOp object at 0x7da18ede7c10> begin[:]
break
if compare[name[results_accum] is_not constant[None]] begin[:]
variable[results_accum] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da207f9a6b0>]]
return[name[results_accum]] | keyword[def] identifier[batch_map_mean] ( identifier[func] , identifier[batch_iter] , identifier[progress_iter_func] = keyword[None] , identifier[sum_axis] = keyword[None] ,
identifier[n_batches] = keyword[None] , identifier[prepend_args] = keyword[None] ):
literal[string]
identifier[results_accum] = keyword[None]
identifier[n_samples_accum] = literal[int]
keyword[if] identifier[progress_iter_func] keyword[is] keyword[not] keyword[None] :
identifier[batch_iter] = identifier[progress_iter_func] ( identifier[batch_iter] , identifier[total] = identifier[n_batches] ,
identifier[leave] = keyword[False] )
identifier[n_processed] = literal[int]
keyword[for] identifier[batch] keyword[in] identifier[batch_iter] :
identifier[batch_n] = identifier[_length_of_batch] ( identifier[batch] )
keyword[if] identifier[prepend_args] keyword[is] keyword[not] keyword[None] :
identifier[batch_results] = identifier[func] (*( identifier[prepend_args] + identifier[tuple] ( identifier[batch] )))
keyword[else] :
identifier[batch_results] = identifier[func] (* identifier[batch] )
keyword[if] identifier[batch_results] keyword[is] keyword[None] :
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[batch_results] ,( identifier[np] . identifier[ndarray] , identifier[float] )):
identifier[batch_results] =( identifier[batch_results] ,)
keyword[elif] identifier[isinstance] ( identifier[batch_results] , identifier[tuple] ):
keyword[pass]
keyword[else] :
keyword[raise] identifier[TypeError] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[type] ( identifier[batch_results] )))
keyword[if] identifier[results_accum] keyword[is] keyword[None] :
keyword[if] identifier[batch_results] keyword[is] keyword[None] :
keyword[pass]
keyword[elif] identifier[sum_axis] keyword[is] keyword[None] :
identifier[results_accum] = identifier[list] ( identifier[batch_results] )
keyword[else] :
identifier[results_accum] =[ identifier[br] . identifier[sum] ( identifier[axis] = identifier[sum_axis] ) keyword[for] identifier[br] keyword[in] identifier[batch_results] ]
keyword[else] :
keyword[if] identifier[batch_results] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[results_accum] )):
identifier[br] = identifier[batch_results] [ identifier[i] ]
keyword[if] identifier[sum_axis] keyword[is] keyword[not] keyword[None] :
identifier[br] = identifier[br] . identifier[sum] ( identifier[axis] = identifier[sum_axis] )
identifier[results_accum] [ identifier[i] ]+= identifier[br]
identifier[n_samples_accum] += identifier[batch_n]
identifier[n_processed] += literal[int]
keyword[if] identifier[n_batches] keyword[is] keyword[not] keyword[None] keyword[and] identifier[n_processed] >= identifier[n_batches] :
keyword[break]
keyword[if] identifier[results_accum] keyword[is] keyword[not] keyword[None] :
identifier[results_accum] = identifier[tuple] ([ identifier[np] . identifier[array] ( identifier[r] ). identifier[astype] ( identifier[float] )/ identifier[n_samples_accum]
keyword[for] identifier[r] keyword[in] identifier[results_accum] ])
keyword[return] identifier[results_accum] | def batch_map_mean(func, batch_iter, progress_iter_func=None, sum_axis=None, n_batches=None, prepend_args=None):
"""
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the across-samples mean of the results returned by `func`
    The `sum_axis` argument tells `batch_map_mean` how to process the
    results of `func` before accumulating them:
    - If `sum_axis` is `None`, `func` should return the
    across-samples SUM of the results of operating on the mini-batch,
    e.g. for loss and error it should
    return `(sum([loss0, loss1, ... lossN]), sum([err0, err1, ... errN]))`
    - Otherwise, `sum_axis` should specify the axis or axes over which
    the batch results should be summed, e.g. if `func` returns a
    per-sample loss and error in two arrays
    `[[loss0, loss1, ... lossN], [err0, err1, ... errN]]`, give `sum_axis`
a value of `0` to sum over axis 0 to get the per-batch loss and error.
These results will be accumulated and divided by the number of samples
at the end to get the mean.
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
sum_axis: (default=`None`) int, tuple of ints or None
If an integer or a tuple of integers, the results returned by `func`
will be summed across this axis / these axes before being accumulated;
e.g. if `func` returns an array of per-sample losses, with axis 0
being the sample dimension, passing a value of `0` as `sum_axis`
will cause these results to be summed along axis 0 to get the
per-batch sum before accumulating the losses. The total summed loss
will be divided by the number of samples at the end in order to
compute the mean loss.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The sum of the results of the function `fn` divided by the number of
samples processed, e.g.
`(sum(outA_per_batch) / n_samples,
sum(outB_per_batch) / n_samples,
...)`
Examples
--------
    The following examples will demonstrate the use of `batch_map_mean`
to compute binary cross entropy loss over a data set.
A few variants will be demonstrated:
- the default behaviour in which the function being applied should
return the sum over the batch sample axis
    - having the function return per-sample results and having
`mean_batch_map` perform the sum operation. This is easier to
understand but less efficient as a Theano function would have to
move more data back from the GPU.
- limiting the number of batches that will be processed in order to get
partial results when dealing with a large data set
Define a function to compute the per-sample binary cross entropy
loss:
>>> def binary_crossentropy_loss(pred, target):
... e = -target * np.log(pred) - (1 - target) * np.log(1 - pred)
... return e.mean(axis=1)
Now define a function that computes the *SUM* of the binary cross
entropy losses over the sample axis (axis 0), as the default
    behaviour of `batch_map_mean` will sum them up and divide by the
number of samples at the end:
>>> def binary_crossentropy_loss_sum(pred, target):
... return binary_crossentropy_loss(pred, target).sum()
Construct prediction and target data
>>> pred = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> tgt = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> ds = ArrayDataSource([pred, tgt])
Apply the loss sum function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss_sum, batch_iter)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
    Have `batch_map_mean` sum over axis 0:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss, batch_iter,
... sum_axis=0)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
    Construct a large data set and use `batch_map_mean` with `n_batches` to compute partial results:
>>> pred_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> tgt_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> ds_large = ArrayDataSource([pred_large, tgt_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_loss = batch_map_mean(binary_crossentropy_loss_sum,
... iter_large, n_batches=2)
... j = i * 10
... assert np.allclose(
... partial_loss, binary_crossentropy_loss(
... pred_large[j:j + 10], tgt_large[j:j + 10]).mean())
"""
# Accumulator for results and number of samples
results_accum = None
n_samples_accum = 0
# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
batch_iter = progress_iter_func(batch_iter, total=n_batches, leave=False) # depends on [control=['if'], data=['progress_iter_func']]
# Train on each batch
n_processed = 0
for batch in batch_iter:
# Get number of samples in batch; can vary
batch_n = _length_of_batch(batch)
# Apply on batch and check the type of the results
if prepend_args is not None:
batch_results = func(*prepend_args + tuple(batch)) # depends on [control=['if'], data=['prepend_args']]
else:
batch_results = func(*batch)
if batch_results is None:
pass # depends on [control=['if'], data=[]]
elif isinstance(batch_results, (np.ndarray, float)):
batch_results = (batch_results,) # depends on [control=['if'], data=[]]
elif isinstance(batch_results, tuple):
pass # depends on [control=['if'], data=[]]
else:
raise TypeError('Batch function should return a tuple of results, a single result as a NumPy array or float, or None, not {}'.format(type(batch_results)))
# Accumulate results and number of samples
if results_accum is None:
# Initialise the accumulator to the batch results if `func`
# returns summed results or if it returned None;
# don't attempt to iterate over None and sum each item
if batch_results is None:
pass # depends on [control=['if'], data=[]]
elif sum_axis is None:
results_accum = list(batch_results) # depends on [control=['if'], data=[]]
else:
results_accum = [br.sum(axis=sum_axis) for br in batch_results] # depends on [control=['if'], data=['results_accum']]
elif batch_results is not None:
for i in range(len(results_accum)):
br = batch_results[i]
if sum_axis is not None:
br = br.sum(axis=sum_axis) # depends on [control=['if'], data=['sum_axis']]
results_accum[i] += br # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['batch_results']]
n_samples_accum += batch_n
n_processed += 1
if n_batches is not None and n_processed >= n_batches:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['batch']]
# Divide by the number of training examples used to compute mean
if results_accum is not None:
results_accum = tuple([np.array(r).astype(float) / n_samples_accum for r in results_accum]) # depends on [control=['if'], data=['results_accum']]
return results_accum |
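The accumulation above boils down to a running sum plus a sample count, divided once at the end. A stripped-down sketch of that core for the default sum_axis=None case, where `func` already returns per-batch sums:

def streaming_mean(func, batches):
    total, n_samples = None, 0
    for batch in batches:
        s = func(*batch)                  # per-batch SUM of the quantity
        total = s if total is None else total + s
        n_samples += len(batch[0])        # axis 0 is the sample dimension
    return None if total is None else total / float(n_samples)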
def build(
self,
requirements, # type: Iterable[InstallRequirement]
session, # type: PipSession
autobuilding=False # type: bool
):
# type: (...) -> List[InstallRequirement]
"""Build wheels.
:param unpack: If True, replace the sdist we built from with the
newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
buildset = []
format_control = self.finder.format_control
# Whether a cache directory is available for autobuilding=True.
cache_available = bool(self._wheel_dir or self.wheel_cache.cache_dir)
for req in requirements:
ephem_cache = should_use_ephemeral_cache(
req, format_control=format_control, autobuilding=autobuilding,
cache_available=cache_available,
)
if ephem_cache is None:
continue
buildset.append((req, ephem_cache))
if not buildset:
return []
# Is any wheel build not using the ephemeral cache?
if any(not ephem_cache for _, ephem_cache in buildset):
have_directory_for_build = self._wheel_dir or (
autobuilding and self.wheel_cache.cache_dir
)
assert have_directory_for_build
# TODO by @pradyunsg
# Should break up this method into 2 separate methods.
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for (req, _) in buildset]),
)
_cache = self.wheel_cache # shorter name
with indent_log():
build_success, build_failure = [], []
for req, ephem in buildset:
python_tag = None
if autobuilding:
python_tag = pep425tags.implementation_tag
if ephem:
output_dir = _cache.get_ephem_path_for_link(req.link)
else:
output_dir = _cache.get_path_for_link(req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(
req, output_dir,
python_tag=python_tag,
)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.preparer.build_dir
)
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=session,
)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return a list of requirements that failed to build
return build_failure | def function[build, parameter[self, requirements, session, autobuilding]]:
    constant[Build wheels.
    When autobuilding, each sdist we built from is replaced with the
        newly built wheel, in preparation for installation.
    :return: The list of requirements that failed to build.
    ]
variable[buildset] assign[=] list[[]]
variable[format_control] assign[=] name[self].finder.format_control
variable[cache_available] assign[=] call[name[bool], parameter[<ast.BoolOp object at 0x7da1b1f82740>]]
for taget[name[req]] in starred[name[requirements]] begin[:]
variable[ephem_cache] assign[=] call[name[should_use_ephemeral_cache], parameter[name[req]]]
if compare[name[ephem_cache] is constant[None]] begin[:]
continue
call[name[buildset].append, parameter[tuple[[<ast.Name object at 0x7da1b1f81a80>, <ast.Name object at 0x7da1b1f82e00>]]]]
if <ast.UnaryOp object at 0x7da1b1f81ab0> begin[:]
return[list[[]]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1f81f30>]] begin[:]
variable[have_directory_for_build] assign[=] <ast.BoolOp object at 0x7da1b1f83400>
assert[name[have_directory_for_build]]
call[name[logger].info, parameter[constant[Building wheels for collected packages: %s], call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b1f83f10>]]]]
variable[_cache] assign[=] name[self].wheel_cache
with call[name[indent_log], parameter[]] begin[:]
<ast.Tuple object at 0x7da1b1f83d60> assign[=] tuple[[<ast.List object at 0x7da1b1f802e0>, <ast.List object at 0x7da1b1f83580>]]
for taget[tuple[[<ast.Name object at 0x7da1b1f83430>, <ast.Name object at 0x7da1b1f80f70>]]] in starred[name[buildset]] begin[:]
variable[python_tag] assign[=] constant[None]
if name[autobuilding] begin[:]
variable[python_tag] assign[=] name[pep425tags].implementation_tag
if name[ephem] begin[:]
variable[output_dir] assign[=] call[name[_cache].get_ephem_path_for_link, parameter[name[req].link]]
<ast.Try object at 0x7da18f00c100>
variable[wheel_file] assign[=] call[name[self]._build_one, parameter[name[req], name[output_dir]]]
if name[wheel_file] begin[:]
call[name[build_success].append, parameter[name[req]]]
if name[autobuilding] begin[:]
if <ast.BoolOp object at 0x7da18f00d960> begin[:]
<ast.Raise object at 0x7da1b1f839d0>
call[name[req].remove_temporary_source, parameter[]]
name[req].source_dir assign[=] call[name[req].build_location, parameter[name[self].preparer.build_dir]]
name[req].link assign[=] call[name[Link], parameter[call[name[path_to_url], parameter[name[wheel_file]]]]]
assert[name[req].link.is_wheel]
call[name[unpack_url], parameter[name[req].link, name[req].source_dir, constant[None], constant[False]]]
if name[build_success] begin[:]
call[name[logger].info, parameter[constant[Successfully built %s], call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2054a5f30>]]]]
if name[build_failure] begin[:]
call[name[logger].info, parameter[constant[Failed to build %s], call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2054a5990>]]]]
return[name[build_failure]] | keyword[def] identifier[build] (
identifier[self] ,
identifier[requirements] ,
identifier[session] ,
identifier[autobuilding] = keyword[False]
):
literal[string]
identifier[buildset] =[]
identifier[format_control] = identifier[self] . identifier[finder] . identifier[format_control]
identifier[cache_available] = identifier[bool] ( identifier[self] . identifier[_wheel_dir] keyword[or] identifier[self] . identifier[wheel_cache] . identifier[cache_dir] )
keyword[for] identifier[req] keyword[in] identifier[requirements] :
identifier[ephem_cache] = identifier[should_use_ephemeral_cache] (
identifier[req] , identifier[format_control] = identifier[format_control] , identifier[autobuilding] = identifier[autobuilding] ,
identifier[cache_available] = identifier[cache_available] ,
)
keyword[if] identifier[ephem_cache] keyword[is] keyword[None] :
keyword[continue]
identifier[buildset] . identifier[append] (( identifier[req] , identifier[ephem_cache] ))
keyword[if] keyword[not] identifier[buildset] :
keyword[return] []
keyword[if] identifier[any] ( keyword[not] identifier[ephem_cache] keyword[for] identifier[_] , identifier[ephem_cache] keyword[in] identifier[buildset] ):
identifier[have_directory_for_build] = identifier[self] . identifier[_wheel_dir] keyword[or] (
identifier[autobuilding] keyword[and] identifier[self] . identifier[wheel_cache] . identifier[cache_dir]
)
keyword[assert] identifier[have_directory_for_build]
identifier[logger] . identifier[info] (
literal[string] ,
literal[string] . identifier[join] ([ identifier[req] . identifier[name] keyword[for] ( identifier[req] , identifier[_] ) keyword[in] identifier[buildset] ]),
)
identifier[_cache] = identifier[self] . identifier[wheel_cache]
keyword[with] identifier[indent_log] ():
identifier[build_success] , identifier[build_failure] =[],[]
keyword[for] identifier[req] , identifier[ephem] keyword[in] identifier[buildset] :
identifier[python_tag] = keyword[None]
keyword[if] identifier[autobuilding] :
identifier[python_tag] = identifier[pep425tags] . identifier[implementation_tag]
keyword[if] identifier[ephem] :
identifier[output_dir] = identifier[_cache] . identifier[get_ephem_path_for_link] ( identifier[req] . identifier[link] )
keyword[else] :
identifier[output_dir] = identifier[_cache] . identifier[get_path_for_link] ( identifier[req] . identifier[link] )
keyword[try] :
identifier[ensure_dir] ( identifier[output_dir] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[req] . identifier[name] , identifier[e] )
identifier[build_failure] . identifier[append] ( identifier[req] )
keyword[continue]
keyword[else] :
identifier[output_dir] = identifier[self] . identifier[_wheel_dir]
identifier[wheel_file] = identifier[self] . identifier[_build_one] (
identifier[req] , identifier[output_dir] ,
identifier[python_tag] = identifier[python_tag] ,
)
keyword[if] identifier[wheel_file] :
identifier[build_success] . identifier[append] ( identifier[req] )
keyword[if] identifier[autobuilding] :
keyword[if] identifier[req] . identifier[source_dir] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] (
identifier[req] . identifier[source_dir] , identifier[PIP_DELETE_MARKER_FILENAME] )):
keyword[raise] identifier[AssertionError] (
literal[string] )
identifier[req] . identifier[remove_temporary_source] ()
identifier[req] . identifier[source_dir] = identifier[req] . identifier[build_location] (
identifier[self] . identifier[preparer] . identifier[build_dir]
)
identifier[req] . identifier[link] = identifier[Link] ( identifier[path_to_url] ( identifier[wheel_file] ))
keyword[assert] identifier[req] . identifier[link] . identifier[is_wheel]
identifier[unpack_url] (
identifier[req] . identifier[link] , identifier[req] . identifier[source_dir] , keyword[None] , keyword[False] ,
identifier[session] = identifier[session] ,
)
keyword[else] :
identifier[build_failure] . identifier[append] ( identifier[req] )
keyword[if] identifier[build_success] :
identifier[logger] . identifier[info] (
literal[string] ,
literal[string] . identifier[join] ([ identifier[req] . identifier[name] keyword[for] identifier[req] keyword[in] identifier[build_success] ]),
)
keyword[if] identifier[build_failure] :
identifier[logger] . identifier[info] (
literal[string] ,
literal[string] . identifier[join] ([ identifier[req] . identifier[name] keyword[for] identifier[req] keyword[in] identifier[build_failure] ]),
)
keyword[return] identifier[build_failure] | def build(self, requirements, session, autobuilding=False): # type: Iterable[InstallRequirement]
# type: PipSession
# type: bool
# type: (...) -> List[InstallRequirement]
        'Build wheels.\n\n When autobuilding, each sdist we built from is replaced with the\n newly built wheel, in preparation for installation.\n\n :return: The list of requirements that failed to build.\n '
buildset = []
format_control = self.finder.format_control
# Whether a cache directory is available for autobuilding=True.
cache_available = bool(self._wheel_dir or self.wheel_cache.cache_dir)
for req in requirements:
ephem_cache = should_use_ephemeral_cache(req, format_control=format_control, autobuilding=autobuilding, cache_available=cache_available)
if ephem_cache is None:
continue # depends on [control=['if'], data=[]]
buildset.append((req, ephem_cache)) # depends on [control=['for'], data=['req']]
if not buildset:
return [] # depends on [control=['if'], data=[]]
# Is any wheel build not using the ephemeral cache?
if any((not ephem_cache for (_, ephem_cache) in buildset)):
have_directory_for_build = self._wheel_dir or (autobuilding and self.wheel_cache.cache_dir)
assert have_directory_for_build # depends on [control=['if'], data=[]]
# TODO by @pradyunsg
# Should break up this method into 2 separate methods.
# Build the wheels.
logger.info('Building wheels for collected packages: %s', ', '.join([req.name for (req, _) in buildset]))
_cache = self.wheel_cache # shorter name
with indent_log():
(build_success, build_failure) = ([], [])
for (req, ephem) in buildset:
python_tag = None
if autobuilding:
python_tag = pep425tags.implementation_tag
if ephem:
output_dir = _cache.get_ephem_path_for_link(req.link) # depends on [control=['if'], data=[]]
else:
output_dir = _cache.get_path_for_link(req.link)
try:
ensure_dir(output_dir) # depends on [control=['try'], data=[]]
except OSError as e:
logger.warning('Building wheel for %s failed: %s', req.name, e)
build_failure.append(req)
continue # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(req, output_dir, python_tag=python_tag)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and (not os.path.exists(os.path.join(req.source_dir, PIP_DELETE_MARKER_FILENAME))):
raise AssertionError('bad source dir - missing marker') # depends on [control=['if'], data=[]]
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(self.preparer.build_dir)
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(req.link, req.source_dir, None, False, session=session) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
build_failure.append(req) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
# notify success/failure
if build_success:
logger.info('Successfully built %s', ' '.join([req.name for req in build_success])) # depends on [control=['if'], data=[]]
if build_failure:
logger.info('Failed to build %s', ' '.join([req.name for req in build_failure])) # depends on [control=['if'], data=[]]
# Return a list of requirements that failed to build
return build_failure |
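The routine above partitions collected requirements, attempts a wheel build for each, and reports successes and failures in bulk. A minimal standalone sketch of that partition-and-report pattern, with a hypothetical Req class standing in for the real requirement objects:

class Req:  # hypothetical stand-in for the real requirement type
    def __init__(self, name, ok):
        self.name, self.ok = name, ok

def build_all(requirements):
    build_success, build_failure = [], []
    for req in requirements:
        # a real builder would attempt the wheel build here
        (build_success if req.ok else build_failure).append(req)
    if build_success:
        print('Successfully built %s' % ' '.join(r.name for r in build_success))
    if build_failure:
        print('Failed to build %s' % ' '.join(r.name for r in build_failure))
    return build_failure  # a non-empty list signals failure to the caller

build_all([Req('foo', True), Req('bar', False)])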
def twos_comp_repr(val, bitwidth):
"""
Converts a value to its two's-complement (positive) integer representation using a
given bitwidth (only converts the value if it is negative).
For use with Simulation.step() etc. in passing negative numbers, which it does not accept
"""
correctbw = abs(val).bit_length() + 1
if bitwidth < correctbw:
raise pyrtl.PyrtlError("please choose a larger target bitwidth")
if val >= 0:
return val
else:
return (~abs(val) & (2**bitwidth-1)) + 1 | def function[twos_comp_repr, parameter[val, bitwidth]]:
constant[
Converts a value to its two's-complement (positive) integer representation using a
given bitwidth (only converts the value if it is negative).
For use with Simulation.step() etc. in passing negative numbers, which it does not accept
]
variable[correctbw] assign[=] binary_operation[call[call[name[abs], parameter[name[val]]].bit_length, parameter[]] + constant[1]]
if compare[name[bitwidth] less[<] name[correctbw]] begin[:]
<ast.Raise object at 0x7da1b057af50>
if compare[name[val] greater_or_equal[>=] constant[0]] begin[:]
return[name[val]] | keyword[def] identifier[twos_comp_repr] ( identifier[val] , identifier[bitwidth] ):
literal[string]
identifier[correctbw] = identifier[abs] ( identifier[val] ). identifier[bit_length] ()+ literal[int]
keyword[if] identifier[bitwidth] < identifier[correctbw] :
keyword[raise] identifier[pyrtl] . identifier[PyrtlError] ( literal[string] )
keyword[if] identifier[val] >= literal[int] :
keyword[return] identifier[val]
keyword[else] :
keyword[return] (~ identifier[abs] ( identifier[val] )&( literal[int] ** identifier[bitwidth] - literal[int] ))+ literal[int] | def twos_comp_repr(val, bitwidth):
"""
Converts a value to its two's-complement (positive) integer representation using a
given bitwidth (only converts the value if it is negative).
For use with Simulation.step() etc. in passing negative numbers, which it does not accept
"""
correctbw = abs(val).bit_length() + 1
if bitwidth < correctbw:
raise pyrtl.PyrtlError('please choose a larger target bitwidth') # depends on [control=['if'], data=[]]
if val >= 0:
return val # depends on [control=['if'], data=['val']]
else:
return (~abs(val) & 2 ** bitwidth - 1) + 1 |
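A quick standalone check of the masking trick above: for negative inputs, (~abs(val) & (2**bitwidth - 1)) + 1 yields the two's-complement bit pattern as a positive integer.

def twos(val, bitwidth=8):
    # same expression as the function above, minus the pyrtl error check
    return val if val >= 0 else (~abs(val) & (2 ** bitwidth - 1)) + 1

for val in (5, -1, -128):
    print(val, format(twos(val), '#010b'))
# 5    -> 0b00000101
# -1   -> 0b11111111
# -128 -> 0b10000000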
def apply(self, function):
"""
For each row or column in cuts, read a list of its colors,
apply the function to that list of colors, then write it back
to the layout.
"""
for cut in self.cuts:
value = self.read(cut)
function(value)
self.write(cut, value) | def function[apply, parameter[self, function]]:
constant[
For each row or column in cuts, read a list of its colors,
apply the function to that list of colors, then write it back
to the layout.
]
for taget[name[cut]] in starred[name[self].cuts] begin[:]
variable[value] assign[=] call[name[self].read, parameter[name[cut]]]
call[name[function], parameter[name[value]]]
call[name[self].write, parameter[name[cut], name[value]]] | keyword[def] identifier[apply] ( identifier[self] , identifier[function] ):
literal[string]
keyword[for] identifier[cut] keyword[in] identifier[self] . identifier[cuts] :
identifier[value] = identifier[self] . identifier[read] ( identifier[cut] )
identifier[function] ( identifier[value] )
identifier[self] . identifier[write] ( identifier[cut] , identifier[value] ) | def apply(self, function):
"""
For each row or column in cuts, read a list of its colors,
apply the function to that list of colors, then write it back
to the layout.
"""
for cut in self.cuts:
value = self.read(cut)
function(value)
self.write(cut, value) # depends on [control=['for'], data=['cut']] |
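A minimal standalone sketch of the read/apply/write round trip, with a toy grid standing in for the layout (the real read() and write() are assumed to map cuts to pixel colors):

grid = [[1, 2, 3], [4, 5, 6]]

class Rows:
    cuts = range(len(grid))
    def read(self, cut):
        return list(grid[cut])
    def write(self, cut, value):
        grid[cut][:] = value
    def apply(self, function):
        for cut in self.cuts:
            value = self.read(cut)
            function(value)  # expected to mutate the list in place
            self.write(cut, value)

Rows().apply(lambda colors: colors.reverse())
print(grid)  # [[3, 2, 1], [6, 5, 4]]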
def transform_dataframe(self, dataframe):
"""
Unstack the dataframe so header consists of a composite 'value' header
plus any other header fields.
"""
coord_fields = self.get_coord_fields()
header_fields = self.get_header_fields()
# Remove any pairs that don't have data for both x & y
for i in range(len(coord_fields)):
dataframe = dataframe.unstack()
dataframe = dataframe.dropna(axis=1, how='all')
dataframe = dataframe.dropna(axis=0, how='any')
# Unstack series header
for i in range(len(header_fields)):
dataframe = dataframe.unstack()
# Compute new column headers
columns = []
for i in range(len(header_fields) + 1):
columns.append([])
for col in dataframe.columns:
value_name = col[0]
coord_names = list(col[1:len(coord_fields) + 1])
header_names = list(col[len(coord_fields) + 1:])
coord_name = ''
for name in coord_names:
if name != self.index_none_value:
coord_name += name + '-'
coord_name += value_name
columns[0].append(coord_name)
for i, header_name in enumerate(header_names):
columns[1 + i].append(header_name)
dataframe.columns = columns
dataframe.columns.names = [''] + header_fields
return dataframe | def function[transform_dataframe, parameter[self, dataframe]]:
constant[
Unstack the dataframe so header consists of a composite 'value' header
plus any other header fields.
]
variable[coord_fields] assign[=] call[name[self].get_coord_fields, parameter[]]
variable[header_fields] assign[=] call[name[self].get_header_fields, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[coord_fields]]]]]] begin[:]
variable[dataframe] assign[=] call[name[dataframe].unstack, parameter[]]
variable[dataframe] assign[=] call[name[dataframe].dropna, parameter[]]
variable[dataframe] assign[=] call[name[dataframe].dropna, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[header_fields]]]]]] begin[:]
variable[dataframe] assign[=] call[name[dataframe].unstack, parameter[]]
variable[columns] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[header_fields]]] + constant[1]]]]] begin[:]
call[name[columns].append, parameter[list[[]]]]
for taget[name[col]] in starred[name[dataframe].columns] begin[:]
variable[value_name] assign[=] call[name[col]][constant[0]]
variable[coord_names] assign[=] call[name[list], parameter[call[name[col]][<ast.Slice object at 0x7da2041da320>]]]
variable[header_names] assign[=] call[name[list], parameter[call[name[col]][<ast.Slice object at 0x7da2041d98a0>]]]
variable[coord_name] assign[=] constant[]
for taget[name[name]] in starred[name[coord_names]] begin[:]
if compare[name[name] not_equal[!=] name[self].index_none_value] begin[:]
<ast.AugAssign object at 0x7da1b1db6fb0>
<ast.AugAssign object at 0x7da1b1db73a0>
call[call[name[columns]][constant[0]].append, parameter[name[coord_name]]]
for taget[tuple[[<ast.Name object at 0x7da1b1db71f0>, <ast.Name object at 0x7da1b1db7580>]]] in starred[call[name[enumerate], parameter[name[header_names]]]] begin[:]
call[call[name[columns]][binary_operation[constant[1] + name[i]]].append, parameter[name[header_name]]]
name[dataframe].columns assign[=] name[columns]
name[dataframe].columns.names assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1db6f50>]] + name[header_fields]]
return[name[dataframe]] | keyword[def] identifier[transform_dataframe] ( identifier[self] , identifier[dataframe] ):
literal[string]
identifier[coord_fields] = identifier[self] . identifier[get_coord_fields] ()
identifier[header_fields] = identifier[self] . identifier[get_header_fields] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[coord_fields] )):
identifier[dataframe] = identifier[dataframe] . identifier[unstack] ()
identifier[dataframe] = identifier[dataframe] . identifier[dropna] ( identifier[axis] = literal[int] , identifier[how] = literal[string] )
identifier[dataframe] = identifier[dataframe] . identifier[dropna] ( identifier[axis] = literal[int] , identifier[how] = literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[header_fields] )):
identifier[dataframe] = identifier[dataframe] . identifier[unstack] ()
identifier[columns] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[header_fields] )+ literal[int] ):
identifier[columns] . identifier[append] ([])
keyword[for] identifier[col] keyword[in] identifier[dataframe] . identifier[columns] :
identifier[value_name] = identifier[col] [ literal[int] ]
identifier[coord_names] = identifier[list] ( identifier[col] [ literal[int] : identifier[len] ( identifier[coord_fields] )+ literal[int] ])
identifier[header_names] = identifier[list] ( identifier[col] [ identifier[len] ( identifier[coord_fields] )+ literal[int] :])
identifier[coord_name] = literal[string]
keyword[for] identifier[name] keyword[in] identifier[coord_names] :
keyword[if] identifier[name] != identifier[self] . identifier[index_none_value] :
identifier[coord_name] += identifier[name] + literal[string]
identifier[coord_name] += identifier[value_name]
identifier[columns] [ literal[int] ]. identifier[append] ( identifier[coord_name] )
keyword[for] identifier[i] , identifier[header_name] keyword[in] identifier[enumerate] ( identifier[header_names] ):
identifier[columns] [ literal[int] + identifier[i] ]. identifier[append] ( identifier[header_name] )
identifier[dataframe] . identifier[columns] = identifier[columns]
identifier[dataframe] . identifier[columns] . identifier[names] =[ literal[string] ]+ identifier[header_fields]
keyword[return] identifier[dataframe] | def transform_dataframe(self, dataframe):
"""
Unstack the dataframe so header consists of a composite 'value' header
plus any other header fields.
"""
coord_fields = self.get_coord_fields()
header_fields = self.get_header_fields()
# Remove any pairs that don't have data for both x & y
for i in range(len(coord_fields)):
dataframe = dataframe.unstack() # depends on [control=['for'], data=[]]
dataframe = dataframe.dropna(axis=1, how='all')
dataframe = dataframe.dropna(axis=0, how='any')
# Unstack series header
for i in range(len(header_fields)):
dataframe = dataframe.unstack() # depends on [control=['for'], data=[]]
# Compute new column headers
columns = []
for i in range(len(header_fields) + 1):
columns.append([]) # depends on [control=['for'], data=[]]
for col in dataframe.columns:
value_name = col[0]
coord_names = list(col[1:len(coord_fields) + 1])
header_names = list(col[len(coord_fields) + 1:])
coord_name = ''
for name in coord_names:
if name != self.index_none_value:
coord_name += name + '-' # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['name']]
coord_name += value_name
columns[0].append(coord_name)
for (i, header_name) in enumerate(header_names):
columns[1 + i].append(header_name) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['col']]
dataframe.columns = columns
dataframe.columns.names = [''] + header_fields
return dataframe |
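The method leans on pandas' unstack() to move index levels into the column header, one level per call. A hedged toy example of that step on a small MultiIndex series:

import pandas as pd

idx = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y']],
                                 names=['coord', 'header'])
s = pd.Series([1.0, 2.0, 3.0, 4.0], index=idx, name='value')
df = s.unstack()                      # 'header' level becomes columns
print(df)
print(df.dropna(axis=1, how='all'))   # drop all-NaN columns, as above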
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause") | def function[pause, parameter[self]]:
constant[Pauses the stream.]
variable[res] assign[=] call[name[librtmp].RTMP_Pause, parameter[name[self].client.rtmp, constant[1]]]
if compare[name[res] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da20c6e6980> | keyword[def] identifier[pause] ( identifier[self] ):
literal[string]
identifier[res] = identifier[librtmp] . identifier[RTMP_Pause] ( identifier[self] . identifier[client] . identifier[rtmp] , literal[int] )
keyword[if] identifier[res] < literal[int] :
keyword[raise] identifier[RTMPError] ( literal[string] ) | def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError('Failed to pause') # depends on [control=['if'], data=[]] |
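The wrapped C call returns an int status, with anything below 1 signalling failure; a hedged sketch of that guard with a stand-in function in place of librtmp:

class RTMPError(Exception):
    pass

def checked(res, message):
    # mirrors the "raise on non-positive result" guard above
    if res < 1:
        raise RTMPError(message)

checked(1, 'Failed to pause')      # success: no exception
try:
    checked(0, 'Failed to pause')  # failure path
except RTMPError as e:
    print(e)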
def _serialize_record(self, record):
"""Serialize whole Record"""
f = self._empty_to_dot
row = [record.CHROM, record.POS]
row.append(f(";".join(record.ID)))
row.append(f(record.REF))
if not record.ALT:
row.append(".")
else:
row.append(",".join([f(a.serialize()) for a in record.ALT]))
row.append(f(record.QUAL))
row.append(f(";".join(record.FILTER)))
row.append(f(self._serialize_info(record)))
if record.FORMAT:
row.append(":".join(record.FORMAT))
row += [
self._serialize_call(record.FORMAT, record.call_for_sample[s])
for s in self.header.samples.names
]
print(*row, sep="\t", file=self.stream) | def function[_serialize_record, parameter[self, record]]:
constant[Serialize whole Record]
variable[f] assign[=] name[self]._empty_to_dot
variable[row] assign[=] list[[<ast.Attribute object at 0x7da18f00f430>, <ast.Attribute object at 0x7da18f00f700>]]
call[name[row].append, parameter[call[name[f], parameter[call[constant[;].join, parameter[name[record].ID]]]]]]
call[name[row].append, parameter[call[name[f], parameter[name[record].REF]]]]
if <ast.UnaryOp object at 0x7da18dc07c40> begin[:]
call[name[row].append, parameter[constant[.]]]
call[name[row].append, parameter[call[name[f], parameter[name[record].QUAL]]]]
call[name[row].append, parameter[call[name[f], parameter[call[constant[;].join, parameter[name[record].FILTER]]]]]]
call[name[row].append, parameter[call[name[f], parameter[call[name[self]._serialize_info, parameter[name[record]]]]]]]
if name[record].FORMAT begin[:]
call[name[row].append, parameter[call[constant[:].join, parameter[name[record].FORMAT]]]]
<ast.AugAssign object at 0x7da18dc04fd0>
call[name[print], parameter[<ast.Starred object at 0x7da18dc06e00>]] | keyword[def] identifier[_serialize_record] ( identifier[self] , identifier[record] ):
literal[string]
identifier[f] = identifier[self] . identifier[_empty_to_dot]
identifier[row] =[ identifier[record] . identifier[CHROM] , identifier[record] . identifier[POS] ]
identifier[row] . identifier[append] ( identifier[f] ( literal[string] . identifier[join] ( identifier[record] . identifier[ID] )))
identifier[row] . identifier[append] ( identifier[f] ( identifier[record] . identifier[REF] ))
keyword[if] keyword[not] identifier[record] . identifier[ALT] :
identifier[row] . identifier[append] ( literal[string] )
keyword[else] :
identifier[row] . identifier[append] ( literal[string] . identifier[join] ([ identifier[f] ( identifier[a] . identifier[serialize] ()) keyword[for] identifier[a] keyword[in] identifier[record] . identifier[ALT] ]))
identifier[row] . identifier[append] ( identifier[f] ( identifier[record] . identifier[QUAL] ))
identifier[row] . identifier[append] ( identifier[f] ( literal[string] . identifier[join] ( identifier[record] . identifier[FILTER] )))
identifier[row] . identifier[append] ( identifier[f] ( identifier[self] . identifier[_serialize_info] ( identifier[record] )))
keyword[if] identifier[record] . identifier[FORMAT] :
identifier[row] . identifier[append] ( literal[string] . identifier[join] ( identifier[record] . identifier[FORMAT] ))
identifier[row] +=[
identifier[self] . identifier[_serialize_call] ( identifier[record] . identifier[FORMAT] , identifier[record] . identifier[call_for_sample] [ identifier[s] ])
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[header] . identifier[samples] . identifier[names]
]
identifier[print] (* identifier[row] , identifier[sep] = literal[string] , identifier[file] = identifier[self] . identifier[stream] ) | def _serialize_record(self, record):
"""Serialize whole Record"""
f = self._empty_to_dot
row = [record.CHROM, record.POS]
row.append(f(';'.join(record.ID)))
row.append(f(record.REF))
if not record.ALT:
row.append('.') # depends on [control=['if'], data=[]]
else:
row.append(','.join([f(a.serialize()) for a in record.ALT]))
row.append(f(record.QUAL))
row.append(f(';'.join(record.FILTER)))
row.append(f(self._serialize_info(record)))
if record.FORMAT:
row.append(':'.join(record.FORMAT)) # depends on [control=['if'], data=[]]
row += [self._serialize_call(record.FORMAT, record.call_for_sample[s]) for s in self.header.samples.names]
print(*row, sep='\t', file=self.stream) |
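A hedged standalone sketch of the VCF-style row assembly above: empty fields collapse to '.', multi-valued fields are joined, and the columns are tab-separated.

def empty_to_dot(v):
    return v if v else '.'

row = ['chr1', 100,
       empty_to_dot(';'.join([])),        # ID: no values -> '.'
       empty_to_dot('A'),                 # REF
       ','.join(['T', 'G']),              # ALT alleles
       empty_to_dot('29'),                # QUAL
       empty_to_dot(';'.join(['PASS']))]  # FILTER
print(*row, sep='\t')                     # chr1  100  .  A  T,G  29  PASS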
def _got_session(self, response):
"""Private function to navigate SOL payload activation
"""
if 'error' in response:
self._print_error(response['error'])
return
if not self.ipmi_session:
self.callgotsession = response
return
# Send activate sol payload directive
# netfn= 6 (application)
# command = 0x48 (activate payload)
# data = (1, sol payload type
# 1, first instance
# 0b11000000, -encrypt, authenticate,
# disable serial/modem alerts, CTS fine
# 0, 0, 0 reserved
response = self.ipmi_session.raw_command(netfn=0x6, command=0x48,
data=(1, 1, 192, 0, 0, 0))
# given that these are specific to the command,
# it's probably best if one can grep the error
# here instead of in constants
sol_activate_codes = {
0x81: 'SOL is disabled',
0x82: 'Maximum SOL session count reached',
0x83: 'Cannot activate payload with encryption',
0x84: 'Cannot activate payload without encryption',
}
if 'code' in response and response['code']:
if response['code'] in constants.ipmi_completion_codes:
self._print_error(
constants.ipmi_completion_codes[response['code']])
return
elif response['code'] == 0x80:
if self.force_session and not self.retriedpayload:
self.retriedpayload = 1
sessrsp = self.ipmi_session.raw_command(
netfn=0x6,
command=0x49,
data=(1, 1, 0, 0, 0, 0))
self._got_session(sessrsp)
return
else:
self._print_error('SOL Session active for another client')
return
elif response['code'] in sol_activate_codes:
self._print_error(sol_activate_codes[response['code']])
return
else:
self._print_error(
'SOL encountered Unrecognized error code %d' %
response['code'])
return
if 'error' in response:
self._print_error(response['error'])
return
self.activated = True
# data[0:3] is reserved except for the test mode, which we don't use
data = response['data']
self.maxoutcount = (data[5] << 8) + data[4]
# BMC tells us this is the maximum allowed size
# data[6:7] is the promise of how small packets are going to be, but we
# don't have any reason to worry about it
# some BMCs disagree on the endianness, so do both
valid_ports = (self.port, struct.unpack(
'<H', struct.pack('>H', self.port))[0])
if (data[8] + (data[9] << 8)) not in valid_ports:
# TODO(jbjohnso): support atypical SOL port number
raise NotImplementedError("Non-standard SOL Port Number")
# ignore data[10:11] for now, the vlan detail, shouldn't matter to this
# code anyway...
# NOTE(jbjohnso):
# We will use a special purpose keepalive
if self.ipmi_session.sol_handler is not None:
# If there is erroneously another SOL handler already, notify
# it of newly established session
self.ipmi_session.sol_handler({'error': 'Session Disconnected'})
self.keepaliveid = self.ipmi_session.register_keepalive(
cmd={'netfn': 6, 'command': 0x4b, 'data': (1, 1)},
callback=self._got_payload_instance_info)
self.ipmi_session.sol_handler = self._got_sol_payload
self.connected = True
# self._sendpendingoutput() checks len(self._sendpendingoutput)
self._sendpendingoutput() | def function[_got_session, parameter[self, response]]:
constant[Private function to navigate SOL payload activation
]
if compare[constant[error] in name[response]] begin[:]
call[name[self]._print_error, parameter[call[name[response]][constant[error]]]]
return[None]
if <ast.UnaryOp object at 0x7da18dc06b30> begin[:]
name[self].callgotsession assign[=] name[response]
return[None]
variable[response] assign[=] call[name[self].ipmi_session.raw_command, parameter[]]
variable[sol_activate_codes] assign[=] dictionary[[<ast.Constant object at 0x7da18dc07160>, <ast.Constant object at 0x7da18dc048b0>, <ast.Constant object at 0x7da18dc07bb0>, <ast.Constant object at 0x7da18dc06da0>], [<ast.Constant object at 0x7da18dc04340>, <ast.Constant object at 0x7da18dc06800>, <ast.Constant object at 0x7da18dc07940>, <ast.Constant object at 0x7da18dc054b0>]]
if <ast.BoolOp object at 0x7da18dc05900> begin[:]
if compare[call[name[response]][constant[code]] in name[constants].ipmi_completion_codes] begin[:]
call[name[self]._print_error, parameter[call[name[constants].ipmi_completion_codes][call[name[response]][constant[code]]]]]
return[None]
if compare[constant[error] in name[response]] begin[:]
call[name[self]._print_error, parameter[call[name[response]][constant[error]]]]
return[None]
name[self].activated assign[=] constant[True]
variable[data] assign[=] call[name[response]][constant[data]]
name[self].maxoutcount assign[=] binary_operation[binary_operation[call[name[data]][constant[5]] <ast.LShift object at 0x7da2590d69e0> constant[8]] + call[name[data]][constant[4]]]
variable[valid_ports] assign[=] tuple[[<ast.Attribute object at 0x7da18dc046d0>, <ast.Subscript object at 0x7da18dc052d0>]]
if compare[binary_operation[call[name[data]][constant[8]] + binary_operation[call[name[data]][constant[9]] <ast.LShift object at 0x7da2590d69e0> constant[8]]] <ast.NotIn object at 0x7da2590d7190> name[valid_ports]] begin[:]
<ast.Raise object at 0x7da20e956740>
if compare[name[self].ipmi_session.sol_handler is_not constant[None]] begin[:]
call[name[self].ipmi_session.sol_handler, parameter[dictionary[[<ast.Constant object at 0x7da20e954dc0>], [<ast.Constant object at 0x7da20e9559c0>]]]]
name[self].keepaliveid assign[=] call[name[self].ipmi_session.register_keepalive, parameter[]]
name[self].ipmi_session.sol_handler assign[=] name[self]._got_sol_payload
name[self].connected assign[=] constant[True]
call[name[self]._sendpendingoutput, parameter[]] | keyword[def] identifier[_got_session] ( identifier[self] , identifier[response] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[response] :
identifier[self] . identifier[_print_error] ( identifier[response] [ literal[string] ])
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[ipmi_session] :
identifier[self] . identifier[callgotsession] = identifier[response]
keyword[return]
identifier[response] = identifier[self] . identifier[ipmi_session] . identifier[raw_command] ( identifier[netfn] = literal[int] , identifier[command] = literal[int] ,
identifier[data] =( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ))
identifier[sol_activate_codes] ={
literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] ,
}
keyword[if] literal[string] keyword[in] identifier[response] keyword[and] identifier[response] [ literal[string] ]:
keyword[if] identifier[response] [ literal[string] ] keyword[in] identifier[constants] . identifier[ipmi_completion_codes] :
identifier[self] . identifier[_print_error] (
identifier[constants] . identifier[ipmi_completion_codes] [ identifier[response] [ literal[string] ]])
keyword[return]
keyword[elif] identifier[response] [ literal[string] ]== literal[int] :
keyword[if] identifier[self] . identifier[force_session] keyword[and] keyword[not] identifier[self] . identifier[retriedpayload] :
identifier[self] . identifier[retriedpayload] = literal[int]
identifier[sessrsp] = identifier[self] . identifier[ipmi_session] . identifier[raw_command] (
identifier[netfn] = literal[int] ,
identifier[command] = literal[int] ,
identifier[data] =( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ))
identifier[self] . identifier[_got_session] ( identifier[sessrsp] )
keyword[return]
keyword[else] :
identifier[self] . identifier[_print_error] ( literal[string] )
keyword[return]
keyword[elif] identifier[response] [ literal[string] ] keyword[in] identifier[sol_activate_codes] :
identifier[self] . identifier[_print_error] ( identifier[sol_activate_codes] [ identifier[response] [ literal[string] ]])
keyword[return]
keyword[else] :
identifier[self] . identifier[_print_error] (
literal[string] %
identifier[response] [ literal[string] ])
keyword[return]
keyword[if] literal[string] keyword[in] identifier[response] :
identifier[self] . identifier[_print_error] ( identifier[response] [ literal[string] ])
keyword[return]
identifier[self] . identifier[activated] = keyword[True]
identifier[data] = identifier[response] [ literal[string] ]
identifier[self] . identifier[maxoutcount] =( identifier[data] [ literal[int] ]<< literal[int] )+ identifier[data] [ literal[int] ]
identifier[valid_ports] =( identifier[self] . identifier[port] , identifier[struct] . identifier[unpack] (
literal[string] , identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[port] ))[ literal[int] ])
keyword[if] ( identifier[data] [ literal[int] ]+( identifier[data] [ literal[int] ]<< literal[int] )) keyword[not] keyword[in] identifier[valid_ports] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[if] identifier[self] . identifier[ipmi_session] . identifier[sol_handler] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[ipmi_session] . identifier[sol_handler] ({ literal[string] : literal[string] })
identifier[self] . identifier[keepaliveid] = identifier[self] . identifier[ipmi_session] . identifier[register_keepalive] (
identifier[cmd] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] :( literal[int] , literal[int] )},
identifier[callback] = identifier[self] . identifier[_got_payload_instance_info] )
identifier[self] . identifier[ipmi_session] . identifier[sol_handler] = identifier[self] . identifier[_got_sol_payload]
identifier[self] . identifier[connected] = keyword[True]
identifier[self] . identifier[_sendpendingoutput] () | def _got_session(self, response):
"""Private function to navigate SOL payload activation
"""
if 'error' in response:
self._print_error(response['error'])
return # depends on [control=['if'], data=['response']]
if not self.ipmi_session:
self.callgotsession = response
return # depends on [control=['if'], data=[]]
# Send activate sol payload directive
# netfn= 6 (application)
# command = 0x48 (activate payload)
# data = (1, sol payload type
# 1, first instance
# 0b11000000, -encrypt, authenticate,
# disable serial/modem alerts, CTS fine
# 0, 0, 0 reserved
response = self.ipmi_session.raw_command(netfn=6, command=72, data=(1, 1, 192, 0, 0, 0))
# given that these are specific to the command,
# it's probably best if one can grep the error
# here instead of in constants
sol_activate_codes = {129: 'SOL is disabled', 130: 'Maximum SOL session count reached', 131: 'Cannot activate payload with encryption', 132: 'Cannot activate payload without encryption'}
if 'code' in response and response['code']:
if response['code'] in constants.ipmi_completion_codes:
self._print_error(constants.ipmi_completion_codes[response['code']])
return # depends on [control=['if'], data=[]]
elif response['code'] == 128:
if self.force_session and (not self.retriedpayload):
self.retriedpayload = 1
sessrsp = self.ipmi_session.raw_command(netfn=6, command=73, data=(1, 1, 0, 0, 0, 0))
self._got_session(sessrsp)
return # depends on [control=['if'], data=[]]
else:
self._print_error('SOL Session active for another client')
return # depends on [control=['if'], data=[]]
elif response['code'] in sol_activate_codes:
self._print_error(sol_activate_codes[response['code']])
return # depends on [control=['if'], data=['sol_activate_codes']]
else:
self._print_error('SOL encountered Unrecognized error code %d' % response['code'])
return # depends on [control=['if'], data=[]]
if 'error' in response:
self._print_error(response['error'])
return # depends on [control=['if'], data=['response']]
self.activated = True
# data[0:3] is reserved except for the test mode, which we don't use
data = response['data']
self.maxoutcount = (data[5] << 8) + data[4]
# BMC tells us this is the maximum allowed size
# data[6:7] is the promise of how small packets are going to be, but we
# don't have any reason to worry about it
# some BMCs disagree on the endianness, so do both
valid_ports = (self.port, struct.unpack('<H', struct.pack('>H', self.port))[0])
if data[8] + (data[9] << 8) not in valid_ports:
# TODO(jbjohnso): support atypical SOL port number
raise NotImplementedError('Non-standard SOL Port Number') # depends on [control=['if'], data=[]]
# ignore data[10:11] for now, the vlan detail, shouldn't matter to this
# code anyway...
# NOTE(jbjohnso):
# We will use a special purpose keepalive
if self.ipmi_session.sol_handler is not None:
# If there is erroneously another SOL handler already, notify
# it of newly established session
self.ipmi_session.sol_handler({'error': 'Session Disconnected'}) # depends on [control=['if'], data=[]]
self.keepaliveid = self.ipmi_session.register_keepalive(cmd={'netfn': 6, 'command': 75, 'data': (1, 1)}, callback=self._got_payload_instance_info)
self.ipmi_session.sol_handler = self._got_sol_payload
self.connected = True
# self._sendpendingoutput() checks len(self._sendpendingoutput)
self._sendpendingoutput() |
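A minimal sketch of the completion-code dispatch used above: the activation response is checked against the SOL-specific code table before any payload fields (such as the maximum outbound size in data[4:6]) are trusted. The response dicts here are hypothetical examples.

sol_activate_codes = {
    0x81: 'SOL is disabled',
    0x82: 'Maximum SOL session count reached',
    0x83: 'Cannot activate payload with encryption',
    0x84: 'Cannot activate payload without encryption',
}

def check_activation(response):
    code = response.get('code')
    if code:
        return sol_activate_codes.get(
            code, 'SOL encountered Unrecognized error code %d' % code)
    data = response['data']
    return 'max outbound payload: %d bytes' % ((data[5] << 8) + data[4])

print(check_activation({'code': 0x81}))
print(check_activation({'code': 0, 'data': [0, 0, 0, 0, 0xC8, 0x00]}))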
def getColor(rgb=None, hsv=None):
"""
Convert a color or list of colors to (r,g,b) format from many input formats.
:param bool hsv: if set to `True`, rgb is assumed as (hue, saturation, value).
Example:
- RGB = (255, 255, 255), corresponds to white
- rgb = (1,1,1) is white
- hex = #FFFF00 is yellow
- string = 'white'
- string = 'w' is white nickname
- string = 'dr' is darkred
- int = 7 picks color nr. 7 in a predefined color list
- int = -7 picks color nr. 7 in a different predefined list
.. hint:: |colorcubes| |colorcubes.py|_
"""
#recursion, return a list if input is list of colors:
if _isSequence(rgb) and len(rgb) > 3:
seqcol = []
for sc in rgb:
seqcol.append(getColor(sc))
return seqcol
if str(rgb).isdigit():
rgb = int(rgb)
if hsv:
c = hsv2rgb(hsv)
else:
c = rgb
if _isSequence(c):
if c[0] <= 1 and c[1] <= 1 and c[2] <= 1:
return c # already rgb
else:
if len(c) == 3:
return list(np.array(c) / 255.0) # RGB
else:
return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3]) # RGBA
elif isinstance(c, str): # is string
c = c.replace(",", " ").replace("/", " ").replace("alpha=", "")
c = c.replace("grey", "gray")
c = c.split()[0] # ignore possible opacity float inside string
if 0 < len(c) < 3: # single/double letter color
if c.lower() in color_nicks.keys():
c = color_nicks[c.lower()]
else:
print("Unknown color nickname:", c)
print("Available abbreviations:", color_nicks)
return (0.5, 0.5, 0.5)
if c.lower() in colors.keys(): # matplotlib name color
c = colors[c.lower()]
else: # vtk name color
namedColors = vtk.vtkNamedColors()
rgba = [0, 0, 0, 0]
namedColors.GetColor(c, rgba)
return list(np.array(rgba[0:3]) / 255.0)
if "#" in c: # hex to rgb
h = c.lstrip("#")
rgb255 = list(int(h[i : i + 2], 16) for i in (0, 2, 4))
rgbh = np.array(rgb255) / 255.0
if np.sum(rgbh) > 3:
print("Error in getColor(): Wrong hex color", c)
return (0.5, 0.5, 0.5)
return tuple(rgbh)
elif isinstance(c, int): # color number
if c >= 0:
return colors1[c % 10]
else:
return colors2[-c % 10]
elif isinstance(c, float):
if c >= 0:
return colors1[int(c) % 10]
else:
return colors2[int(-c) % 10]
#print("Unknown color:", c)
return (0.5, 0.5, 0.5) | def function[getColor, parameter[rgb, hsv]]:
constant[
Convert a color or list of colors to (r,g,b) format from many input formats.
:param bool hsv: if set to `True`, rgb is assumed as (hue, saturation, value).
Example:
- RGB = (255, 255, 255), corresponds to white
- rgb = (1,1,1) is white
- hex = #FFFF00 is yellow
- string = 'white'
- string = 'w' is white nickname
- string = 'dr' is darkred
- int = 7 picks color nr. 7 in a predefined color list
- int = -7 picks color nr. 7 in a different predefined list
.. hint:: |colorcubes| |colorcubes.py|_
]
if <ast.BoolOp object at 0x7da20c76f610> begin[:]
variable[seqcol] assign[=] list[[]]
for taget[name[sc]] in starred[name[rgb]] begin[:]
call[name[seqcol].append, parameter[call[name[getColor], parameter[name[sc]]]]]
return[name[seqcol]]
if call[call[name[str], parameter[name[rgb]]].isdigit, parameter[]] begin[:]
variable[rgb] assign[=] call[name[int], parameter[name[rgb]]]
if name[hsv] begin[:]
variable[c] assign[=] call[name[hsv2rgb], parameter[name[hsv]]]
if call[name[_isSequence], parameter[name[c]]] begin[:]
if <ast.BoolOp object at 0x7da18dc98ee0> begin[:]
return[name[c]]
return[tuple[[<ast.Constant object at 0x7da1b0616440>, <ast.Constant object at 0x7da1b0614640>, <ast.Constant object at 0x7da1b0616650>]]] | keyword[def] identifier[getColor] ( identifier[rgb] = keyword[None] , identifier[hsv] = keyword[None] ):
literal[string]
keyword[if] identifier[_isSequence] ( identifier[rgb] ) keyword[and] identifier[len] ( identifier[rgb] )> literal[int] :
identifier[seqcol] =[]
keyword[for] identifier[sc] keyword[in] identifier[rgb] :
identifier[seqcol] . identifier[append] ( identifier[getColor] ( identifier[sc] ))
keyword[return] identifier[seqcol]
keyword[if] identifier[str] ( identifier[rgb] ). identifier[isdigit] ():
identifier[rgb] = identifier[int] ( identifier[rgb] )
keyword[if] identifier[hsv] :
identifier[c] = identifier[hsv2rgb] ( identifier[hsv] )
keyword[else] :
identifier[c] = identifier[rgb]
keyword[if] identifier[_isSequence] ( identifier[c] ):
keyword[if] identifier[c] [ literal[int] ]<= literal[int] keyword[and] identifier[c] [ literal[int] ]<= literal[int] keyword[and] identifier[c] [ literal[int] ]<= literal[int] :
keyword[return] identifier[c]
keyword[else] :
keyword[if] identifier[len] ( identifier[c] )== literal[int] :
keyword[return] identifier[list] ( identifier[np] . identifier[array] ( identifier[c] )/ literal[int] )
keyword[else] :
keyword[return] ( identifier[c] [ literal[int] ]/ literal[int] , identifier[c] [ literal[int] ]/ literal[int] , identifier[c] [ literal[int] ]/ literal[int] , identifier[c] [ literal[int] ])
keyword[elif] identifier[isinstance] ( identifier[c] , identifier[str] ):
identifier[c] = identifier[c] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[c] = identifier[c] . identifier[replace] ( literal[string] , literal[string] )
identifier[c] = identifier[c] . identifier[split] ()[ literal[int] ]
keyword[if] literal[int] < identifier[len] ( identifier[c] )< literal[int] :
keyword[if] identifier[c] . identifier[lower] () keyword[in] identifier[color_nicks] . identifier[keys] ():
identifier[c] = identifier[color_nicks] [ identifier[c] . identifier[lower] ()]
keyword[else] :
identifier[print] ( literal[string] , identifier[c] )
identifier[print] ( literal[string] , identifier[color_nicks] )
keyword[return] ( literal[int] , literal[int] , literal[int] )
keyword[if] identifier[c] . identifier[lower] () keyword[in] identifier[colors] . identifier[keys] ():
identifier[c] = identifier[colors] [ identifier[c] . identifier[lower] ()]
keyword[else] :
identifier[namedColors] = identifier[vtk] . identifier[vtkNamedColors] ()
identifier[rgba] =[ literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[namedColors] . identifier[GetColor] ( identifier[c] , identifier[rgba] )
keyword[return] identifier[list] ( identifier[np] . identifier[array] ( identifier[rgba] [ literal[int] : literal[int] ])/ literal[int] )
keyword[if] literal[string] keyword[in] identifier[c] :
identifier[h] = identifier[c] . identifier[lstrip] ( literal[string] )
identifier[rgb255] = identifier[list] ( identifier[int] ( identifier[h] [ identifier[i] : identifier[i] + literal[int] ], literal[int] ) keyword[for] identifier[i] keyword[in] ( literal[int] , literal[int] , literal[int] ))
identifier[rgbh] = identifier[np] . identifier[array] ( identifier[rgb255] )/ literal[int]
keyword[if] identifier[np] . identifier[sum] ( identifier[rgbh] )> literal[int] :
identifier[print] ( literal[string] , identifier[c] )
keyword[return] ( literal[int] , literal[int] , literal[int] )
keyword[return] identifier[tuple] ( identifier[rgbh] )
keyword[elif] identifier[isinstance] ( identifier[c] , identifier[int] ):
keyword[if] identifier[c] >= literal[int] :
keyword[return] identifier[colors1] [ identifier[c] % literal[int] ]
keyword[else] :
keyword[return] identifier[colors2] [- identifier[c] % literal[int] ]
keyword[elif] identifier[isinstance] ( identifier[c] , identifier[float] ):
keyword[if] identifier[c] >= literal[int] :
keyword[return] identifier[colors1] [ identifier[int] ( identifier[c] )% literal[int] ]
keyword[else] :
keyword[return] identifier[colors2] [ identifier[int] (- identifier[c] )% literal[int] ]
keyword[return] ( literal[int] , literal[int] , literal[int] ) | def getColor(rgb=None, hsv=None):
"""
Convert a color or list of colors to (r,g,b) format from many input formats.
:param bool hsv: if set to `True`, rgb is assumed as (hue, saturation, value).
Example:
- RGB = (255, 255, 255), corresponds to white
- rgb = (1,1,1) is white
- hex = #FFFF00 is yellow
- string = 'white'
- string = 'w' is white nickname
- string = 'dr' is darkred
- int = 7 picks color nr. 7 in a predefined color list
- int = -7 picks color nr. 7 in a different predefined list
.. hint:: |colorcubes| |colorcubes.py|_
"""
#recursion, return a list if input is list of colors:
if _isSequence(rgb) and len(rgb) > 3:
seqcol = []
for sc in rgb:
seqcol.append(getColor(sc)) # depends on [control=['for'], data=['sc']]
return seqcol # depends on [control=['if'], data=[]]
if str(rgb).isdigit():
rgb = int(rgb) # depends on [control=['if'], data=[]]
if hsv:
c = hsv2rgb(hsv) # depends on [control=['if'], data=[]]
else:
c = rgb
if _isSequence(c):
if c[0] <= 1 and c[1] <= 1 and (c[2] <= 1):
return c # already rgb # depends on [control=['if'], data=[]]
elif len(c) == 3:
return list(np.array(c) / 255.0) # RGB # depends on [control=['if'], data=[]]
else:
return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3]) # RGBA # depends on [control=['if'], data=[]]
elif isinstance(c, str): # is string
c = c.replace(',', ' ').replace('/', ' ').replace('alpha=', '')
c = c.replace('grey', 'gray')
c = c.split()[0] # ignore possible opacity float inside string
if 0 < len(c) < 3: # single/double letter color
if c.lower() in color_nicks.keys():
c = color_nicks[c.lower()] # depends on [control=['if'], data=[]]
else:
print('Unknown color nickname:', c)
print('Available abbreviations:', color_nicks)
return (0.5, 0.5, 0.5) # depends on [control=['if'], data=[]]
if c.lower() in colors.keys(): # matplotlib name color
c = colors[c.lower()] # depends on [control=['if'], data=[]]
else: # vtk name color
namedColors = vtk.vtkNamedColors()
rgba = [0, 0, 0, 0]
namedColors.GetColor(c, rgba)
return list(np.array(rgba[0:3]) / 255.0)
if '#' in c: # hex to rgb
h = c.lstrip('#')
rgb255 = list((int(h[i:i + 2], 16) for i in (0, 2, 4)))
rgbh = np.array(rgb255) / 255.0
if np.sum(rgbh) > 3:
print('Error in getColor(): Wrong hex color', c)
return (0.5, 0.5, 0.5) # depends on [control=['if'], data=[]]
return tuple(rgbh) # depends on [control=['if'], data=['c']] # depends on [control=['if'], data=[]]
elif isinstance(c, int): # color number
if c >= 0:
return colors1[c % 10] # depends on [control=['if'], data=['c']]
else:
return colors2[-c % 10] # depends on [control=['if'], data=[]]
elif isinstance(c, float):
if c >= 0:
return colors1[int(c) % 10] # depends on [control=['if'], data=['c']]
else:
return colors2[int(-c) % 10] # depends on [control=['if'], data=[]]
#print("Unknown color:", c)
return (0.5, 0.5, 0.5) |
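A standalone check of the hex branch: '#FFFF00' should come back as (1.0, 1.0, 0.0).

import numpy as np

h = '#FFFF00'.lstrip('#')
rgb255 = [int(h[i:i + 2], 16) for i in (0, 2, 4)]  # R, G, B byte pairs
print(tuple(np.array(rgb255) / 255.0))  # (1.0, 1.0, 0.0)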
def vinet_v_single(p, v0, k0, k0p, min_strain=0.01):
"""
find volume at given pressure using brenth in scipy.optimize
this is for single p value, not vectorized
:param p: pressure in GPa
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param min_strain: defining minimum v/v0 value to search volume for
:return: unit cell volume at high pressure in A^3
"""
if p <= 1.e-5:
return v0
def f_diff(v, v0, k0, k0p, p):
return vinet_p(v, v0, k0, k0p) - p
v = brenth(f_diff, v0, v0 * min_strain, args=(v0, k0, k0p, p))
return v | def function[vinet_v_single, parameter[p, v0, k0, k0p, min_strain]]:
constant[
find volume at given pressure using brenth in scipy.optimize
this is for single p value, not vectorized
:param p: pressure in GPa
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param min_strain: defining minimum v/v0 value to search volume for
:return: unit cell volume at high pressure in A^3
]
if compare[name[p] less_or_equal[<=] constant[1e-05]] begin[:]
return[name[v0]]
def function[f_diff, parameter[v, v0, k0, k0p, p]]:
return[binary_operation[call[name[vinet_p], parameter[name[v], name[v0], name[k0], name[k0p]]] - name[p]]]
variable[v] assign[=] call[name[brenth], parameter[name[f_diff], name[v0], binary_operation[name[v0] * name[min_strain]]]]
return[name[v]] | keyword[def] identifier[vinet_v_single] ( identifier[p] , identifier[v0] , identifier[k0] , identifier[k0p] , identifier[min_strain] = literal[int] ):
literal[string]
keyword[if] identifier[p] <= literal[int] :
keyword[return] identifier[v0]
keyword[def] identifier[f_diff] ( identifier[v] , identifier[v0] , identifier[k0] , identifier[k0p] , identifier[p] ):
keyword[return] identifier[vinet_p] ( identifier[v] , identifier[v0] , identifier[k0] , identifier[k0p] )- identifier[p]
identifier[v] = identifier[brenth] ( identifier[f_diff] , identifier[v0] , identifier[v0] * identifier[min_strain] , identifier[args] =( identifier[v0] , identifier[k0] , identifier[k0p] , identifier[p] ))
keyword[return] identifier[v] | def vinet_v_single(p, v0, k0, k0p, min_strain=0.01):
"""
find volume at given pressure using brenth in scipy.optimize
this is for single p value, not vectorized
:param p: pressure in GPa
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param min_strain: defining minimum v/v0 value to search volume for
:return: unit cell volume at high pressure in A^3
"""
if p <= 1e-05:
return v0 # depends on [control=['if'], data=[]]
def f_diff(v, v0, k0, k0p, p):
return vinet_p(v, v0, k0, k0p) - p
v = brenth(f_diff, v0, v0 * min_strain, args=(v0, k0, k0p, p))
return v |
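The inversion relies on brenth bracketing a sign change of p(v) - p_target between v0 and v0*min_strain. A hedged sketch with a toy monotonic pressure function standing in for vinet_p:

from scipy.optimize import brenth

def toy_p(v, v0=10.0, k0=100.0):
    return k0 * (v0 / v - 1.0)  # pressure grows as the volume shrinks

p_target = 25.0
# same bracket ordering as above: (v0, v0 * min_strain)
v = brenth(lambda v: toy_p(v) - p_target, 10.0, 10.0 * 0.01)
print(v)  # 8.0, since 100 * (10/8 - 1) == 25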
def t_ID(self, t):
r'[a-zA-Z]+'
if t.value in self._RESERVED.keys():
t.type = self._RESERVED[t.value]
return t
if Information.is_valid_symbol(t.value) or \
Information.is_valid_category(t.value):
t.type = self._INFORMATION_UNIT
return t
if Duration.is_valid_symbol(t.value):
t.type = self._DURATION_UNIT
return t
raise LexingError('Unrecognised token or unit \'{0.value}\' at '
'position {0.lexpos}'.format(t)) | def function[t_ID, parameter[self, t]]:
constant[[a-zA-Z]+]
if compare[name[t].value in call[name[self]._RESERVED.keys, parameter[]]] begin[:]
name[t].type assign[=] call[name[self]._RESERVED][name[t].value]
return[name[t]]
if <ast.BoolOp object at 0x7da1b14604c0> begin[:]
name[t].type assign[=] name[self]._INFORMATION_UNIT
return[name[t]]
if call[name[Duration].is_valid_symbol, parameter[name[t].value]] begin[:]
name[t].type assign[=] name[self]._DURATION_UNIT
return[name[t]]
<ast.Raise object at 0x7da1b1462830> | keyword[def] identifier[t_ID] ( identifier[self] , identifier[t] ):
literal[string]
keyword[if] identifier[t] . identifier[value] keyword[in] identifier[self] . identifier[_RESERVED] . identifier[keys] ():
identifier[t] . identifier[type] = identifier[self] . identifier[_RESERVED] [ identifier[t] . identifier[value] ]
keyword[return] identifier[t]
keyword[if] identifier[Information] . identifier[is_valid_symbol] ( identifier[t] . identifier[value] ) keyword[or] identifier[Information] . identifier[is_valid_category] ( identifier[t] . identifier[value] ):
identifier[t] . identifier[type] = identifier[self] . identifier[_INFORMATION_UNIT]
keyword[return] identifier[t]
keyword[if] identifier[Duration] . identifier[is_valid_symbol] ( identifier[t] . identifier[value] ):
identifier[t] . identifier[type] = identifier[self] . identifier[_DURATION_UNIT]
keyword[return] identifier[t]
keyword[raise] identifier[LexingError] ( literal[string]
literal[string] . identifier[format] ( identifier[t] )) | def t_ID(self, t):
"""[a-zA-Z]+"""
if t.value in self._RESERVED.keys():
t.type = self._RESERVED[t.value]
return t # depends on [control=['if'], data=[]]
if Information.is_valid_symbol(t.value) or Information.is_valid_category(t.value):
t.type = self._INFORMATION_UNIT
return t # depends on [control=['if'], data=[]]
if Duration.is_valid_symbol(t.value):
t.type = self._DURATION_UNIT
return t # depends on [control=['if'], data=[]]
raise LexingError("Unrecognised token or unit '{0.value}' at position {0.lexpos}".format(t)) |
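This has the shape of a PLY lexer rule, where the docstring of a t_* function supplies the token regex and the body refines the token type. A hedged standalone sketch of that reserved-word/unit dispatch with made-up tables:

RESERVED = {'in': 'IN', 'as': 'AS'}   # hypothetical reserved words
UNITS = {'MiB', 'GiB'}                # hypothetical unit symbols

def classify(value):
    if value in RESERVED:
        return RESERVED[value]
    if value in UNITS:
        return 'INFORMATION_UNIT'
    raise ValueError("Unrecognised token or unit '%s'" % value)

print(classify('in'), classify('MiB'))  # IN INFORMATION_UNIT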
def get_state(self):
"""Get the current directory state"""
return [os.path.join(dp, f)
for dp, _, fn in os.walk(self.dir)
for f in fn] | def function[get_state, parameter[self]]:
constant[Get the current directory state]
return[<ast.ListComp object at 0x7da1afea66b0>] | keyword[def] identifier[get_state] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[os] . identifier[path] . identifier[join] ( identifier[dp] , identifier[f] )
keyword[for] identifier[dp] , identifier[_] , identifier[fn] keyword[in] identifier[os] . identifier[walk] ( identifier[self] . identifier[dir] )
keyword[for] identifier[f] keyword[in] identifier[fn] ] | def get_state(self):
"""Get the current directory state"""
return [os.path.join(dp, f) for (dp, _, fn) in os.walk(self.dir) for f in fn] |
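A quick standalone check of the os.walk flattening used above, over a throwaway directory tree:

import os, tempfile

root = tempfile.mkdtemp()
open(os.path.join(root, 'a.txt'), 'w').close()
os.makedirs(os.path.join(root, 'sub'))
open(os.path.join(root, 'sub', 'b.txt'), 'w').close()
# one flat list of file paths, exactly as the comprehension above builds
print([os.path.join(dp, f) for dp, _, fn in os.walk(root) for f in fn])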
def from_str(date):
"""
Given a date in the format: Jan,21st.2015
will return a datetime.date for it.
"""
month = date[:3][0] + date[:3][-2:].lower()
if month not in NAMED_MONTHS:
raise CanNotFormatError('Month not recognized')
date = date.replace(',', '').replace(' ', '').replace('.', '')
try:
day_unit = [x for x in ['st', 'rd', 'nd', 'th'] if x in date][0]
day = int(re.search(r'\d+', date.split(day_unit)[0]).group())
year = int(re.search(r'\d+', date.split(day_unit)[1]).group())
numeric_month = NAMED_MONTHS[month]
return datetime.date(int(year), numeric_month, day)
except:
raise CanNotFormatError('Not well formatted. Expecting something like May,21st.2015') | def function[from_str, parameter[date]]:
constant[
Given a date in the format: Jan,21st.2015
will return a datetime.date for it.
]
variable[month] assign[=] binary_operation[call[call[name[date]][<ast.Slice object at 0x7da1b0cb3070>]][constant[0]] + call[call[call[name[date]][<ast.Slice object at 0x7da1b0cb1060>]][<ast.Slice object at 0x7da1b0cb2290>].lower, parameter[]]]
if compare[name[month] <ast.NotIn object at 0x7da2590d7190> name[NAMED_MONTHS]] begin[:]
<ast.Raise object at 0x7da1b0cb04c0>
variable[date] assign[=] call[call[call[name[date].replace, parameter[constant[,], constant[]]].replace, parameter[constant[ ], constant[]]].replace, parameter[constant[.], constant[]]]
<ast.Try object at 0x7da1b0cb2680> | keyword[def] identifier[from_str] ( identifier[date] ):
literal[string]
identifier[month] = identifier[date] [: literal[int] ][ literal[int] ]+ identifier[date] [: literal[int] ][- literal[int] :]. identifier[lower] ()
keyword[if] identifier[month] keyword[not] keyword[in] identifier[NAMED_MONTHS] :
keyword[raise] identifier[CanNotFormatError] ( literal[string] )
identifier[date] = identifier[date] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[try] :
identifier[day_unit] =[ identifier[x] keyword[for] identifier[x] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[x] keyword[in] identifier[date] ][ literal[int] ]
identifier[day] = identifier[int] ( identifier[re] . identifier[search] ( literal[string] , identifier[date] . identifier[split] ( identifier[day_unit] )[ literal[int] ]). identifier[group] ())
identifier[year] = identifier[int] ( identifier[re] . identifier[search] ( literal[string] , identifier[date] . identifier[split] ( identifier[day_unit] )[ literal[int] ]). identifier[group] ())
identifier[numeric_month] = identifier[NAMED_MONTHS] [ identifier[month] ]
keyword[return] identifier[datetime] . identifier[date] ( identifier[int] ( identifier[year] ), identifier[numeric_month] , identifier[day] )
keyword[except] :
keyword[raise] identifier[CanNotFormatError] ( literal[string] ) | def from_str(date):
"""
Given a date in the format: Jan,21st.2015
will return a datetime.date for it.
"""
month = date[:3][0] + date[:3][-2:].lower()
if month not in NAMED_MONTHS:
raise CanNotFormatError('Month not recognized') # depends on [control=['if'], data=[]]
date = date.replace(',', '').replace(' ', '').replace('.', '')
try:
day_unit = [x for x in ['st', 'rd', 'nd', 'th'] if x in date][0]
day = int(re.search('\\d+', date.split(day_unit)[0]).group())
year = int(re.search('\\d+', date.split(day_unit)[1]).group())
numeric_month = NAMED_MONTHS[month]
return datetime.date(int(year), numeric_month, day) # depends on [control=['try'], data=[]]
except:
raise CanNotFormatError('Not well formatted. Expecting something like May,21st.2015') # depends on [control=['except'], data=[]] |
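A standalone check of the day/year extraction (NAMED_MONTHS itself is external, assumed to map 'Jan' -> 1 and so on):

import re

date = 'May,21st.2015'.replace(',', '').replace(' ', '').replace('.', '')
day_unit = [x for x in ['st', 'rd', 'nd', 'th'] if x in date][0]
day = int(re.search(r'\d+', date.split(day_unit)[0]).group())   # before 'st'
year = int(re.search(r'\d+', date.split(day_unit)[1]).group())  # after 'st'
print(day, year)  # 21 2015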
def hull_moving_average(data, period):
"""
Hull Moving Average.
Formula:
HMA = WMA(2*WMA(n/2) - WMA(n), sqrt(n))
"""
catch_errors.check_for_period_error(data, period)
hma = wma(
2 * wma(data, int(period/2)) - wma(data, period), int(np.sqrt(period))
)
return hma | def function[hull_moving_average, parameter[data, period]]:
constant[
Hull Moving Average.
Formula:
HMA = WMA(2*WMA(n/2) - WMA(n), sqrt(n))
]
call[name[catch_errors].check_for_period_error, parameter[name[data], name[period]]]
variable[hma] assign[=] call[name[wma], parameter[binary_operation[binary_operation[constant[2] * call[name[wma], parameter[name[data], call[name[int], parameter[binary_operation[name[period] / constant[2]]]]]]] - call[name[wma], parameter[name[data], name[period]]]], call[name[int], parameter[call[name[np].sqrt, parameter[name[period]]]]]]]
return[name[hma]] | keyword[def] identifier[hull_moving_average] ( identifier[data] , identifier[period] ):
literal[string]
identifier[catch_errors] . identifier[check_for_period_error] ( identifier[data] , identifier[period] )
identifier[hma] = identifier[wma] (
literal[int] * identifier[wma] ( identifier[data] , identifier[int] ( identifier[period] / literal[int] ))- identifier[wma] ( identifier[data] , identifier[period] ), identifier[int] ( identifier[np] . identifier[sqrt] ( identifier[period] ))
)
keyword[return] identifier[hma] | def hull_moving_average(data, period):
"""
Hull Moving Average.
Formula:
HMA = WMA(2*WMA(n/2) - WMA(n), sqrt(n))
"""
catch_errors.check_for_period_error(data, period)
hma = wma(2 * wma(data, int(period / 2)) - wma(data, period), int(np.sqrt(period)))
return hma |
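To run the composition standalone, a toy wma() is enough (the real one is assumed to return an array aligned with its input, NaN-padded at the head):

import numpy as np

def wma(data, period):  # toy weighted moving average, weights 1..period
    w = np.arange(1, period + 1, dtype=float)
    out = np.full(len(data), np.nan)
    for i in range(period - 1, len(data)):
        out[i] = np.dot(data[i - period + 1:i + 1], w) / w.sum()
    return out

data = np.arange(1.0, 21.0)
period = 9
hma = wma(2 * wma(data, period // 2) - wma(data, period),
          int(np.sqrt(period)))
print(hma[-1])  # ~20.0 here: the HMA construction cancels the lag on a linear series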
def reset(self):
""" Initial state
"""
self.regs = {}
self.stack = []
self.mem = defaultdict(new_tmp_val) # Dict of label -> value in memory
for i in 'abcdefhl':
self.regs[i] = new_tmp_val() # Initial unknown state
self.regs["%s'" % i] = new_tmp_val()
self.regs['ixh'] = new_tmp_val()
self.regs['ixl'] = new_tmp_val()
self.regs['iyh'] = new_tmp_val()
self.regs['iyl'] = new_tmp_val()
self.regs['sp'] = new_tmp_val()
self.regs['r'] = new_tmp_val()
self.regs['i'] = new_tmp_val()
self.regs['af'] = new_tmp_val()
self.regs['bc'] = new_tmp_val()
self.regs['de'] = new_tmp_val()
self.regs['hl'] = new_tmp_val()
self.regs['ix'] = new_tmp_val()
self.regs['iy'] = new_tmp_val()
self.regs["af'"] = new_tmp_val()
self.regs["bc'"] = new_tmp_val()
self.regs["de'"] = new_tmp_val()
self.regs["hl'"] = new_tmp_val()
self._16bit = {'b': 'bc', 'c': 'bc', 'd': 'de', 'e': 'de', 'h': 'hl', 'l': 'hl',
"b'": "bc'", "c'": "bc'", "d'": "de'", "e'": "de'", "h'": "hl'", "l'": "hl'",
'ixh': 'ix', 'ixl': 'ix', 'iyh': 'iy', 'iyl': 'iy', 'a': 'af', "a'": "af'",
'f': 'af', "f'": "af'"}
self.reset_flags() | def function[reset, parameter[self]]:
constant[ Initial state
]
name[self].regs assign[=] dictionary[[], []]
name[self].stack assign[=] list[[]]
name[self].mem assign[=] call[name[defaultdict], parameter[name[new_tmp_val]]]
for taget[name[i]] in starred[constant[abcdefhl]] begin[:]
call[name[self].regs][name[i]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][binary_operation[constant[%s'] <ast.Mod object at 0x7da2590d6920> name[i]]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[ixh]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[ixl]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[iyh]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[iyl]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[sp]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[r]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[i]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[af]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[bc]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[de]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[hl]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[ix]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[iy]] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[af']] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[bc']] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[de']] assign[=] call[name[new_tmp_val], parameter[]]
call[name[self].regs][constant[hl']] assign[=] call[name[new_tmp_val], parameter[]]
name[self]._16bit assign[=] dictionary[[<ast.Constant object at 0x7da2054a7c40>, <ast.Constant object at 0x7da2054a5ea0>, <ast.Constant object at 0x7da2054a6b30>, <ast.Constant object at 0x7da2054a4520>, <ast.Constant object at 0x7da2054a4ca0>, <ast.Constant object at 0x7da2054a4a00>, <ast.Constant object at 0x7da2054a7340>, <ast.Constant object at 0x7da2054a72e0>, <ast.Constant object at 0x7da2054a6b60>, <ast.Constant object at 0x7da2054a55d0>, <ast.Constant object at 0x7da2054a5c00>, <ast.Constant object at 0x7da2054a4100>, <ast.Constant object at 0x7da2054a5960>, <ast.Constant object at 0x7da2054a43d0>, <ast.Constant object at 0x7da2054a70d0>, <ast.Constant object at 0x7da2054a4e50>, <ast.Constant object at 0x7da2054a4b20>, <ast.Constant object at 0x7da2054a5360>, <ast.Constant object at 0x7da2054a51e0>, <ast.Constant object at 0x7da2054a4640>], [<ast.Constant object at 0x7da2054a4e80>, <ast.Constant object at 0x7da2054a6980>, <ast.Constant object at 0x7da2054a5de0>, <ast.Constant object at 0x7da2054a70a0>, <ast.Constant object at 0x7da2054a7370>, <ast.Constant object at 0x7da2054a61d0>, <ast.Constant object at 0x7da2054a6c20>, <ast.Constant object at 0x7da2054a40a0>, <ast.Constant object at 0x7da2054a5cc0>, <ast.Constant object at 0x7da2054a5b70>, <ast.Constant object at 0x7da2054a6350>, <ast.Constant object at 0x7da2054a47f0>, <ast.Constant object at 0x7da2054a7700>, <ast.Constant object at 0x7da2054a5810>, <ast.Constant object at 0x7da2054a4d00>, <ast.Constant object at 0x7da2054a5300>, <ast.Constant object at 0x7da2054a74f0>, <ast.Constant object at 0x7da2054a7e20>, <ast.Constant object at 0x7da2054a49d0>, <ast.Constant object at 0x7da2054a5f60>]]
call[name[self].reset_flags, parameter[]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[self] . identifier[regs] ={}
identifier[self] . identifier[stack] =[]
identifier[self] . identifier[mem] = identifier[defaultdict] ( identifier[new_tmp_val] )
keyword[for] identifier[i] keyword[in] literal[string] :
identifier[self] . identifier[regs] [ identifier[i] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] % identifier[i] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[regs] [ literal[string] ]= identifier[new_tmp_val] ()
identifier[self] . identifier[_16bit] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] }
identifier[self] . identifier[reset_flags] () | def reset(self):
""" Initial state
"""
self.regs = {}
self.stack = []
self.mem = defaultdict(new_tmp_val) # Dict of label -> value in memory
for i in 'abcdefhl':
self.regs[i] = new_tmp_val() # Initial unknown state
self.regs["%s'" % i] = new_tmp_val() # depends on [control=['for'], data=['i']]
self.regs['ixh'] = new_tmp_val()
self.regs['ixl'] = new_tmp_val()
self.regs['iyh'] = new_tmp_val()
self.regs['iyl'] = new_tmp_val()
self.regs['sp'] = new_tmp_val()
self.regs['r'] = new_tmp_val()
self.regs['i'] = new_tmp_val()
self.regs['af'] = new_tmp_val()
self.regs['bc'] = new_tmp_val()
self.regs['de'] = new_tmp_val()
self.regs['hl'] = new_tmp_val()
self.regs['ix'] = new_tmp_val()
self.regs['iy'] = new_tmp_val()
self.regs["af'"] = new_tmp_val()
self.regs["bc'"] = new_tmp_val()
self.regs["de'"] = new_tmp_val()
self.regs["hl'"] = new_tmp_val()
self._16bit = {'b': 'bc', 'c': 'bc', 'd': 'de', 'e': 'de', 'h': 'hl', 'l': 'hl', "b'": "bc'", "c'": "bc'", "d'": "de'", "e'": "de'", "h'": "hl'", "l'": "hl'", 'ixy': 'ix', 'ixl': 'ix', 'iyh': 'iy', 'iyl': 'iy', 'a': 'af', "a'": "af'", 'f': 'af', "f'": "af'"}
self.reset_flags() |
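For orientation, the `_16bit` table at the end of reset() maps each half register to its 16-bit parent; note the 'ixy' key in the source table, where 'ixh' was likely intended. A self-contained sketch of the same lookup, with an illustrative pair table independent of the source class:

# Illustrative: derive the 8-bit -> 16-bit register map from pair definitions
PAIRS = {'bc': ('b', 'c'), 'de': ('d', 'e'), 'hl': ('h', 'l'), 'af': ('a', 'f')}
SIXTEEN_BIT = {half: pair for pair, halves in PAIRS.items() for half in halves}
assert SIXTEEN_BIT['b'] == 'bc' and SIXTEEN_BIT['l'] == 'hl'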
def _history_locked(self):
""" Returns whether history movement is locked.
"""
return (self.history_lock and
(self._get_edited_history(self._history_index) !=
self.input_buffer) and
(self._get_prompt_cursor().blockNumber() !=
self._get_end_cursor().blockNumber())) | def function[_history_locked, parameter[self]]:
constant[ Returns whether history movement is locked.
]
return[<ast.BoolOp object at 0x7da204622d10>] | keyword[def] identifier[_history_locked] ( identifier[self] ):
literal[string]
keyword[return] ( identifier[self] . identifier[history_lock] keyword[and]
( identifier[self] . identifier[_get_edited_history] ( identifier[self] . identifier[_history_index] )!=
identifier[self] . identifier[input_buffer] ) keyword[and]
( identifier[self] . identifier[_get_prompt_cursor] (). identifier[blockNumber] ()!=
identifier[self] . identifier[_get_end_cursor] (). identifier[blockNumber] ())) | def _history_locked(self):
""" Returns whether history movement is locked.
"""
return self.history_lock and self._get_edited_history(self._history_index) != self.input_buffer and (self._get_prompt_cursor().blockNumber() != self._get_end_cursor().blockNumber()) |
def render(self, name, context):
"""Returns the rendered text from a single template file from the
template loader using the given context data"""
if Generator.strict:
self.env.undefined = TestableUndefined
else:
self.env.undefined = Undefined
template = self.get_template(name)
return template.render(context) | def function[render, parameter[self, name, context]]:
    constant[Returns the rendered text of a single template file from the
    template loader, using the given context data]
if name[Generator].strict begin[:]
name[self].env.undefined assign[=] name[TestableUndefined]
variable[template] assign[=] call[name[self].get_template, parameter[name[name]]]
return[call[name[template].render, parameter[name[context]]]] | keyword[def] identifier[render] ( identifier[self] , identifier[name] , identifier[context] ):
literal[string]
keyword[if] identifier[Generator] . identifier[strict] :
identifier[self] . identifier[env] . identifier[undefined] = identifier[TestableUndefined]
keyword[else] :
identifier[self] . identifier[env] . identifier[undefined] = identifier[Undefined]
identifier[template] = identifier[self] . identifier[get_template] ( identifier[name] )
keyword[return] identifier[template] . identifier[render] ( identifier[context] ) | def render(self, name, context):
"""Returns the rendered text from a single template file from the
template loader using the given context data"""
if Generator.strict:
self.env.undefined = TestableUndefined # depends on [control=['if'], data=[]]
else:
self.env.undefined = Undefined
template = self.get_template(name)
return template.render(context) |
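A minimal stand-in for render() using plain Jinja2; StrictUndefined plays the role of the project's TestableUndefined here, which is an assumption about intent rather than the project's API:

from jinja2 import DictLoader, Environment, StrictUndefined

env = Environment(loader=DictLoader({'hello.txt': 'Hello {{ name }}!'}),
                  undefined=StrictUndefined)  # strict mode: missing keys raise
print(env.get_template('hello.txt').render({'name': 'world'}))  # Hello world!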
def binaryorbit(orbit, comp1, comp2, envelope=None):
"""
Build the string representation of a hierarchy containing a binary
orbit with 2 components.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.set_hierarchy`
:parameter comp1: an existing hierarchy string, Parameter, or ParameterSet
:parameter comp2: an existing hierarchy string, Parameter, or ParameterSet
:return: the string representation of the hierarchy
"""
if envelope:
return '{}({}, {}, {})'.format(_to_component(orbit, False), _to_component(comp1), _to_component(comp2), _to_component(envelope, False))
else:
return '{}({}, {})'.format(_to_component(orbit, False), _to_component(comp1), _to_component(comp2)) | def function[binaryorbit, parameter[orbit, comp1, comp2, envelope]]:
constant[
Build the string representation of a hierarchy containing a binary
orbit with 2 components.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.set_hierarchy`
:parameter comp1: an existing hierarchy string, Parameter, or ParameterSet
:parameter comp2: an existing hierarchy string, Parameter, or ParameterSet
:return: the string representation of the hierarchy
]
if name[envelope] begin[:]
return[call[constant[{}({}, {}, {})].format, parameter[call[name[_to_component], parameter[name[orbit], constant[False]]], call[name[_to_component], parameter[name[comp1]]], call[name[_to_component], parameter[name[comp2]]], call[name[_to_component], parameter[name[envelope], constant[False]]]]]] | keyword[def] identifier[binaryorbit] ( identifier[orbit] , identifier[comp1] , identifier[comp2] , identifier[envelope] = keyword[None] ):
literal[string]
keyword[if] identifier[envelope] :
keyword[return] literal[string] . identifier[format] ( identifier[_to_component] ( identifier[orbit] , keyword[False] ), identifier[_to_component] ( identifier[comp1] ), identifier[_to_component] ( identifier[comp2] ), identifier[_to_component] ( identifier[envelope] , keyword[False] ))
keyword[else] :
keyword[return] literal[string] . identifier[format] ( identifier[_to_component] ( identifier[orbit] , keyword[False] ), identifier[_to_component] ( identifier[comp1] ), identifier[_to_component] ( identifier[comp2] )) | def binaryorbit(orbit, comp1, comp2, envelope=None):
"""
Build the string representation of a hierarchy containing a binary
orbit with 2 components.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.set_hierarchy`
:parameter comp1: an existing hierarchy string, Parameter, or ParameterSet
:parameter comp2: an existing hierarchy string, Parameter, or ParameterSet
:return: the string representation of the hierarchy
"""
if envelope:
return '{}({}, {}, {})'.format(_to_component(orbit, False), _to_component(comp1), _to_component(comp2), _to_component(envelope, False)) # depends on [control=['if'], data=[]]
else:
return '{}({}, {})'.format(_to_component(orbit, False), _to_component(comp1), _to_component(comp2)) |
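A hedged usage sketch: assuming _to_component() reduces a plain string to the component's label, the two call shapes would produce:

# >>> binaryorbit('binary', 'primary', 'secondary')
# 'binary(primary, secondary)'
# >>> binaryorbit('binary', 'primary', 'secondary', envelope='contact_envelope')
# 'binary(primary, secondary, contact_envelope)'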
def build_slabs(self):
"""
Builds the reconstructed slab by:
(1) Obtaining the unreconstructed slab using the specified
parameters for the SlabGenerator.
(2) Applying the appropriate lattice transformation in the
a and b lattice vectors.
            (3) Removing any specified sites from both surfaces.
            (4) Adding any specified sites to both surfaces.
Returns:
(Slab): The reconstructed slab.
"""
slabs = self.get_unreconstructed_slabs()
recon_slabs = []
for slab in slabs:
d = get_d(slab)
top_site = sorted(slab, key=lambda site: site.frac_coords[2])[-1].coords
# Remove any specified sites
if "points_to_remove" in self.reconstruction_json.keys():
pts_to_rm = copy.deepcopy(self.reconstruction_json["points_to_remove"])
for p in pts_to_rm:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1],
top_site[2]+p[2]*d])[2]
cart_point = slab.lattice.get_cartesian_coords(p)
dist = [site.distance_from_point(cart_point) for site in slab]
site1 = dist.index(min(dist))
slab.symmetrically_remove_atoms([site1])
# Add any specified sites
if "points_to_add" in self.reconstruction_json.keys():
pts_to_add = copy.deepcopy(self.reconstruction_json["points_to_add"])
for p in pts_to_add:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1],
top_site[2]+p[2]*d])[2]
slab.symmetrically_add_atom(slab[0].specie, p)
slab.reconstruction = self.name
setattr(slab, "recon_trans_matrix", self.trans_matrix)
# Get the oriented_unit_cell with the same axb area.
ouc = slab.oriented_unit_cell.copy()
ouc.make_supercell(self.trans_matrix)
slab.oriented_unit_cell = ouc
recon_slabs.append(slab)
return recon_slabs | def function[build_slabs, parameter[self]]:
constant[
Builds the reconstructed slab by:
(1) Obtaining the unreconstructed slab using the specified
parameters for the SlabGenerator.
(2) Applying the appropriate lattice transformation in the
a and b lattice vectors.
        (3) Removing any specified sites from both surfaces.
        (4) Adding any specified sites to both surfaces.
Returns:
(Slab): The reconstructed slab.
]
variable[slabs] assign[=] call[name[self].get_unreconstructed_slabs, parameter[]]
variable[recon_slabs] assign[=] list[[]]
for taget[name[slab]] in starred[name[slabs]] begin[:]
variable[d] assign[=] call[name[get_d], parameter[name[slab]]]
variable[top_site] assign[=] call[call[name[sorted], parameter[name[slab]]]][<ast.UnaryOp object at 0x7da204564ac0>].coords
if compare[constant[points_to_remove] in call[name[self].reconstruction_json.keys, parameter[]]] begin[:]
variable[pts_to_rm] assign[=] call[name[copy].deepcopy, parameter[call[name[self].reconstruction_json][constant[points_to_remove]]]]
for taget[name[p]] in starred[name[pts_to_rm]] begin[:]
call[name[p]][constant[2]] assign[=] call[call[name[slab].lattice.get_fractional_coords, parameter[list[[<ast.Subscript object at 0x7da204565bd0>, <ast.Subscript object at 0x7da204567130>, <ast.BinOp object at 0x7da204567490>]]]]][constant[2]]
variable[cart_point] assign[=] call[name[slab].lattice.get_cartesian_coords, parameter[name[p]]]
variable[dist] assign[=] <ast.ListComp object at 0x7da204565930>
variable[site1] assign[=] call[name[dist].index, parameter[call[name[min], parameter[name[dist]]]]]
call[name[slab].symmetrically_remove_atoms, parameter[list[[<ast.Name object at 0x7da18bc72440>]]]]
if compare[constant[points_to_add] in call[name[self].reconstruction_json.keys, parameter[]]] begin[:]
variable[pts_to_add] assign[=] call[name[copy].deepcopy, parameter[call[name[self].reconstruction_json][constant[points_to_add]]]]
for taget[name[p]] in starred[name[pts_to_add]] begin[:]
call[name[p]][constant[2]] assign[=] call[call[name[slab].lattice.get_fractional_coords, parameter[list[[<ast.Subscript object at 0x7da18bc73b50>, <ast.Subscript object at 0x7da18bc70220>, <ast.BinOp object at 0x7da18bc73940>]]]]][constant[2]]
call[name[slab].symmetrically_add_atom, parameter[call[name[slab]][constant[0]].specie, name[p]]]
name[slab].reconstruction assign[=] name[self].name
call[name[setattr], parameter[name[slab], constant[recon_trans_matrix], name[self].trans_matrix]]
variable[ouc] assign[=] call[name[slab].oriented_unit_cell.copy, parameter[]]
call[name[ouc].make_supercell, parameter[name[self].trans_matrix]]
name[slab].oriented_unit_cell assign[=] name[ouc]
call[name[recon_slabs].append, parameter[name[slab]]]
return[name[recon_slabs]] | keyword[def] identifier[build_slabs] ( identifier[self] ):
literal[string]
identifier[slabs] = identifier[self] . identifier[get_unreconstructed_slabs] ()
identifier[recon_slabs] =[]
keyword[for] identifier[slab] keyword[in] identifier[slabs] :
identifier[d] = identifier[get_d] ( identifier[slab] )
identifier[top_site] = identifier[sorted] ( identifier[slab] , identifier[key] = keyword[lambda] identifier[site] : identifier[site] . identifier[frac_coords] [ literal[int] ])[- literal[int] ]. identifier[coords]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[reconstruction_json] . identifier[keys] ():
identifier[pts_to_rm] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[reconstruction_json] [ literal[string] ])
keyword[for] identifier[p] keyword[in] identifier[pts_to_rm] :
identifier[p] [ literal[int] ]= identifier[slab] . identifier[lattice] . identifier[get_fractional_coords] ([ identifier[top_site] [ literal[int] ], identifier[top_site] [ literal[int] ],
identifier[top_site] [ literal[int] ]+ identifier[p] [ literal[int] ]* identifier[d] ])[ literal[int] ]
identifier[cart_point] = identifier[slab] . identifier[lattice] . identifier[get_cartesian_coords] ( identifier[p] )
identifier[dist] =[ identifier[site] . identifier[distance_from_point] ( identifier[cart_point] ) keyword[for] identifier[site] keyword[in] identifier[slab] ]
identifier[site1] = identifier[dist] . identifier[index] ( identifier[min] ( identifier[dist] ))
identifier[slab] . identifier[symmetrically_remove_atoms] ([ identifier[site1] ])
keyword[if] literal[string] keyword[in] identifier[self] . identifier[reconstruction_json] . identifier[keys] ():
identifier[pts_to_add] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[reconstruction_json] [ literal[string] ])
keyword[for] identifier[p] keyword[in] identifier[pts_to_add] :
identifier[p] [ literal[int] ]= identifier[slab] . identifier[lattice] . identifier[get_fractional_coords] ([ identifier[top_site] [ literal[int] ], identifier[top_site] [ literal[int] ],
identifier[top_site] [ literal[int] ]+ identifier[p] [ literal[int] ]* identifier[d] ])[ literal[int] ]
identifier[slab] . identifier[symmetrically_add_atom] ( identifier[slab] [ literal[int] ]. identifier[specie] , identifier[p] )
identifier[slab] . identifier[reconstruction] = identifier[self] . identifier[name]
identifier[setattr] ( identifier[slab] , literal[string] , identifier[self] . identifier[trans_matrix] )
identifier[ouc] = identifier[slab] . identifier[oriented_unit_cell] . identifier[copy] ()
identifier[ouc] . identifier[make_supercell] ( identifier[self] . identifier[trans_matrix] )
identifier[slab] . identifier[oriented_unit_cell] = identifier[ouc]
identifier[recon_slabs] . identifier[append] ( identifier[slab] )
keyword[return] identifier[recon_slabs] | def build_slabs(self):
"""
Builds the reconstructed slab by:
(1) Obtaining the unreconstructed slab using the specified
parameters for the SlabGenerator.
(2) Applying the appropriate lattice transformation in the
a and b lattice vectors.
            (3) Removing any specified sites from both surfaces.
            (4) Adding any specified sites to both surfaces.
Returns:
(Slab): The reconstructed slab.
"""
slabs = self.get_unreconstructed_slabs()
recon_slabs = []
for slab in slabs:
d = get_d(slab)
top_site = sorted(slab, key=lambda site: site.frac_coords[2])[-1].coords
# Remove any specified sites
if 'points_to_remove' in self.reconstruction_json.keys():
pts_to_rm = copy.deepcopy(self.reconstruction_json['points_to_remove'])
for p in pts_to_rm:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1], top_site[2] + p[2] * d])[2]
cart_point = slab.lattice.get_cartesian_coords(p)
dist = [site.distance_from_point(cart_point) for site in slab]
site1 = dist.index(min(dist))
slab.symmetrically_remove_atoms([site1]) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
# Add any specified sites
if 'points_to_add' in self.reconstruction_json.keys():
pts_to_add = copy.deepcopy(self.reconstruction_json['points_to_add'])
for p in pts_to_add:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1], top_site[2] + p[2] * d])[2]
slab.symmetrically_add_atom(slab[0].specie, p) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
slab.reconstruction = self.name
setattr(slab, 'recon_trans_matrix', self.trans_matrix)
# Get the oriented_unit_cell with the same axb area.
ouc = slab.oriented_unit_cell.copy()
ouc.make_supercell(self.trans_matrix)
slab.oriented_unit_cell = ouc
recon_slabs.append(slab) # depends on [control=['for'], data=['slab']]
return recon_slabs |
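The nearest-site lookup inside the removal loop is a generic pattern worth isolating; a self-contained sketch with bare coordinates (illustrative only, no pymatgen objects involved):

points = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 2.0, 0.0)]
target = (0.9, 0.1, 0.0)
dist = [sum((a - b) ** 2 for a, b in zip(p, target)) ** 0.5 for p in points]
assert dist.index(min(dist)) == 1  # index of the closest site, as the loop picks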
def comittoapi(api):
"""
    Commit to the use of the specified Qt api.
    Raise an error if another Qt api is already loaded in sys.modules.
"""
global USED_API
assert USED_API is None, "committoapi called again!"
check = ["PyQt4", "PyQt5", "PySide", "PySide2"]
assert api in [QT_API_PYQT5, QT_API_PYQT4, QT_API_PYSIDE, QT_API_PYSIDE2]
for name in check:
if name.lower() != api and name in sys.modules:
raise RuntimeError(
"{} was already imported. Cannot commit to {}!"
.format(name, api)
)
else:
api = _intern(api)
USED_API = api
AnyQt.__SELECTED_API = api
AnyQt.USED_API = api | def function[comittoapi, parameter[api]]:
constant[
    Commit to the use of the specified Qt api.
    Raise an error if another Qt api is already loaded in sys.modules.
]
<ast.Global object at 0x7da1b26ad270>
assert[compare[name[USED_API] is constant[None]]]
variable[check] assign[=] list[[<ast.Constant object at 0x7da1b26ad1e0>, <ast.Constant object at 0x7da1b26af5b0>, <ast.Constant object at 0x7da1b26aded0>, <ast.Constant object at 0x7da1b26af010>]]
assert[compare[name[api] in list[[<ast.Name object at 0x7da1b26ae140>, <ast.Name object at 0x7da1b26ae9e0>, <ast.Name object at 0x7da1b26ae740>, <ast.Name object at 0x7da1b26ac2b0>]]]]
for taget[name[name]] in starred[name[check]] begin[:]
if <ast.BoolOp object at 0x7da1b26ac7c0> begin[:]
<ast.Raise object at 0x7da1b26ae410> | keyword[def] identifier[comittoapi] ( identifier[api] ):
literal[string]
keyword[global] identifier[USED_API]
keyword[assert] identifier[USED_API] keyword[is] keyword[None] , literal[string]
identifier[check] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[assert] identifier[api] keyword[in] [ identifier[QT_API_PYQT5] , identifier[QT_API_PYQT4] , identifier[QT_API_PYSIDE] , identifier[QT_API_PYSIDE2] ]
keyword[for] identifier[name] keyword[in] identifier[check] :
keyword[if] identifier[name] . identifier[lower] ()!= identifier[api] keyword[and] identifier[name] keyword[in] identifier[sys] . identifier[modules] :
keyword[raise] identifier[RuntimeError] (
literal[string]
. identifier[format] ( identifier[name] , identifier[api] )
)
keyword[else] :
identifier[api] = identifier[_intern] ( identifier[api] )
identifier[USED_API] = identifier[api]
identifier[AnyQt] . identifier[__SELECTED_API] = identifier[api]
identifier[AnyQt] . identifier[USED_API] = identifier[api] | def comittoapi(api):
"""
    Commit to the use of the specified Qt api.
    Raise an error if another Qt api is already loaded in sys.modules.
"""
global USED_API
assert USED_API is None, 'committoapi called again!'
check = ['PyQt4', 'PyQt5', 'PySide', 'PySide2']
assert api in [QT_API_PYQT5, QT_API_PYQT4, QT_API_PYSIDE, QT_API_PYSIDE2]
for name in check:
if name.lower() != api and name in sys.modules:
raise RuntimeError('{} was already imported. Cannot commit to {}!'.format(name, api)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
else:
api = _intern(api)
USED_API = api
AnyQt.__SELECTED_API = api
AnyQt.USED_API = api |
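The guard reduces to "refuse if a conflicting binding is already imported"; a standalone restatement (the helper name is invented here, it is not part of AnyQt):

import sys

def check_no_conflicting_import(api, candidates=('PyQt4', 'PyQt5', 'PySide', 'PySide2')):
    for name in candidates:  # same loop as above, minus the commit bookkeeping
        if name.lower() != api and name in sys.modules:
            raise RuntimeError('{} was already imported. Cannot commit to {}!'.format(name, api))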
def suggest():
""" Suggest a new random brain key. Randomness is provided by the
operating system using ``os.urandom()``.
"""
word_count = 16
brainkey = [None] * word_count
dict_lines = BrainKeyDictionary.split(",")
assert len(dict_lines) == 49744
for j in range(0, word_count):
num = int.from_bytes(os.urandom(2), byteorder="little")
        rndMult = num / 2 ** 16  # returns float in [0, 1)
wIdx = round(len(dict_lines) * rndMult)
brainkey[j] = dict_lines[wIdx]
return " ".join(brainkey).upper() | def function[suggest, parameter[]]:
constant[ Suggest a new random brain key. Randomness is provided by the
operating system using ``os.urandom()``.
]
variable[word_count] assign[=] constant[16]
variable[brainkey] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b01e7580>]] * name[word_count]]
variable[dict_lines] assign[=] call[name[BrainKeyDictionary].split, parameter[constant[,]]]
assert[compare[call[name[len], parameter[name[dict_lines]]] equal[==] constant[49744]]]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], name[word_count]]]] begin[:]
variable[num] assign[=] call[name[int].from_bytes, parameter[call[name[os].urandom, parameter[constant[2]]]]]
variable[rndMult] assign[=] binary_operation[name[num] / binary_operation[constant[2] ** constant[16]]]
variable[wIdx] assign[=] call[name[round], parameter[binary_operation[call[name[len], parameter[name[dict_lines]]] * name[rndMult]]]]
call[name[brainkey]][name[j]] assign[=] call[name[dict_lines]][name[wIdx]]
return[call[call[constant[ ].join, parameter[name[brainkey]]].upper, parameter[]]] | keyword[def] identifier[suggest] ():
literal[string]
identifier[word_count] = literal[int]
identifier[brainkey] =[ keyword[None] ]* identifier[word_count]
identifier[dict_lines] = identifier[BrainKeyDictionary] . identifier[split] ( literal[string] )
keyword[assert] identifier[len] ( identifier[dict_lines] )== literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[word_count] ):
identifier[num] = identifier[int] . identifier[from_bytes] ( identifier[os] . identifier[urandom] ( literal[int] ), identifier[byteorder] = literal[string] )
identifier[rndMult] = identifier[num] / literal[int] ** literal[int]
identifier[wIdx] = identifier[round] ( identifier[len] ( identifier[dict_lines] )* identifier[rndMult] )
identifier[brainkey] [ identifier[j] ]= identifier[dict_lines] [ identifier[wIdx] ]
keyword[return] literal[string] . identifier[join] ( identifier[brainkey] ). identifier[upper] () | def suggest():
""" Suggest a new random brain key. Randomness is provided by the
operating system using ``os.urandom()``.
"""
word_count = 16
brainkey = [None] * word_count
dict_lines = BrainKeyDictionary.split(',')
assert len(dict_lines) == 49744
for j in range(0, word_count):
num = int.from_bytes(os.urandom(2), byteorder='little')
        rndMult = num / 2 ** 16 # returns float in [0, 1)
wIdx = round(len(dict_lines) * rndMult)
brainkey[j] = dict_lines[wIdx] # depends on [control=['for'], data=['j']]
return ' '.join(brainkey).upper() |
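The sampling step, restated standalone with a tiny word list; with only four words round() can reach len(words), so a clamp is added here (the real 49744-entry dictionary never overflows):

import os

words = 'ALPHA,BRAVO,CHARLIE,DELTA'.split(',')  # stand-in for BrainKeyDictionary
num = int.from_bytes(os.urandom(2), byteorder='little')  # uniform in 0..65535
idx = min(round(len(words) * num / 2 ** 16), len(words) - 1)  # clamp the round-up
print(words[idx])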
def ignore(code):
"""Should this code be ignored.
:param str code: Error code (e.g. D201).
:return: True if code should be ignored, False otherwise.
:rtype: bool
"""
if code in Main.options['ignore']:
return True
if any(c in code for c in Main.options['ignore']):
return True
return False | def function[ignore, parameter[code]]:
constant[Should this code be ignored.
:param str code: Error code (e.g. D201).
:return: True if code should be ignored, False otherwise.
:rtype: bool
]
if compare[name[code] in call[name[Main].options][constant[ignore]]] begin[:]
return[constant[True]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b195e890>]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[ignore] ( identifier[code] ):
literal[string]
keyword[if] identifier[code] keyword[in] identifier[Main] . identifier[options] [ literal[string] ]:
keyword[return] keyword[True]
keyword[if] identifier[any] ( identifier[c] keyword[in] identifier[code] keyword[for] identifier[c] keyword[in] identifier[Main] . identifier[options] [ literal[string] ]):
keyword[return] keyword[True]
keyword[return] keyword[False] | def ignore(code):
"""Should this code be ignored.
:param str code: Error code (e.g. D201).
:return: True if code should be ignored, False otherwise.
:rtype: bool
"""
if code in Main.options['ignore']:
return True # depends on [control=['if'], data=[]]
if any((c in code for c in Main.options['ignore'])):
return True # depends on [control=['if'], data=[]]
return False |
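Since exact membership in the ignore list is subsumed by the substring check that follows it, the logic collapses to one line; a standalone restatement with an explicit options dict (Main is not shown in this row):

options = {'ignore': ['D2', 'D105']}  # illustrative configuration

def should_ignore(code):
    return any(c in code for c in options['ignore'])  # substring semantics, as above

assert should_ignore('D201') and should_ignore('D105') and not should_ignore('D100')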
def read_locked(*args, **kwargs):
"""Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to.
"""
def decorator(f):
attr_name = kwargs.get('lock', '_lock')
@six.wraps(f)
def wrapper(self, *args, **kwargs):
rw_lock = getattr(self, attr_name)
with rw_lock.read_lock():
return f(self, *args, **kwargs)
return wrapper
# This is needed to handle when the decorator has args or the decorator
# doesn't have args, python is rather weird here...
if kwargs or not args:
return decorator
else:
if len(args) == 1:
return decorator(args[0])
else:
return decorator | def function[read_locked, parameter[]]:
constant[Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to.
]
def function[decorator, parameter[f]]:
variable[attr_name] assign[=] call[name[kwargs].get, parameter[constant[lock], constant[_lock]]]
def function[wrapper, parameter[self]]:
variable[rw_lock] assign[=] call[name[getattr], parameter[name[self], name[attr_name]]]
with call[name[rw_lock].read_lock, parameter[]] begin[:]
return[call[name[f], parameter[name[self], <ast.Starred object at 0x7da1b11bcdc0>]]]
return[name[wrapper]]
if <ast.BoolOp object at 0x7da1b11bce50> begin[:]
return[name[decorator]] | keyword[def] identifier[read_locked] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[f] ):
identifier[attr_name] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
@ identifier[six] . identifier[wraps] ( identifier[f] )
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[rw_lock] = identifier[getattr] ( identifier[self] , identifier[attr_name] )
keyword[with] identifier[rw_lock] . identifier[read_lock] ():
keyword[return] identifier[f] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
keyword[if] identifier[kwargs] keyword[or] keyword[not] identifier[args] :
keyword[return] identifier[decorator]
keyword[else] :
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
keyword[return] identifier[decorator] ( identifier[args] [ literal[int] ])
keyword[else] :
keyword[return] identifier[decorator] | def read_locked(*args, **kwargs):
"""Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to.
"""
def decorator(f):
attr_name = kwargs.get('lock', '_lock')
@six.wraps(f)
def wrapper(self, *args, **kwargs):
rw_lock = getattr(self, attr_name)
with rw_lock.read_lock():
return f(self, *args, **kwargs) # depends on [control=['with'], data=[]]
return wrapper
# This is needed to handle when the decorator has args or the decorator
# doesn't have args, python is rather weird here...
if kwargs or not args:
return decorator # depends on [control=['if'], data=[]]
elif len(args) == 1:
return decorator(args[0]) # depends on [control=['if'], data=[]]
else:
return decorator |
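A minimal wiring sketch, assuming read_locked and its six dependency are importable; the stub below only mimics the read_lock() context-manager interface of a real reader-writer lock (e.g. fasteners.ReaderWriterLock), and '_lock' is the default attribute name the decorator resolves:

import contextlib

class _StubRWLock:
    @contextlib.contextmanager
    def read_lock(self):  # stand-in: a real lock would block writers here
        yield

class Cache:
    def __init__(self):
        self._lock = _StubRWLock()
        self._data = {}

    @read_locked
    def get(self, key):  # body executes while the read lock is held
        return self._data.get(key)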
def finalize(state, block):
"""Apply rewards and commit."""
if state.is_METROPOLIS():
br = state.config['BYZANTIUM_BLOCK_REWARD']
nr = state.config['BYZANTIUM_NEPHEW_REWARD']
else:
br = state.config['BLOCK_REWARD']
nr = state.config['NEPHEW_REWARD']
delta = int(br + nr * len(block.uncles))
state.delta_balance(state.block_coinbase, delta)
udpf = state.config['UNCLE_DEPTH_PENALTY_FACTOR']
for uncle in block.uncles:
r = int(br * (udpf + uncle.number - state.block_number) // udpf)
state.delta_balance(uncle.coinbase, r)
if state.block_number - \
state.config['MAX_UNCLE_DEPTH'] in state.recent_uncles:
del state.recent_uncles[state.block_number -
state.config['MAX_UNCLE_DEPTH']] | def function[finalize, parameter[state, block]]:
constant[Apply rewards and commit.]
if call[name[state].is_METROPOLIS, parameter[]] begin[:]
variable[br] assign[=] call[name[state].config][constant[BYZANTIUM_BLOCK_REWARD]]
variable[nr] assign[=] call[name[state].config][constant[BYZANTIUM_NEPHEW_REWARD]]
variable[delta] assign[=] call[name[int], parameter[binary_operation[name[br] + binary_operation[name[nr] * call[name[len], parameter[name[block].uncles]]]]]]
call[name[state].delta_balance, parameter[name[state].block_coinbase, name[delta]]]
variable[udpf] assign[=] call[name[state].config][constant[UNCLE_DEPTH_PENALTY_FACTOR]]
for taget[name[uncle]] in starred[name[block].uncles] begin[:]
variable[r] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[br] * binary_operation[binary_operation[name[udpf] + name[uncle].number] - name[state].block_number]] <ast.FloorDiv object at 0x7da2590d6bc0> name[udpf]]]]
call[name[state].delta_balance, parameter[name[uncle].coinbase, name[r]]]
if compare[binary_operation[name[state].block_number - call[name[state].config][constant[MAX_UNCLE_DEPTH]]] in name[state].recent_uncles] begin[:]
<ast.Delete object at 0x7da1b180d150> | keyword[def] identifier[finalize] ( identifier[state] , identifier[block] ):
literal[string]
keyword[if] identifier[state] . identifier[is_METROPOLIS] ():
identifier[br] = identifier[state] . identifier[config] [ literal[string] ]
identifier[nr] = identifier[state] . identifier[config] [ literal[string] ]
keyword[else] :
identifier[br] = identifier[state] . identifier[config] [ literal[string] ]
identifier[nr] = identifier[state] . identifier[config] [ literal[string] ]
identifier[delta] = identifier[int] ( identifier[br] + identifier[nr] * identifier[len] ( identifier[block] . identifier[uncles] ))
identifier[state] . identifier[delta_balance] ( identifier[state] . identifier[block_coinbase] , identifier[delta] )
identifier[udpf] = identifier[state] . identifier[config] [ literal[string] ]
keyword[for] identifier[uncle] keyword[in] identifier[block] . identifier[uncles] :
identifier[r] = identifier[int] ( identifier[br] *( identifier[udpf] + identifier[uncle] . identifier[number] - identifier[state] . identifier[block_number] )// identifier[udpf] )
identifier[state] . identifier[delta_balance] ( identifier[uncle] . identifier[coinbase] , identifier[r] )
keyword[if] identifier[state] . identifier[block_number] - identifier[state] . identifier[config] [ literal[string] ] keyword[in] identifier[state] . identifier[recent_uncles] :
keyword[del] identifier[state] . identifier[recent_uncles] [ identifier[state] . identifier[block_number] -
identifier[state] . identifier[config] [ literal[string] ]] | def finalize(state, block):
"""Apply rewards and commit."""
if state.is_METROPOLIS():
br = state.config['BYZANTIUM_BLOCK_REWARD']
nr = state.config['BYZANTIUM_NEPHEW_REWARD'] # depends on [control=['if'], data=[]]
else:
br = state.config['BLOCK_REWARD']
nr = state.config['NEPHEW_REWARD']
delta = int(br + nr * len(block.uncles))
state.delta_balance(state.block_coinbase, delta)
udpf = state.config['UNCLE_DEPTH_PENALTY_FACTOR']
for uncle in block.uncles:
r = int(br * (udpf + uncle.number - state.block_number) // udpf)
state.delta_balance(uncle.coinbase, r) # depends on [control=['for'], data=['uncle']]
if state.block_number - state.config['MAX_UNCLE_DEPTH'] in state.recent_uncles:
del state.recent_uncles[state.block_number - state.config['MAX_UNCLE_DEPTH']] # depends on [control=['if'], data=[]] |
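A worked instance of the uncle-reward arithmetic, with constants matching common post-Byzantium defaults (3 ETH block reward, depth-penalty factor 8); treat both values as assumptions here:

br = 3 * 10 ** 18                              # BYZANTIUM_BLOCK_REWARD in wei
udpf = 8                                       # UNCLE_DEPTH_PENALTY_FACTOR
block_number, uncle_number = 4370005, 4370003  # uncle mined two blocks back
r = int(br * (udpf + uncle_number - block_number) // udpf)
assert r == 2250000000000000000                # 6/8 of the full block reward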
def rpyhttp(value):
""" converts a no namespace pyuri back to a standard uri """
if value.startswith("http"):
return value
try:
parts = value.split("_")
del parts[0]
_uri = base64.b64decode(parts.pop(0)).decode()
return _uri + "_".join(parts)
except (IndexError, UnicodeDecodeError, binascii.Error):
# if the value is not a pyuri return the value
return value | def function[rpyhttp, parameter[value]]:
    constant[ converts a namespace-free pyuri back to a standard uri ]
if call[name[value].startswith, parameter[constant[http]]] begin[:]
return[name[value]]
<ast.Try object at 0x7da20e957ac0> | keyword[def] identifier[rpyhttp] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[value]
keyword[try] :
identifier[parts] = identifier[value] . identifier[split] ( literal[string] )
keyword[del] identifier[parts] [ literal[int] ]
identifier[_uri] = identifier[base64] . identifier[b64decode] ( identifier[parts] . identifier[pop] ( literal[int] )). identifier[decode] ()
keyword[return] identifier[_uri] + literal[string] . identifier[join] ( identifier[parts] )
keyword[except] ( identifier[IndexError] , identifier[UnicodeDecodeError] , identifier[binascii] . identifier[Error] ):
keyword[return] identifier[value] | def rpyhttp(value):
""" converts a no namespace pyuri back to a standard uri """
if value.startswith('http'):
return value # depends on [control=['if'], data=[]]
try:
parts = value.split('_')
del parts[0]
_uri = base64.b64decode(parts.pop(0)).decode()
return _uri + '_'.join(parts) # depends on [control=['try'], data=[]]
except (IndexError, UnicodeDecodeError, binascii.Error):
# if the value is not a pyuri return the value
return value # depends on [control=['except'], data=[]] |
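A round-trip sketch; the 'pyuri_<base64>_<tail>' layout is inferred from the split/decode logic above, so treat the shape as an assumption:

import base64

prefix = base64.b64encode(b'http://example.org/').decode()
pyuri = 'pyuri_{}_thing'.format(prefix)
# The leading token is dropped, the base64 chunk decoded, the remainder rejoined:
# >>> rpyhttp(pyuri)
# 'http://example.org/thing'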
def get_local_playlists(filepaths, exclude_patterns=None, max_depth=float('inf')):
"""Load playlists from local filepaths.
Parameters:
filepaths (list or str): Filepath(s) to search for music files.
exclude_patterns (list or str): Pattern(s) to exclude.
Patterns are Python regex patterns.
Filepaths are excluded if they match any of the exclude patterns.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit.
Returns:
A list of local playlist filepaths matching criteria
and a list of local playlist filepaths excluded using exclusion criteria.
"""
logger.info("Loading local playlists...")
included_playlists = []
excluded_playlists = []
supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_PLAYLIST_FORMATS, max_depth=max_depth)
included_playlists, excluded_playlists = exclude_filepaths(supported_filepaths, exclude_patterns=exclude_patterns)
logger.info("Excluded {0} local playlists".format(len(excluded_playlists)))
logger.info("Loaded {0} local playlists".format(len(included_playlists)))
return included_playlists, excluded_playlists | def function[get_local_playlists, parameter[filepaths, exclude_patterns, max_depth]]:
constant[Load playlists from local filepaths.
Parameters:
filepaths (list or str): Filepath(s) to search for music files.
exclude_patterns (list or str): Pattern(s) to exclude.
Patterns are Python regex patterns.
Filepaths are excluded if they match any of the exclude patterns.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit.
Returns:
A list of local playlist filepaths matching criteria
and a list of local playlist filepaths excluded using exclusion criteria.
]
call[name[logger].info, parameter[constant[Loading local playlists...]]]
variable[included_playlists] assign[=] list[[]]
variable[excluded_playlists] assign[=] list[[]]
variable[supported_filepaths] assign[=] call[name[get_supported_filepaths], parameter[name[filepaths], name[SUPPORTED_PLAYLIST_FORMATS]]]
<ast.Tuple object at 0x7da1b26ae650> assign[=] call[name[exclude_filepaths], parameter[name[supported_filepaths]]]
call[name[logger].info, parameter[call[constant[Excluded {0} local playlists].format, parameter[call[name[len], parameter[name[excluded_playlists]]]]]]]
call[name[logger].info, parameter[call[constant[Loaded {0} local playlists].format, parameter[call[name[len], parameter[name[included_playlists]]]]]]]
return[tuple[[<ast.Name object at 0x7da1b00f5210>, <ast.Name object at 0x7da1b00f4be0>]]] | keyword[def] identifier[get_local_playlists] ( identifier[filepaths] , identifier[exclude_patterns] = keyword[None] , identifier[max_depth] = identifier[float] ( literal[string] )):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[included_playlists] =[]
identifier[excluded_playlists] =[]
identifier[supported_filepaths] = identifier[get_supported_filepaths] ( identifier[filepaths] , identifier[SUPPORTED_PLAYLIST_FORMATS] , identifier[max_depth] = identifier[max_depth] )
identifier[included_playlists] , identifier[excluded_playlists] = identifier[exclude_filepaths] ( identifier[supported_filepaths] , identifier[exclude_patterns] = identifier[exclude_patterns] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[excluded_playlists] )))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[included_playlists] )))
keyword[return] identifier[included_playlists] , identifier[excluded_playlists] | def get_local_playlists(filepaths, exclude_patterns=None, max_depth=float('inf')):
"""Load playlists from local filepaths.
Parameters:
filepaths (list or str): Filepath(s) to search for music files.
exclude_patterns (list or str): Pattern(s) to exclude.
Patterns are Python regex patterns.
Filepaths are excluded if they match any of the exclude patterns.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit.
Returns:
A list of local playlist filepaths matching criteria
and a list of local playlist filepaths excluded using exclusion criteria.
"""
logger.info('Loading local playlists...')
included_playlists = []
excluded_playlists = []
supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_PLAYLIST_FORMATS, max_depth=max_depth)
(included_playlists, excluded_playlists) = exclude_filepaths(supported_filepaths, exclude_patterns=exclude_patterns)
logger.info('Excluded {0} local playlists'.format(len(excluded_playlists)))
logger.info('Loaded {0} local playlists'.format(len(included_playlists)))
return (included_playlists, excluded_playlists) |
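A hedged call sketch; SUPPORTED_PLAYLIST_FORMATS and the two helper functions come from the surrounding module, so only the call shape is shown:

# included, excluded = get_local_playlists(
#     ['~/Music', '~/Playlists'], exclude_patterns=[r'backup'], max_depth=2)
# print(len(included), 'kept /', len(excluded), 'excluded')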
async def emit(self, event, data, namespace=None, room=None, skip_sid=None,
callback=None, **kwargs):
"""Emit a message to a single client, a room, or all the clients
connected to the namespace.
        This method takes care of propagating the message to all the servers
that are connected through the message queue.
The parameters are the same as in :meth:`.Server.emit`.
Note: this method is a coroutine.
"""
if kwargs.get('ignore_queue'):
return await super().emit(
event, data, namespace=namespace, room=room, skip_sid=skip_sid,
callback=callback)
namespace = namespace or '/'
if callback is not None:
if self.server is None:
raise RuntimeError('Callbacks can only be issued from the '
'context of a server.')
if room is None:
raise ValueError('Cannot use callback without a room set.')
id = self._generate_ack_id(room, namespace, callback)
callback = (room, namespace, id)
else:
callback = None
await self._publish({'method': 'emit', 'event': event, 'data': data,
'namespace': namespace, 'room': room,
'skip_sid': skip_sid, 'callback': callback,
'host_id': self.host_id}) | <ast.AsyncFunctionDef object at 0x7da18dc996c0> | keyword[async] keyword[def] identifier[emit] ( identifier[self] , identifier[event] , identifier[data] , identifier[namespace] = keyword[None] , identifier[room] = keyword[None] , identifier[skip_sid] = keyword[None] ,
identifier[callback] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] keyword[await] identifier[super] (). identifier[emit] (
identifier[event] , identifier[data] , identifier[namespace] = identifier[namespace] , identifier[room] = identifier[room] , identifier[skip_sid] = identifier[skip_sid] ,
identifier[callback] = identifier[callback] )
identifier[namespace] = identifier[namespace] keyword[or] literal[string]
keyword[if] identifier[callback] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[server] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] )
keyword[if] identifier[room] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[id] = identifier[self] . identifier[_generate_ack_id] ( identifier[room] , identifier[namespace] , identifier[callback] )
identifier[callback] =( identifier[room] , identifier[namespace] , identifier[id] )
keyword[else] :
identifier[callback] = keyword[None]
keyword[await] identifier[self] . identifier[_publish] ({ literal[string] : literal[string] , literal[string] : identifier[event] , literal[string] : identifier[data] ,
literal[string] : identifier[namespace] , literal[string] : identifier[room] ,
literal[string] : identifier[skip_sid] , literal[string] : identifier[callback] ,
literal[string] : identifier[self] . identifier[host_id] }) | async def emit(self, event, data, namespace=None, room=None, skip_sid=None, callback=None, **kwargs):
"""Emit a message to a single client, a room, or all the clients
connected to the namespace.
    This method takes care of propagating the message to all the servers
that are connected through the message queue.
The parameters are the same as in :meth:`.Server.emit`.
Note: this method is a coroutine.
"""
if kwargs.get('ignore_queue'):
return await super().emit(event, data, namespace=namespace, room=room, skip_sid=skip_sid, callback=callback) # depends on [control=['if'], data=[]]
namespace = namespace or '/'
if callback is not None:
if self.server is None:
raise RuntimeError('Callbacks can only be issued from the context of a server.') # depends on [control=['if'], data=[]]
if room is None:
raise ValueError('Cannot use callback without a room set.') # depends on [control=['if'], data=[]]
id = self._generate_ack_id(room, namespace, callback)
callback = (room, namespace, id) # depends on [control=['if'], data=['callback']]
else:
callback = None
await self._publish({'method': 'emit', 'event': event, 'data': data, 'namespace': namespace, 'room': room, 'skip_sid': skip_sid, 'callback': callback, 'host_id': self.host_id}) |
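A hedged usage sketch from inside an asyncio coroutine; `mgr` stands for a manager instance already attached to a running server, and the event name, room, and callback are invented:

# async def notify(mgr, sid):
#     await mgr.emit('progress', {'pct': 42}, room=sid,
#                    callback=lambda *ack: print('acked', ack))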
def get_publication(request):
"""Lookup publication state"""
publication_id = request.matchdict['id']
state, messages = check_publication_state(publication_id)
response_data = {
'publication': publication_id,
'state': state,
'messages': messages,
}
return response_data | def function[get_publication, parameter[request]]:
constant[Lookup publication state]
variable[publication_id] assign[=] call[name[request].matchdict][constant[id]]
<ast.Tuple object at 0x7da20c6e6ef0> assign[=] call[name[check_publication_state], parameter[name[publication_id]]]
variable[response_data] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e5210>, <ast.Constant object at 0x7da20c6e4430>, <ast.Constant object at 0x7da20c6e6dd0>], [<ast.Name object at 0x7da20c6e4d60>, <ast.Name object at 0x7da20c6e4d30>, <ast.Name object at 0x7da20c6e5e40>]]
return[name[response_data]] | keyword[def] identifier[get_publication] ( identifier[request] ):
literal[string]
identifier[publication_id] = identifier[request] . identifier[matchdict] [ literal[string] ]
identifier[state] , identifier[messages] = identifier[check_publication_state] ( identifier[publication_id] )
identifier[response_data] ={
literal[string] : identifier[publication_id] ,
literal[string] : identifier[state] ,
literal[string] : identifier[messages] ,
}
keyword[return] identifier[response_data] | def get_publication(request):
"""Lookup publication state"""
publication_id = request.matchdict['id']
(state, messages) = check_publication_state(publication_id)
response_data = {'publication': publication_id, 'state': state, 'messages': messages}
return response_data |
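The view presupposes a Pyramid route capturing an 'id' segment; a hedged wiring sketch (route name invented):

# config.add_route('get-publication', '/publications/{id}')
# config.add_view(get_publication, route_name='get-publication', renderer='json')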
def size_control_valve_g(T, MW, mu, gamma, Z, P1, P2, Q, D1=None, D2=None,
d=None, FL=0.9, Fd=1, xT=0.7, allow_choked=True,
allow_laminar=True, full_output=False):
r'''Calculates flow coefficient of a control valve passing a gas
according to IEC 60534. Uses a large number of inputs in SI units. Note the
return value is not standard SI. All parameters are required. For details
of the calculations, consult [1]_. Note the inlet gas flow conditions.
Parameters
----------
T : float
Temperature of the gas at the inlet [K]
MW : float
Molecular weight of the gas [g/mol]
mu : float
Viscosity of the fluid at inlet conditions [Pa*s]
gamma : float
Specific heat capacity ratio [-]
Z : float
Compressibility factor at inlet conditions, [-]
P1 : float
Inlet pressure of the gas before valves and reducers [Pa]
P2 : float
Outlet pressure of the gas after valves and reducers [Pa]
Q : float
Volumetric flow rate of the gas at *273.15 K* and 1 atm specifically
[m^3/s]
D1 : float, optional
Diameter of the pipe before the valve [m]
D2 : float, optional
Diameter of the pipe after the valve [m]
d : float, optional
Diameter of the valve [m]
FL : float, optional
Liquid pressure recovery factor of a control valve without attached
fittings (normally 0.8-0.9 at full open and decreasing as opened
further to below 0.5; use default very cautiously!) []
Fd : float, optional
Valve style modifier (0.1 to 1; varies tremendously depending on the
type of valve and position; do not use the default at all!) []
xT : float, optional
Pressure difference ratio factor of a valve without fittings at choked
flow (increasing to 0.9 or higher as the valve is closed further and
decreasing to 0.1 or lower as the valve is opened further; use default
very cautiously!) [-]
allow_choked : bool, optional
Overrides the automatic transition into the choked regime if this is
False and returns as if choked flow does not exist
allow_laminar : bool, optional
Overrides the automatic transition into the laminar regime if this is
False and returns as if laminar flow does not exist
full_output : bool, optional
If True, returns intermediate calculation values as
well as Kv in the form of a dictionary containing 'Kv', 'Rev', 'choked',
'Y', 'FR', 'FP', 'xTP', and 'laminar'. Some may be None if they are
not used in the calculation.
Returns
-------
Kv : float
Metric Kv valve flow coefficient (flow rate of water at a pressure drop
of 1 bar) [m^3/hr]
Notes
-----
It is possible to use this model without any diameters specified; in that
case, turbulent flow is assumed. Choked flow can still be modeled. This is
not recommended. All three diameters need to be None for this to work.
`FL` and `Fd` are not used by the models when the diameters are not
specified, but `xT` definitely is used by the model.
Examples
--------
From [1]_, matching example 3 for non-choked gas flow with attached
fittings and a rotary, eccentric plug, flow-to-open control valve:
>>> size_control_valve_g(T=433., MW=44.01, mu=1.4665E-4, gamma=1.30,
... Z=0.988, P1=680E3, P2=310E3, Q=38/36., D1=0.08, D2=0.1, d=0.05,
... FL=0.85, Fd=0.42, xT=0.60)
72.58664545391052
From [1]_, roughly matching example 4 for a small flow trim sized tapered
needle plug valve. Difference is 3% and explained by the difference in
algorithms used.
>>> size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0,
... P1=2.8E5, P2=1.3E5, Q=0.46/3600., D1=0.015, D2=0.015, d=0.015, FL=0.98,
... Fd=0.07, xT=0.8)
0.016498765335995726
References
----------
.. [1] IEC 60534-2-1 / ISA-75.01.01-2007
'''
MAX_C_POSSIBLE = 1E40 # Quit iterations if C reaches this high
# Pa to kPa, according to constants in standard
P1, P2 = P1/1000., P2/1000.
Q = Q*3600. # m^3/s to m^3/hr, according to constants in standard
# Convert dynamic viscosity to kinematic viscosity
Vm = Z*R*T/(P1*1000)
rho = (Vm)**-1*MW/1000.
nu = mu/rho # kinematic viscosity used in standard
dP = P1 - P2
Fgamma = gamma/1.40
x = dP/P1
Y = max(1 - x/(3*Fgamma*xT), 2/3.)
choked = is_choked_turbulent_g(x, Fgamma, xT)
if choked and allow_choked:
# Choked, and flow coefficient from eq 14a
C = Q/(N9*P1*Y)*(MW*T*Z/xT/Fgamma)**0.5
else:
# Non-choked, and flow coefficient from eq 8a
C = Q/(N9*P1*Y)*(MW*T*Z/x)**0.5
if full_output:
ans = {'FP': None, 'xTP': None, 'FR': None,
'choked': choked, 'Y': Y}
if D1 is None and D2 is None and d is None:
# Assume turbulent if no diameters are provided, no other calculations
Rev = 1e5
if full_output:
ans['Rev'] = None
else:
# m to mm, according to constants in standard
D1, D2, d = D1*1000., D2*1000., d*1000. # Convert diameters to mm which is used in the standard
Rev = Reynolds_valve(nu=nu, Q=Q, D1=D1, FL=FL, Fd=Fd, C=C)
if full_output:
ans['Rev'] = Rev
if (Rev > 10000 or not allow_laminar) and (D1 != d or D2 != d):
# gas, using xTP and FLP
FP = 1.
MAX_ITER = 20
def iterate_piping_coef(Ci, iterations):
loss = loss_coefficient_piping(d, D1, D2)
FP = (1. + loss/N2*(Ci/d**2)**2)**-0.5
loss_upstream = loss_coefficient_piping(d, D1)
xTP = xT/FP**2/(1 + xT*loss_upstream/N5*(Ci/d**2)**2)
choked = is_choked_turbulent_g(x, Fgamma, xTP=xTP)
if choked:
# Choked flow with piping, equation 17a
C = Q/(N9*FP*P1*Y)*(MW*T*Z/xTP/Fgamma)**0.5
else:
# Non-choked flow with piping, equation 11a
C = Q/(N9*FP*P1*Y)*(MW*T*Z/x)**0.5
if Ci/C < 0.99 and iterations < MAX_ITER and Ci < MAX_C_POSSIBLE:
C = iterate_piping_coef(C, iterations+1)
if full_output:
ans['xTP'] = xTP
ans['FP'] = FP
ans['choked'] = choked
if MAX_ITER == iterations or Ci >= MAX_C_POSSIBLE:
ans['warning'] = 'Not converged in inner loop'
return C
C = iterate_piping_coef(C, 0)
elif Rev <= 10000 and allow_laminar:
# Laminar;
def iterate_piping_laminar(C):
Ci = 1.3*C
Rev = Reynolds_valve(nu=nu, Q=Q, D1=D1, FL=FL, Fd=Fd, C=Ci)
if Ci/d**2 > 0.016*N18:
FR = Reynolds_factor(FL=FL, C=Ci, d=d, Rev=Rev, full_trim=False)
else:
FR = Reynolds_factor(FL=FL, C=Ci, d=d, Rev=Rev, full_trim=True)
if C/FR >= Ci:
Ci = iterate_piping_laminar(Ci)
if full_output:
ans['FR'] = FR
ans['Rev'] = Rev
return Ci
C = iterate_piping_laminar(C)
if full_output:
ans['Kv'] = C
ans['laminar'] = Rev <= 10000
return ans
else:
return C | def function[size_control_valve_g, parameter[T, MW, mu, gamma, Z, P1, P2, Q, D1, D2, d, FL, Fd, xT, allow_choked, allow_laminar, full_output]]:
constant[Calculates flow coefficient of a control valve passing a gas
according to IEC 60534. Uses a large number of inputs in SI units. Note the
return value is not standard SI. All parameters are required. For details
of the calculations, consult [1]_. Note the inlet gas flow conditions.
Parameters
----------
T : float
Temperature of the gas at the inlet [K]
MW : float
Molecular weight of the gas [g/mol]
mu : float
Viscosity of the fluid at inlet conditions [Pa*s]
gamma : float
Specific heat capacity ratio [-]
Z : float
Compressibility factor at inlet conditions, [-]
P1 : float
Inlet pressure of the gas before valves and reducers [Pa]
P2 : float
Outlet pressure of the gas after valves and reducers [Pa]
Q : float
Volumetric flow rate of the gas at *273.15 K* and 1 atm specifically
[m^3/s]
D1 : float, optional
Diameter of the pipe before the valve [m]
D2 : float, optional
Diameter of the pipe after the valve [m]
d : float, optional
Diameter of the valve [m]
FL : float, optional
Liquid pressure recovery factor of a control valve without attached
fittings (normally 0.8-0.9 at full open and decreasing as opened
further to below 0.5; use default very cautiously!) []
Fd : float, optional
Valve style modifier (0.1 to 1; varies tremendously depending on the
type of valve and position; do not use the default at all!) []
xT : float, optional
Pressure difference ratio factor of a valve without fittings at choked
flow (increasing to 0.9 or higher as the valve is closed further and
decreasing to 0.1 or lower as the valve is opened further; use default
very cautiously!) [-]
allow_choked : bool, optional
Overrides the automatic transition into the choked regime if this is
False and returns as if choked flow does not exist
allow_laminar : bool, optional
Overrides the automatic transition into the laminar regime if this is
False and returns as if laminar flow does not exist
full_output : bool, optional
If True, returns intermediate calculation values as
well as Kv in the form of a dictionary containing 'Kv', 'Rev', 'choked',
'Y', 'FR', 'FP', 'xTP', and 'laminar'. Some may be None if they are
not used in the calculation.
Returns
-------
Kv : float
Metric Kv valve flow coefficient (flow rate of water at a pressure drop
of 1 bar) [m^3/hr]
Notes
-----
It is possible to use this model without any diameters specified; in that
case, turbulent flow is assumed. Choked flow can still be modeled. This is
not recommended. All three diameters need to be None for this to work.
`FL` and `Fd` are not used by the models when the diameters are not
specified, but `xT` definitely is used by the model.
Examples
--------
From [1]_, matching example 3 for non-choked gas flow with attached
fittings and a rotary, eccentric plug, flow-to-open control valve:
>>> size_control_valve_g(T=433., MW=44.01, mu=1.4665E-4, gamma=1.30,
... Z=0.988, P1=680E3, P2=310E3, Q=38/36., D1=0.08, D2=0.1, d=0.05,
... FL=0.85, Fd=0.42, xT=0.60)
72.58664545391052
From [1]_, roughly matching example 4 for a small flow trim sized tapered
needle plug valve. Difference is 3% and explained by the difference in
algorithms used.
>>> size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0,
... P1=2.8E5, P2=1.3E5, Q=0.46/3600., D1=0.015, D2=0.015, d=0.015, FL=0.98,
... Fd=0.07, xT=0.8)
0.016498765335995726
References
----------
.. [1] IEC 60534-2-1 / ISA-75.01.01-2007
]
variable[MAX_C_POSSIBLE] assign[=] constant[1e+40]
<ast.Tuple object at 0x7da1b12c2800> assign[=] tuple[[<ast.BinOp object at 0x7da1b12c39d0>, <ast.BinOp object at 0x7da1b12c3e50>]]
variable[Q] assign[=] binary_operation[name[Q] * constant[3600.0]]
variable[Vm] assign[=] binary_operation[binary_operation[binary_operation[name[Z] * name[R]] * name[T]] / binary_operation[name[P1] * constant[1000]]]
variable[rho] assign[=] binary_operation[binary_operation[binary_operation[name[Vm] ** <ast.UnaryOp object at 0x7da1b12c3670>] * name[MW]] / constant[1000.0]]
variable[nu] assign[=] binary_operation[name[mu] / name[rho]]
variable[dP] assign[=] binary_operation[name[P1] - name[P2]]
variable[Fgamma] assign[=] binary_operation[name[gamma] / constant[1.4]]
variable[x] assign[=] binary_operation[name[dP] / name[P1]]
variable[Y] assign[=] call[name[max], parameter[binary_operation[constant[1] - binary_operation[name[x] / binary_operation[binary_operation[constant[3] * name[Fgamma]] * name[xT]]]], binary_operation[constant[2] / constant[3.0]]]]
variable[choked] assign[=] call[name[is_choked_turbulent_g], parameter[name[x], name[Fgamma], name[xT]]]
if <ast.BoolOp object at 0x7da1b1184430> begin[:]
variable[C] assign[=] binary_operation[binary_operation[name[Q] / binary_operation[binary_operation[name[N9] * name[P1]] * name[Y]]] * binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[MW] * name[T]] * name[Z]] / name[xT]] / name[Fgamma]] ** constant[0.5]]]
if name[full_output] begin[:]
variable[ans] assign[=] dictionary[[<ast.Constant object at 0x7da1b11858d0>, <ast.Constant object at 0x7da1b1184490>, <ast.Constant object at 0x7da1b1185b40>, <ast.Constant object at 0x7da1b1185ab0>, <ast.Constant object at 0x7da1b1186350>], [<ast.Constant object at 0x7da1b1186410>, <ast.Constant object at 0x7da1b1185960>, <ast.Constant object at 0x7da1b1186470>, <ast.Name object at 0x7da1b1185630>, <ast.Name object at 0x7da1b12ba6b0>]]
if <ast.BoolOp object at 0x7da1b12bb6a0> begin[:]
variable[Rev] assign[=] constant[100000.0]
if name[full_output] begin[:]
call[name[ans]][constant[Rev]] assign[=] constant[None]
if name[full_output] begin[:]
call[name[ans]][constant[Kv]] assign[=] name[C]
call[name[ans]][constant[laminar]] assign[=] compare[name[Rev] less_or_equal[<=] constant[10000]]
return[name[ans]] | keyword[def] identifier[size_control_valve_g] ( identifier[T] , identifier[MW] , identifier[mu] , identifier[gamma] , identifier[Z] , identifier[P1] , identifier[P2] , identifier[Q] , identifier[D1] = keyword[None] , identifier[D2] = keyword[None] ,
identifier[d] = keyword[None] , identifier[FL] = literal[int] , identifier[Fd] = literal[int] , identifier[xT] = literal[int] , identifier[allow_choked] = keyword[True] ,
identifier[allow_laminar] = keyword[True] , identifier[full_output] = keyword[False] ):
literal[string]
identifier[MAX_C_POSSIBLE] = literal[int]
identifier[P1] , identifier[P2] = identifier[P1] / literal[int] , identifier[P2] / literal[int]
identifier[Q] = identifier[Q] * literal[int]
identifier[Vm] = identifier[Z] * identifier[R] * identifier[T] /( identifier[P1] * literal[int] )
identifier[rho] =( identifier[Vm] )**- literal[int] * identifier[MW] / literal[int]
identifier[nu] = identifier[mu] / identifier[rho]
identifier[dP] = identifier[P1] - identifier[P2]
identifier[Fgamma] = identifier[gamma] / literal[int]
identifier[x] = identifier[dP] / identifier[P1]
identifier[Y] = identifier[max] ( literal[int] - identifier[x] /( literal[int] * identifier[Fgamma] * identifier[xT] ), literal[int] / literal[int] )
identifier[choked] = identifier[is_choked_turbulent_g] ( identifier[x] , identifier[Fgamma] , identifier[xT] )
keyword[if] identifier[choked] keyword[and] identifier[allow_choked] :
identifier[C] = identifier[Q] /( identifier[N9] * identifier[P1] * identifier[Y] )*( identifier[MW] * identifier[T] * identifier[Z] / identifier[xT] / identifier[Fgamma] )** literal[int]
keyword[else] :
identifier[C] = identifier[Q] /( identifier[N9] * identifier[P1] * identifier[Y] )*( identifier[MW] * identifier[T] * identifier[Z] / identifier[x] )** literal[int]
keyword[if] identifier[full_output] :
identifier[ans] ={ literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] ,
literal[string] : identifier[choked] , literal[string] : identifier[Y] }
keyword[if] identifier[D1] keyword[is] keyword[None] keyword[and] identifier[D2] keyword[is] keyword[None] keyword[and] identifier[d] keyword[is] keyword[None] :
identifier[Rev] = literal[int]
keyword[if] identifier[full_output] :
identifier[ans] [ literal[string] ]= keyword[None]
keyword[else] :
identifier[D1] , identifier[D2] , identifier[d] = identifier[D1] * literal[int] , identifier[D2] * literal[int] , identifier[d] * literal[int]
identifier[Rev] = identifier[Reynolds_valve] ( identifier[nu] = identifier[nu] , identifier[Q] = identifier[Q] , identifier[D1] = identifier[D1] , identifier[FL] = identifier[FL] , identifier[Fd] = identifier[Fd] , identifier[C] = identifier[C] )
keyword[if] identifier[full_output] :
identifier[ans] [ literal[string] ]= identifier[Rev]
keyword[if] ( identifier[Rev] > literal[int] keyword[or] keyword[not] identifier[allow_laminar] ) keyword[and] ( identifier[D1] != identifier[d] keyword[or] identifier[D2] != identifier[d] ):
identifier[FP] = literal[int]
identifier[MAX_ITER] = literal[int]
keyword[def] identifier[iterate_piping_coef] ( identifier[Ci] , identifier[iterations] ):
identifier[loss] = identifier[loss_coefficient_piping] ( identifier[d] , identifier[D1] , identifier[D2] )
identifier[FP] =( literal[int] + identifier[loss] / identifier[N2] *( identifier[Ci] / identifier[d] ** literal[int] )** literal[int] )**- literal[int]
identifier[loss_upstream] = identifier[loss_coefficient_piping] ( identifier[d] , identifier[D1] )
identifier[xTP] = identifier[xT] / identifier[FP] ** literal[int] /( literal[int] + identifier[xT] * identifier[loss_upstream] / identifier[N5] *( identifier[Ci] / identifier[d] ** literal[int] )** literal[int] )
identifier[choked] = identifier[is_choked_turbulent_g] ( identifier[x] , identifier[Fgamma] , identifier[xTP] = identifier[xTP] )
keyword[if] identifier[choked] :
identifier[C] = identifier[Q] /( identifier[N9] * identifier[FP] * identifier[P1] * identifier[Y] )*( identifier[MW] * identifier[T] * identifier[Z] / identifier[xTP] / identifier[Fgamma] )** literal[int]
keyword[else] :
identifier[C] = identifier[Q] /( identifier[N9] * identifier[FP] * identifier[P1] * identifier[Y] )*( identifier[MW] * identifier[T] * identifier[Z] / identifier[x] )** literal[int]
keyword[if] identifier[Ci] / identifier[C] < literal[int] keyword[and] identifier[iterations] < identifier[MAX_ITER] keyword[and] identifier[Ci] < identifier[MAX_C_POSSIBLE] :
identifier[C] = identifier[iterate_piping_coef] ( identifier[C] , identifier[iterations] + literal[int] )
keyword[if] identifier[full_output] :
identifier[ans] [ literal[string] ]= identifier[xTP]
identifier[ans] [ literal[string] ]= identifier[FP]
identifier[ans] [ literal[string] ]= identifier[choked]
keyword[if] identifier[MAX_ITER] == identifier[iterations] keyword[or] identifier[Ci] >= identifier[MAX_C_POSSIBLE] :
identifier[ans] [ literal[string] ]= literal[string]
keyword[return] identifier[C]
identifier[C] = identifier[iterate_piping_coef] ( identifier[C] , literal[int] )
keyword[elif] identifier[Rev] <= literal[int] keyword[and] identifier[allow_laminar] :
keyword[def] identifier[iterate_piping_laminar] ( identifier[C] ):
identifier[Ci] = literal[int] * identifier[C]
identifier[Rev] = identifier[Reynolds_valve] ( identifier[nu] = identifier[nu] , identifier[Q] = identifier[Q] , identifier[D1] = identifier[D1] , identifier[FL] = identifier[FL] , identifier[Fd] = identifier[Fd] , identifier[C] = identifier[Ci] )
keyword[if] identifier[Ci] / identifier[d] ** literal[int] > literal[int] * identifier[N18] :
identifier[FR] = identifier[Reynolds_factor] ( identifier[FL] = identifier[FL] , identifier[C] = identifier[Ci] , identifier[d] = identifier[d] , identifier[Rev] = identifier[Rev] , identifier[full_trim] = keyword[False] )
keyword[else] :
identifier[FR] = identifier[Reynolds_factor] ( identifier[FL] = identifier[FL] , identifier[C] = identifier[Ci] , identifier[d] = identifier[d] , identifier[Rev] = identifier[Rev] , identifier[full_trim] = keyword[True] )
keyword[if] identifier[C] / identifier[FR] >= identifier[Ci] :
identifier[Ci] = identifier[iterate_piping_laminar] ( identifier[Ci] )
keyword[if] identifier[full_output] :
identifier[ans] [ literal[string] ]= identifier[FR]
identifier[ans] [ literal[string] ]= identifier[Rev]
keyword[return] identifier[Ci]
identifier[C] = identifier[iterate_piping_laminar] ( identifier[C] )
keyword[if] identifier[full_output] :
identifier[ans] [ literal[string] ]= identifier[C]
identifier[ans] [ literal[string] ]= identifier[Rev] <= literal[int]
keyword[return] identifier[ans]
keyword[else] :
keyword[return] identifier[C] | def size_control_valve_g(T, MW, mu, gamma, Z, P1, P2, Q, D1=None, D2=None, d=None, FL=0.9, Fd=1, xT=0.7, allow_choked=True, allow_laminar=True, full_output=False):
"""Calculates flow coefficient of a control valve passing a gas
according to IEC 60534. Uses a large number of inputs in SI units. Note the
return value is not standard SI. All parameters are effectively required
except the diameters, which may be omitted (see Notes). For details of the
calculations, consult [1]_. Note that gas properties are evaluated at the
inlet conditions.
Parameters
----------
T : float
Temperature of the gas at the inlet [K]
MW : float
Molecular weight of the gas [g/mol]
mu : float
Viscosity of the fluid at inlet conditions [Pa*s]
gamma : float
Specific heat capacity ratio [-]
Z : float
Compressibility factor at inlet conditions, [-]
P1 : float
Inlet pressure of the gas before valves and reducers [Pa]
P2 : float
Outlet pressure of the gas after valves and reducers [Pa]
Q : float
Volumetric flow rate of the gas at *273.15 K* and 1 atm specifically
[m^3/s]
D1 : float, optional
Diameter of the pipe before the valve [m]
D2 : float, optional
Diameter of the pipe after the valve [m]
d : float, optional
Diameter of the valve [m]
FL : float, optional
Liquid pressure recovery factor of a control valve without attached
fittings (normally 0.8-0.9 at full open and decreasing as opened
further to below 0.5; use the default very cautiously!) [-]
Fd : float, optional
Valve style modifier (0.1 to 1; varies tremendously depending on the
type of valve and position; do not use the default at all!) [-]
xT : float, optional
Pressure difference ratio factor of a valve without fittings at choked
flow (increasing to 0.9 or higher as the valve is closed further and
decreasing to 0.1 or lower as the valve is opened further; use the default
very cautiously!) [-]
allow_choked : bool, optional
Overrides the automatic transition into the choked regime if this is
False and returns as if choked flow does not exist
allow_laminar : bool, optional
Overrides the automatic transition into the laminar regime if this is
False and returns as if laminar flow does not exist
full_output : bool, optional
If True, returns intermediate calculation values as
well as Kv in the form of a dictionary containing 'Kv', 'Rev', 'choked',
'Y', 'FR', 'FP', 'xTP', and 'laminar'. Some may be None if they are
not used in the calculation.
Returns
-------
Kv : float
Metric Kv valve flow coefficient (flow rate of water at a pressure drop
of 1 bar) [m^3/hr]
Notes
-----
It is possible to use this model without any diameters specified; in that
case, turbulent flow is assumed. Choked flow can still be modeled. This is
not recommended. All three diameters need to be None for this to work.
`FL` and `Fd` are not used by the models when the diameters are not
specified, but `xT` definitely is used by the model.
Examples
--------
From [1]_, matching example 3 for non-choked gas flow with attached
fittings and a rotary, eccentric plug, flow-to-open control valve:
>>> size_control_valve_g(T=433., MW=44.01, mu=1.4665E-4, gamma=1.30,
... Z=0.988, P1=680E3, P2=310E3, Q=38/36., D1=0.08, D2=0.1, d=0.05,
... FL=0.85, Fd=0.42, xT=0.60)
72.58664545391052
From [1]_, roughly matching example 4 for a small-flow-trim-sized tapered
needle plug valve. The difference is 3% and is explained by the difference
in the algorithms used.
>>> size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0,
... P1=2.8E5, P2=1.3E5, Q=0.46/3600., D1=0.015, D2=0.015, d=0.015, FL=0.98,
... Fd=0.07, xT=0.8)
0.016498765335995726
References
----------
.. [1] IEC 60534-2-1 / ISA-75.01.01-2007
"""
MAX_C_POSSIBLE = 1e+40 # Quit iterations if C reaches this high
# Pa to kPa, according to constants in standard
(P1, P2) = (P1 / 1000.0, P2 / 1000.0)
Q = Q * 3600.0 # m^3/s to m^3/hr, according to constants in standard
# Convert dynamic viscosity to kinematic viscosity
Vm = Z * R * T / (P1 * 1000)
rho = Vm ** (-1) * MW / 1000.0
nu = mu / rho # kinematic viscosity used in standard
dP = P1 - P2
Fgamma = gamma / 1.4
x = dP / P1
Y = max(1 - x / (3 * Fgamma * xT), 2 / 3.0)
choked = is_choked_turbulent_g(x, Fgamma, xT)
if choked and allow_choked:
# Choked, and flow coefficient from eq 14a
C = Q / (N9 * P1 * Y) * (MW * T * Z / xT / Fgamma) ** 0.5 # depends on [control=['if'], data=[]]
else:
# Non-choked, and flow coefficient from eq 8a
C = Q / (N9 * P1 * Y) * (MW * T * Z / x) ** 0.5
if full_output:
ans = {'FP': None, 'xTP': None, 'FR': None, 'choked': choked, 'Y': Y} # depends on [control=['if'], data=[]]
if D1 is None and D2 is None and (d is None):
# Assume turbulent if no diameters are provided, no other calculations
Rev = 100000.0
if full_output:
ans['Rev'] = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# m to mm, according to constants in standard
(D1, D2, d) = (D1 * 1000.0, D2 * 1000.0, d * 1000.0) # Convert diameters to mm which is used in the standard
Rev = Reynolds_valve(nu=nu, Q=Q, D1=D1, FL=FL, Fd=Fd, C=C)
if full_output:
ans['Rev'] = Rev # depends on [control=['if'], data=[]]
if (Rev > 10000 or not allow_laminar) and (D1 != d or D2 != d):
# gas, using xTP and FLP
FP = 1.0
MAX_ITER = 20
def iterate_piping_coef(Ci, iterations):
loss = loss_coefficient_piping(d, D1, D2)
FP = (1.0 + loss / N2 * (Ci / d ** 2) ** 2) ** (-0.5)
loss_upstream = loss_coefficient_piping(d, D1)
xTP = xT / FP ** 2 / (1 + xT * loss_upstream / N5 * (Ci / d ** 2) ** 2)
choked = is_choked_turbulent_g(x, Fgamma, xTP=xTP)
if choked:
# Choked flow with piping, equation 17a
C = Q / (N9 * FP * P1 * Y) * (MW * T * Z / xTP / Fgamma) ** 0.5 # depends on [control=['if'], data=[]]
else:
# Non-choked flow with piping, equation 11a
C = Q / (N9 * FP * P1 * Y) * (MW * T * Z / x) ** 0.5
if Ci / C < 0.99 and iterations < MAX_ITER and (Ci < MAX_C_POSSIBLE):
C = iterate_piping_coef(C, iterations + 1) # depends on [control=['if'], data=[]]
if full_output:
ans['xTP'] = xTP
ans['FP'] = FP
ans['choked'] = choked
if MAX_ITER == iterations or Ci >= MAX_C_POSSIBLE:
ans['warning'] = 'Not converged in inner loop' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return C
C = iterate_piping_coef(C, 0) # depends on [control=['if'], data=[]]
elif Rev <= 10000 and allow_laminar:
# Laminar;
def iterate_piping_laminar(C):
Ci = 1.3 * C
Rev = Reynolds_valve(nu=nu, Q=Q, D1=D1, FL=FL, Fd=Fd, C=Ci)
if Ci / d ** 2 > 0.016 * N18:
FR = Reynolds_factor(FL=FL, C=Ci, d=d, Rev=Rev, full_trim=False) # depends on [control=['if'], data=[]]
else:
FR = Reynolds_factor(FL=FL, C=Ci, d=d, Rev=Rev, full_trim=True)
if C / FR >= Ci:
Ci = iterate_piping_laminar(Ci) # depends on [control=['if'], data=['Ci']]
if full_output:
ans['FR'] = FR
ans['Rev'] = Rev # depends on [control=['if'], data=[]]
return Ci
C = iterate_piping_laminar(C) # depends on [control=['if'], data=[]]
if full_output:
ans['Kv'] = C
ans['laminar'] = Rev <= 10000
return ans # depends on [control=['if'], data=[]]
else:
return C |
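The row above stores the gas-service control-valve sizing routine in all four column encodings. As a quick sanity check on the `code` column, a minimal usage sketch follows; the import path is an assumption (the row does not record its source package, though the signature and references match the `fluids` library), and the inputs are taken verbatim from the row's own docstring example 3:

# Hypothetical import path -- the dataset row does not name its package;
# `fluids` is assumed from the function's signature and IEC references.
from fluids import size_control_valve_g

# IEC 60534-2-1 example 3, copied from the row's docstring: non-choked
# gas flow with attached fittings, rotary eccentric plug valve.
Kv = size_control_valve_g(T=433., MW=44.01, mu=1.4665E-4, gamma=1.30,
                          Z=0.988, P1=680E3, P2=310E3, Q=38/36.,
                          D1=0.08, D2=0.1, d=0.05,
                          FL=0.85, Fd=0.42, xT=0.60)
print(Kv)  # the docstring reports 72.58664545391052 m^3/hr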
def _apply_krauss_multi_qubit(krauss: Union[Tuple[Any], Sequence[Any]],
args: 'ApplyChannelArgs') -> np.ndarray:
"""Use numpy's einsum to apply a multi-qubit channel."""
for krauss_op in krauss:
np.copyto(dst=args.target_tensor, src=args.auxiliary_buffer0)
krauss_tensor = np.reshape(
krauss_op.astype(args.target_tensor.dtype),
(2,) * len(args.left_axes) * 2)
linalg.targeted_left_multiply(
krauss_tensor,
args.target_tensor,
args.left_axes,
out=args.auxiliary_buffer1)
# No need to transpose as we are acting on the tensor
# representation of matrix, so transpose is done for us.
linalg.targeted_left_multiply(
np.conjugate(krauss_tensor),
args.auxiliary_buffer1,
args.right_axes,
out=args.target_tensor)
args.out_buffer += args.target_tensor
return args.out_buffer | def function[_apply_krauss_multi_qubit, parameter[krauss, args]]:
constant[Use numpy's einsum to apply a multi-qubit channel.]
for taget[name[krauss_op]] in starred[name[krauss]] begin[:]
call[name[np].copyto, parameter[]]
variable[krauss_tensor] assign[=] call[name[np].reshape, parameter[call[name[krauss_op].astype, parameter[name[args].target_tensor.dtype]], binary_operation[binary_operation[tuple[[<ast.Constant object at 0x7da1b1c62b30>]] * call[name[len], parameter[name[args].left_axes]]] * constant[2]]]]
call[name[linalg].targeted_left_multiply, parameter[name[krauss_tensor], name[args].target_tensor, name[args].left_axes]]
call[name[linalg].targeted_left_multiply, parameter[call[name[np].conjugate, parameter[name[krauss_tensor]]], name[args].auxiliary_buffer1, name[args].right_axes]]
<ast.AugAssign object at 0x7da1b1f48970>
return[name[args].out_buffer] | keyword[def] identifier[_apply_krauss_multi_qubit] ( identifier[krauss] : identifier[Union] [ identifier[Tuple] [ identifier[Any] ], identifier[Sequence] [ identifier[Any] ]],
identifier[args] : literal[string] )-> identifier[np] . identifier[ndarray] :
literal[string]
keyword[for] identifier[krauss_op] keyword[in] identifier[krauss] :
identifier[np] . identifier[copyto] ( identifier[dst] = identifier[args] . identifier[target_tensor] , identifier[src] = identifier[args] . identifier[auxiliary_buffer0] )
identifier[krauss_tensor] = identifier[np] . identifier[reshape] (
identifier[krauss_op] . identifier[astype] ( identifier[args] . identifier[target_tensor] . identifier[dtype] ),
( literal[int] ,)* identifier[len] ( identifier[args] . identifier[left_axes] )* literal[int] )
identifier[linalg] . identifier[targeted_left_multiply] (
identifier[krauss_tensor] ,
identifier[args] . identifier[target_tensor] ,
identifier[args] . identifier[left_axes] ,
identifier[out] = identifier[args] . identifier[auxiliary_buffer1] )
identifier[linalg] . identifier[targeted_left_multiply] (
identifier[np] . identifier[conjugate] ( identifier[krauss_tensor] ),
identifier[args] . identifier[auxiliary_buffer1] ,
identifier[args] . identifier[right_axes] ,
identifier[out] = identifier[args] . identifier[target_tensor] )
identifier[args] . identifier[out_buffer] += identifier[args] . identifier[target_tensor]
keyword[return] identifier[args] . identifier[out_buffer] | def _apply_krauss_multi_qubit(krauss: Union[Tuple[Any], Sequence[Any]], args: 'ApplyChannelArgs') -> np.ndarray:
"""Use numpy's einsum to apply a multi-qubit channel."""
for krauss_op in krauss:
np.copyto(dst=args.target_tensor, src=args.auxiliary_buffer0)
krauss_tensor = np.reshape(krauss_op.astype(args.target_tensor.dtype), (2,) * len(args.left_axes) * 2)
linalg.targeted_left_multiply(krauss_tensor, args.target_tensor, args.left_axes, out=args.auxiliary_buffer1)
# No need to transpose as we are acting on the tensor
# representation of matrix, so transpose is done for us.
linalg.targeted_left_multiply(np.conjugate(krauss_tensor), args.auxiliary_buffer1, args.right_axes, out=args.target_tensor)
args.out_buffer += args.target_tensor # depends on [control=['for'], data=['krauss_op']]
return args.out_buffer |
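The second row encodes a multi-qubit Kraus-channel application routine (the `ApplyChannelArgs` and `targeted_left_multiply` names suggest Cirq internals, though the row does not say so). Its buffer-juggling tensor form is equivalent to the textbook density-matrix update rho -> sum_k K_k rho K_k^dagger; a self-contained NumPy restatement of that math, with illustrative names rather than the row's API, is:

import numpy as np

def apply_channel_dense(kraus_ops, rho):
    # Textbook form of the channel the row's tensor code implements:
    # rho -> sum_k K_k @ rho @ K_k^dagger. Illustrative helper only; the
    # row itself works in tensor-index form with scratch buffers.
    out = np.zeros_like(rho)
    for K in kraus_ops:
        out += K @ rho @ K.conj().T
    return out

# Tiny check: single-qubit amplitude damping moves |1><1| toward |0><0|.
g = 0.1
K0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1.0 - g)]])
K1 = np.array([[0.0, np.sqrt(g)], [0.0, 0.0]])
rho = np.array([[0.0, 0.0], [0.0, 1.0]])  # |1><1|
print(apply_channel_dense([K0, K1], rho))  # trace is preserved at 1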