code | code_sememe | token_type | code_dependency
---|---|---|---
def upload_urls(self, project, files, run=None, entity=None, description=None):
"""Generate temporary resumeable upload urls
Args:
project (str): The project to upload to
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
(bucket_id, file_info)
bucket_id: id of bucket we uploaded to
file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.
{
'weights.h5': { "url": "https://weights.url" },
'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
}
"""
query = gql('''
query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {
model(name: $name, entityName: $entity) {
bucket(name: $run, desc: $description) {
id
files(names: $files) {
edges {
node {
name
url(upload: true)
updatedAt
}
}
}
}
}
}
''')
run_id = run or self.settings('run')
entity = entity or self.settings('entity')
query_result = self.gql(query, variable_values={
'name': project, 'run': run_id,
'entity': entity,
'description': description,
'files': [file for file in files]
})
run = query_result['model']['bucket']
if run:
result = {file['name']: file for file in self._flatten_edges(run['files'])}
return run['id'], result
else:
raise CommError("Run does not exist {}/{}/{}.".format(entity, project, run_id)) | def function[upload_urls, parameter[self, project, files, run, entity, description]]:
constant[Generate temporary resumable upload urls
Args:
project (str): The project to upload to
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
(bucket_id, file_info)
bucket_id: id of bucket we uploaded to
file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.
{
'weights.h5': { "url": "https://weights.url" },
'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
}
]
variable[query] assign[=] call[name[gql], parameter[constant[
query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {
model(name: $name, entityName: $entity) {
bucket(name: $run, desc: $description) {
id
files(names: $files) {
edges {
node {
name
url(upload: true)
updatedAt
}
}
}
}
}
}
]]]
variable[run_id] assign[=] <ast.BoolOp object at 0x7da204566680>
variable[entity] assign[=] <ast.BoolOp object at 0x7da204565480>
variable[query_result] assign[=] call[name[self].gql, parameter[name[query]]]
variable[run] assign[=] call[call[name[query_result]][constant[model]]][constant[bucket]]
if name[run] begin[:]
variable[result] assign[=] <ast.DictComp object at 0x7da204567d90>
return[tuple[[<ast.Subscript object at 0x7da20c7c99c0>, <ast.Name object at 0x7da20c7c8b80>]]] | keyword[def] identifier[upload_urls] ( identifier[self] , identifier[project] , identifier[files] , identifier[run] = keyword[None] , identifier[entity] = keyword[None] , identifier[description] = keyword[None] ):
literal[string]
identifier[query] = identifier[gql] ( literal[string] )
identifier[run_id] = identifier[run] keyword[or] identifier[self] . identifier[settings] ( literal[string] )
identifier[entity] = identifier[entity] keyword[or] identifier[self] . identifier[settings] ( literal[string] )
identifier[query_result] = identifier[self] . identifier[gql] ( identifier[query] , identifier[variable_values] ={
literal[string] : identifier[project] , literal[string] : identifier[run_id] ,
literal[string] : identifier[entity] ,
literal[string] : identifier[description] ,
literal[string] :[ identifier[file] keyword[for] identifier[file] keyword[in] identifier[files] ]
})
identifier[run] = identifier[query_result] [ literal[string] ][ literal[string] ]
keyword[if] identifier[run] :
identifier[result] ={ identifier[file] [ literal[string] ]: identifier[file] keyword[for] identifier[file] keyword[in] identifier[self] . identifier[_flatten_edges] ( identifier[run] [ literal[string] ])}
keyword[return] identifier[run] [ literal[string] ], identifier[result]
keyword[else] :
keyword[raise] identifier[CommError] ( literal[string] . identifier[format] ( identifier[entity] , identifier[project] , identifier[run_id] )) | def upload_urls(self, project, files, run=None, entity=None, description=None):
"""Generate temporary resumeable upload urls
Args:
project (str): The project to upload to
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
(bucket_id, file_info)
bucket_id: id of bucket we uploaded to
file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.
{
'weights.h5': { "url": "https://weights.url" },
'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
}
"""
query = gql('\n query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {\n model(name: $name, entityName: $entity) {\n bucket(name: $run, desc: $description) {\n id\n files(names: $files) {\n edges {\n node {\n name\n url(upload: true)\n updatedAt\n }\n }\n }\n }\n }\n }\n ')
run_id = run or self.settings('run')
entity = entity or self.settings('entity')
query_result = self.gql(query, variable_values={'name': project, 'run': run_id, 'entity': entity, 'description': description, 'files': [file for file in files]})
run = query_result['model']['bucket']
if run:
result = {file['name']: file for file in self._flatten_edges(run['files'])}
return (run['id'], result) # depends on [control=['if'], data=[]]
else:
raise CommError('Run does not exist {}/{}/{}.'.format(entity, project, run_id)) |
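To make the return shape concrete, here is a small self-contained sketch that mimics the GraphQL edges/nodes payload produced by the query above and reproduces the flattening into a filename-keyed dict (the id and URLs are made up; flatten_edges stands in for self._flatten_edges):

bucket = {
    "id": "bucket-123",
    "files": {"edges": [
        {"node": {"name": "weights.h5", "url": "https://example.test/weights"}},
        {"node": {"name": "model.json", "url": "https://example.test/model"}},
    ]},
}

def flatten_edges(connection):
    # Unwrap each edge to its node, as self._flatten_edges does.
    return [edge["node"] for edge in connection["edges"]]

result = {f["name"]: f for f in flatten_edges(bucket["files"])}
print(bucket["id"], result["weights.h5"]["url"])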
def extension(names):
"""Makes a function to be an extension."""
for name in names:
if not NAME_PATTERN.match(name):
raise ValueError('invalid extension name: %s' % name)
def decorator(f, names=names):
return Extension(f, names=names)
return decorator | def function[extension, parameter[names]]:
constant[Makes a function into an extension.]
for taget[name[name]] in starred[name[names]] begin[:]
if <ast.UnaryOp object at 0x7da1b2875ba0> begin[:]
<ast.Raise object at 0x7da1b28751b0>
def function[decorator, parameter[f, names]]:
return[call[name[Extension], parameter[name[f]]]]
return[name[decorator]] | keyword[def] identifier[extension] ( identifier[names] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[names] :
keyword[if] keyword[not] identifier[NAME_PATTERN] . identifier[match] ( identifier[name] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] )
keyword[def] identifier[decorator] ( identifier[f] , identifier[names] = identifier[names] ):
keyword[return] identifier[Extension] ( identifier[f] , identifier[names] = identifier[names] )
keyword[return] identifier[decorator] | def extension(names):
"""Makes a function to be an extension."""
for name in names:
if not NAME_PATTERN.match(name):
raise ValueError('invalid extension name: %s' % name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
def decorator(f, names=names):
return Extension(f, names=names)
return decorator |
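A quick usage sketch for the decorator factory above. NAME_PATTERN and the Extension class are not shown in this row, so minimal stand-ins are defined here purely to make the example run when pasted into the same module as extension():

import re

NAME_PATTERN = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$")  # stand-in pattern

class Extension(object):  # stand-in for the real wrapper class
    def __init__(self, f, names):
        self.f, self.names = f, names

@extension(names=["upper"])
def upper(value):
    return value.upper()

print(isinstance(upper, Extension), upper.names)  # True ['upper']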
def get_project_children(self, project_id, name_contains=None):
"""
Get direct files and folders of a project.
:param project_id: str: uuid of the project to list contents
:param name_contains: str: filter children based on a pattern
:return: [File|Folder]: list of Files/Folders contained by the project
"""
return self._create_array_response(
self.data_service.get_project_children(
project_id, name_contains
),
DDSConnection._folder_or_file_constructor
) | def function[get_project_children, parameter[self, project_id, name_contains]]:
constant[
Get direct files and folders of a project.
:param project_id: str: uuid of the project to list contents
:param name_contains: str: filter children based on a pattern
:return: [File|Folder]: list of Files/Folders contained by the project
]
return[call[name[self]._create_array_response, parameter[call[name[self].data_service.get_project_children, parameter[name[project_id], name[name_contains]]], name[DDSConnection]._folder_or_file_constructor]]] | keyword[def] identifier[get_project_children] ( identifier[self] , identifier[project_id] , identifier[name_contains] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_create_array_response] (
identifier[self] . identifier[data_service] . identifier[get_project_children] (
identifier[project_id] , identifier[name_contains]
),
identifier[DDSConnection] . identifier[_folder_or_file_constructor]
) | def get_project_children(self, project_id, name_contains=None):
"""
Get direct files and folders of a project.
:param project_id: str: uuid of the project to list contents
:param name_contains: str: filter children based on a pattern
:return: [File|Folder]: list of Files/Folders contained by the project
"""
return self._create_array_response(self.data_service.get_project_children(project_id, name_contains), DDSConnection._folder_or_file_constructor) |
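Typical call shape for the wrapper above; this is a hypothetical sketch, since it needs an authenticated DDSConnection instance and a real project UUID:

# dds = DDSConnection(...)  # assumed to be connected already
for child in dds.get_project_children("project-uuid", name_contains="results"):
    print(type(child).__name__, child)  # File or Folder instances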
def format(self, record):
"""Format the log record."""
levelname = getattr(record, 'levelname', None)
record.levelcolor = ''
record.endlevelcolor = ''
if levelname:
level_color = getattr(self.TermColors, levelname, '')
record.levelcolor = level_color
record.endlevelcolor = self.TermColors.ENDC if level_color else ''
return super(FleakerLogFormatter, self).format(record) | def function[format, parameter[self, record]]:
constant[Format the log record.]
variable[levelname] assign[=] call[name[getattr], parameter[name[record], constant[levelname], constant[None]]]
name[record].levelcolor assign[=] constant[]
name[record].endlevelcolor assign[=] constant[]
if name[levelname] begin[:]
variable[level_color] assign[=] call[name[getattr], parameter[name[self].TermColors, name[levelname], constant[]]]
name[record].levelcolor assign[=] name[level_color]
name[record].endlevelcolor assign[=] <ast.IfExp object at 0x7da1b06186d0>
return[call[call[name[super], parameter[name[FleakerLogFormatter], name[self]]].format, parameter[name[record]]]] | keyword[def] identifier[format] ( identifier[self] , identifier[record] ):
literal[string]
identifier[levelname] = identifier[getattr] ( identifier[record] , literal[string] , keyword[None] )
identifier[record] . identifier[levelcolor] = literal[string]
identifier[record] . identifier[endlevelcolor] = literal[string]
keyword[if] identifier[levelname] :
identifier[level_color] = identifier[getattr] ( identifier[self] . identifier[TermColors] , identifier[levelname] , literal[string] )
identifier[record] . identifier[levelcolor] = identifier[level_color]
identifier[record] . identifier[endlevelcolor] = identifier[self] . identifier[TermColors] . identifier[ENDC] keyword[if] identifier[level_color] keyword[else] literal[string]
keyword[return] identifier[super] ( identifier[FleakerLogFormatter] , identifier[self] ). identifier[format] ( identifier[record] ) | def format(self, record):
"""Format the log record."""
levelname = getattr(record, 'levelname', None)
record.levelcolor = ''
record.endlevelcolor = ''
if levelname:
level_color = getattr(self.TermColors, levelname, '')
record.levelcolor = level_color
record.endlevelcolor = self.TermColors.ENDC if level_color else '' # depends on [control=['if'], data=[]]
return super(FleakerLogFormatter, self).format(record) |
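The formatter above only injects levelcolor/endlevelcolor attributes and defers to logging.Formatter; a self-contained demonstration of the same pattern, with a stub TermColors standing in for Fleaker's real color table:

import logging

class TermColors(object):  # stand-in color table
    DEBUG, INFO = '\033[90m', '\033[32m'
    WARNING, ERROR, CRITICAL = '\033[33m', '\033[31m', '\033[41m'
    ENDC = '\033[0m'

class ColorFormatter(logging.Formatter):
    TermColors = TermColors
    def format(self, record):
        # Same logic as FleakerLogFormatter.format above.
        level_color = getattr(self.TermColors, record.levelname, '')
        record.levelcolor = level_color
        record.endlevelcolor = self.TermColors.ENDC if level_color else ''
        return super(ColorFormatter, self).format(record)

handler = logging.StreamHandler()
handler.setFormatter(ColorFormatter('%(levelcolor)s%(levelname)s%(endlevelcolor)s %(message)s'))
logging.getLogger('demo').addHandler(handler)
logging.getLogger('demo').warning('colored level name')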
def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tf.get_static_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype) | def function[maybe_get_static_value, parameter[x, dtype]]:
constant[Helper which tries to return a static value.
Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
]
if compare[name[x] is constant[None]] begin[:]
return[name[x]]
<ast.Try object at 0x7da1b0211bd0>
if <ast.BoolOp object at 0x7da1b0212ec0> begin[:]
return[name[x_]]
return[call[name[np].array, parameter[name[x_], name[dtype]]]] | keyword[def] identifier[maybe_get_static_value] ( identifier[x] , identifier[dtype] = keyword[None] ):
literal[string]
keyword[if] identifier[x] keyword[is] keyword[None] :
keyword[return] identifier[x]
keyword[try] :
identifier[x_] = identifier[tf] . identifier[get_static_value] ( identifier[x] )
keyword[except] identifier[TypeError] :
identifier[x_] = identifier[x]
keyword[if] identifier[x_] keyword[is] keyword[None] keyword[or] identifier[dtype] keyword[is] keyword[None] :
keyword[return] identifier[x_]
keyword[return] identifier[np] . identifier[array] ( identifier[x_] , identifier[dtype] ) | def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x # depends on [control=['if'], data=['x']]
try:
# This returns an np.ndarray.
x_ = tf.get_static_value(x) # depends on [control=['try'], data=[]]
except TypeError:
x_ = x # depends on [control=['except'], data=[]]
if x_ is None or dtype is None:
return x_ # depends on [control=['if'], data=[]]
return np.array(x_, dtype) |
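A quick check of the helper's behavior; this assumes TensorFlow (1.13+ or 2.x, where tf.get_static_value exists) and NumPy are importable, and that maybe_get_static_value from above is in scope:

import numpy as np
import tensorflow as tf

x = tf.constant([1.0, 2.0])
print(maybe_get_static_value(x))                  # array([1., 2.], dtype=float32)
print(maybe_get_static_value(x, dtype=np.int32))  # array([1, 2], dtype=int32)
print(maybe_get_static_value(None))               # None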
def revoke(self):
"""LeaseRevoke revokes a lease.
All keys attached to the lease will expire and be deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
"""
self.client.post(self.client.get_url("/kv/lease/revoke"),
json={"ID": self.id})
return True | def function[revoke, parameter[self]]:
constant[LeaseRevoke revokes a lease.
All keys attached to the lease will expire and be deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
]
call[name[self].client.post, parameter[call[name[self].client.get_url, parameter[constant[/kv/lease/revoke]]]]]
return[constant[True]] | keyword[def] identifier[revoke] ( identifier[self] ):
literal[string]
identifier[self] . identifier[client] . identifier[post] ( identifier[self] . identifier[client] . identifier[get_url] ( literal[string] ),
identifier[json] ={ literal[string] : identifier[self] . identifier[id] })
keyword[return] keyword[True] | def revoke(self):
"""LeaseRevoke revokes a lease.
All keys attached to the lease will expire and be deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
"""
self.client.post(self.client.get_url('/kv/lease/revoke'), json={'ID': self.id})
return True |
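Since revoke() reduces to one HTTP POST through the wrapped client, a standalone equivalent with requests looks roughly like this (the gateway base URL and lease ID are placeholders for a real etcd deployment):

import requests

BASE = "http://127.0.0.1:2379/v3beta"  # placeholder etcd HTTP gateway

resp = requests.post(BASE + "/kv/lease/revoke", json={"ID": 12345})
resp.raise_for_status()  # all keys attached to lease 12345 now expire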
def needs_serialization(self):
"""
Return True if the aside has any data to serialize to XML.
If all of the aside's data is empty or a default value, then the aside shouldn't
be serialized as XML at all.
"""
return any(field.is_set_on(self) for field in six.itervalues(self.fields)) | def function[needs_serialization, parameter[self]]:
constant[
Return True if the aside has any data to serialize to XML.
If all of the aside's data is empty or a default value, then the aside shouldn't
be serialized as XML at all.
]
return[call[name[any], parameter[<ast.GeneratorExp object at 0x7da18bcc8f70>]]] | keyword[def] identifier[needs_serialization] ( identifier[self] ):
literal[string]
keyword[return] identifier[any] ( identifier[field] . identifier[is_set_on] ( identifier[self] ) keyword[for] identifier[field] keyword[in] identifier[six] . identifier[itervalues] ( identifier[self] . identifier[fields] )) | def needs_serialization(self):
"""
Return True if the aside has any data to serialize to XML.
If all of the aside's data is empty or a default value, then the aside shouldn't
be serialized as XML at all.
"""
return any((field.is_set_on(self) for field in six.itervalues(self.fields))) |
def sym_gen(seq_len):
"""
Build NN symbol depending on the length of the input sequence
"""
sentence_shape = train_iter.provide_data[0][1]
char_sentence_shape = train_iter.provide_data[1][1]
entities_shape = train_iter.provide_label[0][1]
X_sent = mx.symbol.Variable(train_iter.provide_data[0].name)
X_char_sent = mx.symbol.Variable(train_iter.provide_data[1].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
###############################
# Character embedding component
###############################
char_embeddings = mx.sym.Embedding(data=X_char_sent, input_dim=len(char_to_index), output_dim=args.char_embed, name='char_embed')
char_embeddings = mx.sym.reshape(data=char_embeddings, shape=(0,1,seq_len,-1,args.char_embed), name='char_embed2')
char_cnn_outputs = []
for i, filter_size in enumerate(args.char_filter_list):
# Kernel that slides over entire words resulting in a 1d output
convi = mx.sym.Convolution(data=char_embeddings, kernel=(1, filter_size, args.char_embed), stride=(1, 1, 1),
num_filter=args.char_filters, name="char_conv_layer_" + str(i))
acti = mx.sym.Activation(data=convi, act_type='tanh')
pooli = mx.sym.Pooling(data=acti, pool_type='max', kernel=(1, char_sentence_shape[2] - filter_size + 1, 1),
stride=(1, 1, 1), name="char_pool_layer_" + str(i))
pooli = mx.sym.transpose(mx.sym.Reshape(pooli, shape=(0, 0, 0)), axes=(0, 2, 1), name="cchar_conv_layer_" + str(i))
char_cnn_outputs.append(pooli)
# combine features from all filters & apply dropout
cnn_char_features = mx.sym.Concat(*char_cnn_outputs, dim=2, name="cnn_char_features")
regularized_cnn_char_features = mx.sym.Dropout(data=cnn_char_features, p=args.dropout, mode='training',
name='regularized charCnn features')
##################################
# Combine char and word embeddings
##################################
word_embeddings = mx.sym.Embedding(data=X_sent, input_dim=len(word_to_index), output_dim=args.word_embed, name='word_embed')
rnn_features = mx.sym.Concat(*[word_embeddings, regularized_cnn_char_features], dim=2, name='rnn input')
##############################
# Bidirectional LSTM component
##############################
# unroll the lstm cell in time, merging outputs
bi_cell.reset()
output, states = bi_cell.unroll(length=seq_len, inputs=rnn_features, merge_outputs=True)
# Map to num entity classes
rnn_output = mx.sym.Reshape(output, shape=(-1, args.lstm_state_size * 2), name='r_output')
fc = mx.sym.FullyConnected(data=rnn_output, num_hidden=len(entity_to_index), name='fc_layer')
# reshape back to same shape as loss will be
reshaped_fc = mx.sym.transpose(mx.sym.reshape(fc, shape=(-1, seq_len, len(entity_to_index))), axes=(0, 2, 1))
sm = mx.sym.SoftmaxOutput(data=reshaped_fc, label=Y, ignore_label=-1, use_ignore=True, multi_output=True, name='softmax')
return sm, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label] | def function[sym_gen, parameter[seq_len]]:
constant[
Build NN symbol depending on the length of the input sequence
]
variable[sentence_shape] assign[=] call[call[name[train_iter].provide_data][constant[0]]][constant[1]]
variable[char_sentence_shape] assign[=] call[call[name[train_iter].provide_data][constant[1]]][constant[1]]
variable[entities_shape] assign[=] call[call[name[train_iter].provide_label][constant[0]]][constant[1]]
variable[X_sent] assign[=] call[name[mx].symbol.Variable, parameter[call[name[train_iter].provide_data][constant[0]].name]]
variable[X_char_sent] assign[=] call[name[mx].symbol.Variable, parameter[call[name[train_iter].provide_data][constant[1]].name]]
variable[Y] assign[=] call[name[mx].sym.Variable, parameter[call[name[train_iter].provide_label][constant[0]].name]]
variable[char_embeddings] assign[=] call[name[mx].sym.Embedding, parameter[]]
variable[char_embeddings] assign[=] call[name[mx].sym.reshape, parameter[]]
variable[char_cnn_outputs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1ff4370>, <ast.Name object at 0x7da1b1ff5f00>]]] in starred[call[name[enumerate], parameter[name[args].char_filter_list]]] begin[:]
variable[convi] assign[=] call[name[mx].sym.Convolution, parameter[]]
variable[acti] assign[=] call[name[mx].sym.Activation, parameter[]]
variable[pooli] assign[=] call[name[mx].sym.Pooling, parameter[]]
variable[pooli] assign[=] call[name[mx].sym.transpose, parameter[call[name[mx].sym.Reshape, parameter[name[pooli]]]]]
call[name[char_cnn_outputs].append, parameter[name[pooli]]]
variable[cnn_char_features] assign[=] call[name[mx].sym.Concat, parameter[<ast.Starred object at 0x7da1b1ff4bb0>]]
variable[regularized_cnn_char_features] assign[=] call[name[mx].sym.Dropout, parameter[]]
variable[word_embeddings] assign[=] call[name[mx].sym.Embedding, parameter[]]
variable[rnn_features] assign[=] call[name[mx].sym.Concat, parameter[<ast.Starred object at 0x7da1b1f60a90>]]
call[name[bi_cell].reset, parameter[]]
<ast.Tuple object at 0x7da1b1f61630> assign[=] call[name[bi_cell].unroll, parameter[]]
variable[rnn_output] assign[=] call[name[mx].sym.Reshape, parameter[name[output]]]
variable[fc] assign[=] call[name[mx].sym.FullyConnected, parameter[]]
variable[reshaped_fc] assign[=] call[name[mx].sym.transpose, parameter[call[name[mx].sym.reshape, parameter[name[fc]]]]]
variable[sm] assign[=] call[name[mx].sym.SoftmaxOutput, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b1f60d00>, <ast.ListComp object at 0x7da1b1f62800>, <ast.ListComp object at 0x7da1b1f63040>]]] | keyword[def] identifier[sym_gen] ( identifier[seq_len] ):
literal[string]
identifier[sentence_shape] = identifier[train_iter] . identifier[provide_data] [ literal[int] ][ literal[int] ]
identifier[char_sentence_shape] = identifier[train_iter] . identifier[provide_data] [ literal[int] ][ literal[int] ]
identifier[entities_shape] = identifier[train_iter] . identifier[provide_label] [ literal[int] ][ literal[int] ]
identifier[X_sent] = identifier[mx] . identifier[symbol] . identifier[Variable] ( identifier[train_iter] . identifier[provide_data] [ literal[int] ]. identifier[name] )
identifier[X_char_sent] = identifier[mx] . identifier[symbol] . identifier[Variable] ( identifier[train_iter] . identifier[provide_data] [ literal[int] ]. identifier[name] )
identifier[Y] = identifier[mx] . identifier[sym] . identifier[Variable] ( identifier[train_iter] . identifier[provide_label] [ literal[int] ]. identifier[name] )
identifier[char_embeddings] = identifier[mx] . identifier[sym] . identifier[Embedding] ( identifier[data] = identifier[X_char_sent] , identifier[input_dim] = identifier[len] ( identifier[char_to_index] ), identifier[output_dim] = identifier[args] . identifier[char_embed] , identifier[name] = literal[string] )
identifier[char_embeddings] = identifier[mx] . identifier[sym] . identifier[reshape] ( identifier[data] = identifier[char_embeddings] , identifier[shape] =( literal[int] , literal[int] , identifier[seq_len] ,- literal[int] , identifier[args] . identifier[char_embed] ), identifier[name] = literal[string] )
identifier[char_cnn_outputs] =[]
keyword[for] identifier[i] , identifier[filter_size] keyword[in] identifier[enumerate] ( identifier[args] . identifier[char_filter_list] ):
identifier[convi] = identifier[mx] . identifier[sym] . identifier[Convolution] ( identifier[data] = identifier[char_embeddings] , identifier[kernel] =( literal[int] , identifier[filter_size] , identifier[args] . identifier[char_embed] ), identifier[stride] =( literal[int] , literal[int] , literal[int] ),
identifier[num_filter] = identifier[args] . identifier[char_filters] , identifier[name] = literal[string] + identifier[str] ( identifier[i] ))
identifier[acti] = identifier[mx] . identifier[sym] . identifier[Activation] ( identifier[data] = identifier[convi] , identifier[act_type] = literal[string] )
identifier[pooli] = identifier[mx] . identifier[sym] . identifier[Pooling] ( identifier[data] = identifier[acti] , identifier[pool_type] = literal[string] , identifier[kernel] =( literal[int] , identifier[char_sentence_shape] [ literal[int] ]- identifier[filter_size] + literal[int] , literal[int] ),
identifier[stride] =( literal[int] , literal[int] , literal[int] ), identifier[name] = literal[string] + identifier[str] ( identifier[i] ))
identifier[pooli] = identifier[mx] . identifier[sym] . identifier[transpose] ( identifier[mx] . identifier[sym] . identifier[Reshape] ( identifier[pooli] , identifier[shape] =( literal[int] , literal[int] , literal[int] )), identifier[axes] =( literal[int] , literal[int] , literal[int] ), identifier[name] = literal[string] + identifier[str] ( identifier[i] ))
identifier[char_cnn_outputs] . identifier[append] ( identifier[pooli] )
identifier[cnn_char_features] = identifier[mx] . identifier[sym] . identifier[Concat] (* identifier[char_cnn_outputs] , identifier[dim] = literal[int] , identifier[name] = literal[string] )
identifier[regularized_cnn_char_features] = identifier[mx] . identifier[sym] . identifier[Dropout] ( identifier[data] = identifier[cnn_char_features] , identifier[p] = identifier[args] . identifier[dropout] , identifier[mode] = literal[string] ,
identifier[name] = literal[string] )
identifier[word_embeddings] = identifier[mx] . identifier[sym] . identifier[Embedding] ( identifier[data] = identifier[X_sent] , identifier[input_dim] = identifier[len] ( identifier[word_to_index] ), identifier[output_dim] = identifier[args] . identifier[word_embed] , identifier[name] = literal[string] )
identifier[rnn_features] = identifier[mx] . identifier[sym] . identifier[Concat] (*[ identifier[word_embeddings] , identifier[regularized_cnn_char_features] ], identifier[dim] = literal[int] , identifier[name] = literal[string] )
identifier[bi_cell] . identifier[reset] ()
identifier[output] , identifier[states] = identifier[bi_cell] . identifier[unroll] ( identifier[length] = identifier[seq_len] , identifier[inputs] = identifier[rnn_features] , identifier[merge_outputs] = keyword[True] )
identifier[rnn_output] = identifier[mx] . identifier[sym] . identifier[Reshape] ( identifier[output] , identifier[shape] =(- literal[int] , identifier[args] . identifier[lstm_state_size] * literal[int] ), identifier[name] = literal[string] )
identifier[fc] = identifier[mx] . identifier[sym] . identifier[FullyConnected] ( identifier[data] = identifier[rnn_output] , identifier[num_hidden] = identifier[len] ( identifier[entity_to_index] ), identifier[name] = literal[string] )
identifier[reshaped_fc] = identifier[mx] . identifier[sym] . identifier[transpose] ( identifier[mx] . identifier[sym] . identifier[reshape] ( identifier[fc] , identifier[shape] =(- literal[int] , identifier[seq_len] , identifier[len] ( identifier[entity_to_index] ))), identifier[axes] =( literal[int] , literal[int] , literal[int] ))
identifier[sm] = identifier[mx] . identifier[sym] . identifier[SoftmaxOutput] ( identifier[data] = identifier[reshaped_fc] , identifier[label] = identifier[Y] , identifier[ignore_label] =- literal[int] , identifier[use_ignore] = keyword[True] , identifier[multi_output] = keyword[True] , identifier[name] = literal[string] )
keyword[return] identifier[sm] ,[ identifier[v] . identifier[name] keyword[for] identifier[v] keyword[in] identifier[train_iter] . identifier[provide_data] ],[ identifier[v] . identifier[name] keyword[for] identifier[v] keyword[in] identifier[train_iter] . identifier[provide_label] ] | def sym_gen(seq_len):
"""
Build NN symbol depending on the length of the input sequence
"""
sentence_shape = train_iter.provide_data[0][1]
char_sentence_shape = train_iter.provide_data[1][1]
entities_shape = train_iter.provide_label[0][1]
X_sent = mx.symbol.Variable(train_iter.provide_data[0].name)
X_char_sent = mx.symbol.Variable(train_iter.provide_data[1].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
###############################
# Character embedding component
###############################
char_embeddings = mx.sym.Embedding(data=X_char_sent, input_dim=len(char_to_index), output_dim=args.char_embed, name='char_embed')
char_embeddings = mx.sym.reshape(data=char_embeddings, shape=(0, 1, seq_len, -1, args.char_embed), name='char_embed2')
char_cnn_outputs = []
for (i, filter_size) in enumerate(args.char_filter_list):
# Kernel that slides over entire words resulting in a 1d output
convi = mx.sym.Convolution(data=char_embeddings, kernel=(1, filter_size, args.char_embed), stride=(1, 1, 1), num_filter=args.char_filters, name='char_conv_layer_' + str(i))
acti = mx.sym.Activation(data=convi, act_type='tanh')
pooli = mx.sym.Pooling(data=acti, pool_type='max', kernel=(1, char_sentence_shape[2] - filter_size + 1, 1), stride=(1, 1, 1), name='char_pool_layer_' + str(i))
pooli = mx.sym.transpose(mx.sym.Reshape(pooli, shape=(0, 0, 0)), axes=(0, 2, 1), name='cchar_conv_layer_' + str(i))
char_cnn_outputs.append(pooli) # depends on [control=['for'], data=[]]
# combine features from all filters & apply dropout
cnn_char_features = mx.sym.Concat(*char_cnn_outputs, dim=2, name='cnn_char_features')
regularized_cnn_char_features = mx.sym.Dropout(data=cnn_char_features, p=args.dropout, mode='training', name='regularized charCnn features')
##################################
# Combine char and word embeddings
##################################
word_embeddings = mx.sym.Embedding(data=X_sent, input_dim=len(word_to_index), output_dim=args.word_embed, name='word_embed')
rnn_features = mx.sym.Concat(*[word_embeddings, regularized_cnn_char_features], dim=2, name='rnn input')
##############################
# Bidirectional LSTM component
##############################
# unroll the lstm cell in time, merging outputs
bi_cell.reset()
(output, states) = bi_cell.unroll(length=seq_len, inputs=rnn_features, merge_outputs=True)
# Map to num entity classes
rnn_output = mx.sym.Reshape(output, shape=(-1, args.lstm_state_size * 2), name='r_output')
fc = mx.sym.FullyConnected(data=rnn_output, num_hidden=len(entity_to_index), name='fc_layer')
# reshape back to same shape as loss will be
reshaped_fc = mx.sym.transpose(mx.sym.reshape(fc, shape=(-1, seq_len, len(entity_to_index))), axes=(0, 2, 1))
sm = mx.sym.SoftmaxOutput(data=reshaped_fc, label=Y, ignore_label=-1, use_ignore=True, multi_output=True, name='softmax')
return (sm, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]) |
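sym_gen returns the (symbol, data_names, label_names) triple that MXNet's BucketingModule expects, so wiring it into training would look roughly like the sketch below; train_iter, args, bi_cell, and the vocab dicts referenced inside sym_gen are assumed to already exist in scope, and the context is a placeholder:

import mxnet as mx

model = mx.mod.BucketingModule(
    sym_gen=sym_gen,
    default_bucket_key=train_iter.default_bucket_key,
    context=mx.cpu())
model.fit(train_iter, num_epoch=args.num_epochs,
          optimizer='adam', eval_metric='accuracy')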
def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset() | def function[_add_child, parameter[self, collection, set, child]]:
constant[Adds 'child' to 'collection', first checking 'set' to see if it's
already present.]
variable[added] assign[=] constant[None]
for taget[name[c]] in starred[name[child]] begin[:]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> name[set]] begin[:]
call[name[set].add, parameter[name[c]]]
call[name[collection].append, parameter[name[c]]]
variable[added] assign[=] constant[1]
if name[added] begin[:]
call[name[self]._children_reset, parameter[]] | keyword[def] identifier[_add_child] ( identifier[self] , identifier[collection] , identifier[set] , identifier[child] ):
literal[string]
identifier[added] = keyword[None]
keyword[for] identifier[c] keyword[in] identifier[child] :
keyword[if] identifier[c] keyword[not] keyword[in] identifier[set] :
identifier[set] . identifier[add] ( identifier[c] )
identifier[collection] . identifier[append] ( identifier[c] )
identifier[added] = literal[int]
keyword[if] identifier[added] :
identifier[self] . identifier[_children_reset] () | def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1 # depends on [control=['if'], data=['c', 'set']] # depends on [control=['for'], data=['c']]
if added:
self._children_reset() # depends on [control=['if'], data=[]] |
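The helper implements an order-preserving, de-duplicated extend; the same behavior in isolation:

collection, seen = [], set()

def add_children(children):
    added = False
    for c in children:
        if c not in seen:
            seen.add(c)
            collection.append(c)
            added = True
    return added  # the SCons version calls self._children_reset() when True

add_children(['a', 'b'])
add_children(['b', 'c'])
print(collection)  # ['a', 'b', 'c'] -- order kept, 'b' not re-added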
def update_message_dict(message_dict,action):
"""
Update the g_ok_java_messages dict structure by
1. adding the new java ignored messages stored in message_dict if action == 1
2. removing the java ignored messages stored in message_dict if action == 2.
Parameters
----------
message_dict : Python dict
key: unit test name or "general"
value: list of java messages that are to be ignored if they are found when running the test stored as the key. If
the key is "general", the list of java messages are to be ignored when running all tests.
action : int
if 1: add java ignored messages stored in message_dict to g_ok_java_messages dict;
if 2: remove java ignored messages stored in message_dict from g_ok_java_messages dict.
:return: none
"""
global g_ok_java_messages
allKeys = g_ok_java_messages.keys()
for key in message_dict.keys():
if key in allKeys: # key already exists, just add to it
for message in message_dict[key]:
if action == 1:
if message not in g_ok_java_messages[key]:
g_ok_java_messages[key].append(message)
if action == 2:
if message in g_ok_java_messages[key]:
g_ok_java_messages[key].remove(message)
else: # new key here. Can only add and cannot remove
if action == 1:
g_ok_java_messages[key] = message_dict[key] | def function[update_message_dict, parameter[message_dict, action]]:
constant[
Update the g_ok_java_messages dict structure by
1. adding the new java ignored messages stored in message_dict if action == 1
2. removing the java ignored messages stored in message_dict if action == 2.
Parameters
----------
message_dict : Python dict
key: unit test name or "general"
value: list of java messages that are to be ignored if they are found when running the test stored as the key. If
the key is "general", the list of java messages are to be ignored when running all tests.
action : int
if 1: add java ignored messages stored in message_dict to g_ok_java_messages dict;
if 2: remove java ignored messages stored in message_dict from g_ok_java_messages dict.
:return: none
]
<ast.Global object at 0x7da18dc99900>
variable[allKeys] assign[=] call[name[g_ok_java_messages].keys, parameter[]]
for taget[name[key]] in starred[call[name[message_dict].keys, parameter[]]] begin[:]
if compare[name[key] in name[allKeys]] begin[:]
for taget[name[message]] in starred[call[name[message_dict]][name[key]]] begin[:]
if compare[name[action] equal[==] constant[1]] begin[:]
if compare[name[message] <ast.NotIn object at 0x7da2590d7190> call[name[g_ok_java_messages]][name[key]]] begin[:]
call[call[name[g_ok_java_messages]][name[key]].append, parameter[name[message]]]
if compare[name[action] equal[==] constant[2]] begin[:]
if compare[name[message] in call[name[g_ok_java_messages]][name[key]]] begin[:]
call[call[name[g_ok_java_messages]][name[key]].remove, parameter[name[message]]] | keyword[def] identifier[update_message_dict] ( identifier[message_dict] , identifier[action] ):
literal[string]
keyword[global] identifier[g_ok_java_messages]
identifier[allKeys] = identifier[g_ok_java_messages] . identifier[keys] ()
keyword[for] identifier[key] keyword[in] identifier[message_dict] . identifier[keys] ():
keyword[if] identifier[key] keyword[in] identifier[allKeys] :
keyword[for] identifier[message] keyword[in] identifier[message_dict] [ identifier[key] ]:
keyword[if] identifier[action] == literal[int] :
keyword[if] identifier[message] keyword[not] keyword[in] identifier[g_ok_java_messages] [ identifier[key] ]:
identifier[g_ok_java_messages] [ identifier[key] ]. identifier[append] ( identifier[message] )
keyword[if] identifier[action] == literal[int] :
keyword[if] identifier[message] keyword[in] identifier[g_ok_java_messages] [ identifier[key] ]:
identifier[g_ok_java_messages] [ identifier[key] ]. identifier[remove] ( identifier[message] )
keyword[else] :
keyword[if] identifier[action] == literal[int] :
identifier[g_ok_java_messages] [ identifier[key] ]= identifier[message_dict] [ identifier[key] ] | def update_message_dict(message_dict, action):
"""
Update the g_ok_java_messages dict structure by
1. adding the new java ignored messages stored in message_dict if action == 1
2. removing the java ignored messages stored in message_dict if action == 2.
Parameters
----------
message_dict : Python dict
key: unit test name or "general"
value: list of java messages that are to be ignored if they are found when running the test stored as the key. If
the key is "general", the list of java messages are to be ignored when running all tests.
action : int
if 1: add java ignored messages stored in message_dict to g_ok_java_messages dict;
if 2: remove java ignored messages stored in message_dict from g_ok_java_messages dict.
:return: none
"""
global g_ok_java_messages
allKeys = g_ok_java_messages.keys()
for key in message_dict.keys():
if key in allKeys: # key already exists, just add to it
for message in message_dict[key]:
if action == 1:
if message not in g_ok_java_messages[key]:
g_ok_java_messages[key].append(message) # depends on [control=['if'], data=['message']] # depends on [control=['if'], data=[]]
if action == 2:
if message in g_ok_java_messages[key]:
g_ok_java_messages[key].remove(message) # depends on [control=['if'], data=['message']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['message']] # depends on [control=['if'], data=['key']] # new key here. Can only add and cannot remove
elif action == 1:
g_ok_java_messages[key] = message_dict[key] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] |
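A runnable walk-through of both actions, pasted into the same module as update_message_dict (the message strings and test name are made-up placeholders):

g_ok_java_messages = {"general": ["Retrying after IOException"]}

update_message_dict({"general": ["GC overhead limit exceeded"],
                     "pyunit_demo": ["NullPointerException in setup"]}, 1)
print(g_ok_java_messages)
# 'general' gains the new message; 'pyunit_demo' is created as a new key

update_message_dict({"general": ["GC overhead limit exceeded"]}, 2)
print(g_ok_java_messages["general"])  # ['Retrying after IOException']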
def get_long_description():
"""Extract description from README.md, for PyPI's usage"""
try:
fpath = os.path.join(os.path.dirname(__file__), "README.md")
with io.open(fpath, encoding="utf-8") as f:
readme = f.read()
desc = readme.partition("<!-- start_ppi_description -->")[2]
desc = desc.partition("<!-- stop_ppi_description -->")[0]
return desc.strip()
except IOError:
return None | def function[get_long_description, parameter[]]:
constant[Extract description from README.md, for PyPI's usage]
<ast.Try object at 0x7da1b21c73a0> | keyword[def] identifier[get_long_description] ():
literal[string]
keyword[try] :
identifier[fpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] )
keyword[with] identifier[io] . identifier[open] ( identifier[fpath] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[readme] = identifier[f] . identifier[read] ()
identifier[desc] = identifier[readme] . identifier[partition] ( literal[string] )[ literal[int] ]
identifier[desc] = identifier[desc] . identifier[partition] ( literal[string] )[ literal[int] ]
keyword[return] identifier[desc] . identifier[strip] ()
keyword[except] identifier[IOError] :
keyword[return] keyword[None] | def get_long_description():
"""Extract description from README.md, for PyPI's usage"""
try:
fpath = os.path.join(os.path.dirname(__file__), 'README.md')
with io.open(fpath, encoding='utf-8') as f:
readme = f.read()
desc = readme.partition('<!-- start_ppi_description -->')[2]
desc = desc.partition('<!-- stop_ppi_description -->')[0]
return desc.strip() # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError:
return None # depends on [control=['except'], data=[]] |
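The extraction itself is just two str.partition calls between HTML-comment markers; a standalone check of that logic with an inline README stand-in:

readme = (u"# Title\n"
          u"<!-- start_ppi_description -->\n"
          u"Short description for PyPI.\n"
          u"<!-- stop_ppi_description -->\n"
          u"More docs...\n")
desc = readme.partition("<!-- start_ppi_description -->")[2]
desc = desc.partition("<!-- stop_ppi_description -->")[0]
print(desc.strip())  # -> Short description for PyPI.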
def add_dynamic_gateway(self, networks):
"""
A dynamic gateway object creates a router object that is
attached to a DHCP interface. You can associate networks with
this gateway address to identify networks for routing on this
interface.
::
route = engine.routing.get(0)
route.add_dynamic_gateway([Network('mynetwork')])
:param list networks: list of network elements to add to
this DHCP gateway
:raises ModificationAborted: Change must be made at the interface level
:raises UpdateElementFailed: failure to update routing table
:return: Status of whether the route table was updated
:rtype: bool
"""
routing_node_gateway = RoutingNodeGateway(dynamic_classid='gateway',
destinations=networks or [])
return self._add_gateway_node('dynamic_netlink', routing_node_gateway) | def function[add_dynamic_gateway, parameter[self, networks]]:
constant[
A dynamic gateway object creates a router object that is
attached to a DHCP interface. You can associate networks with
this gateway address to identify networks for routing on this
interface.
::
route = engine.routing.get(0)
route.add_dynamic_gateway([Network('mynetwork')])
:param list networks: list of network elements to add to
this DHCP gateway
:raises ModificationAborted: Change must be made at the interface level
:raises UpdateElementFailed: failure to update routing table
:return: Status of whether the route table was updated
:rtype: bool
]
variable[routing_node_gateway] assign[=] call[name[RoutingNodeGateway], parameter[]]
return[call[name[self]._add_gateway_node, parameter[constant[dynamic_netlink], name[routing_node_gateway]]]] | keyword[def] identifier[add_dynamic_gateway] ( identifier[self] , identifier[networks] ):
literal[string]
identifier[routing_node_gateway] = identifier[RoutingNodeGateway] ( identifier[dynamic_classid] = literal[string] ,
identifier[destinations] = identifier[networks] keyword[or] [])
keyword[return] identifier[self] . identifier[_add_gateway_node] ( literal[string] , identifier[routing_node_gateway] ) | def add_dynamic_gateway(self, networks):
"""
A dynamic gateway object creates a router object that is
attached to a DHCP interface. You can associate networks with
this gateway address to identify networks for routing on this
interface.
::
route = engine.routing.get(0)
route.add_dynamic_gateway([Network('mynetwork')])
:param list networks: list of network elements to add to
this DHCP gateway
:raises ModificationAborted: Change must be made at the interface level
:raises UpdateElementFailed: failure to update routing table
:return: Status of whether the route table was updated
:rtype: bool
"""
routing_node_gateway = RoutingNodeGateway(dynamic_classid='gateway', destinations=networks or [])
return self._add_gateway_node('dynamic_netlink', routing_node_gateway) |
def get_value(self, field, quick):
# type: (Field, bool) -> Any
""" Ask user the question represented by this instance.
Args:
field (Field):
The field we're asking the user to provide the value for.
quick (bool):
Enable quick mode. In quick mode, the form will reduce the
number of question asked by using defaults wherever possible.
This can greatly reduce the number of interactions required on
the user part, but will obviously limit the user choices. This
should probably be enabled only by a specific user action
(like passing a ``--quick`` flag etc.).
Returns:
The user response converted to a python type using the
:py:attr:`cliform.core.Field.type` converter.
"""
if callable(field.default):
default = field.default(self)
else:
default = field.default
if quick and default is not None:
return default
shell.cprint('<90>{}', field.help)
while True:
try:
answer = click.prompt(field.pretty_prompt, default=default)
return field.type(answer)
except ValueError:
shell.cprint("<31>Unsupported value") | def function[get_value, parameter[self, field, quick]]:
constant[ Ask user the question represented by this instance.
Args:
field (Field):
The field we're asking the user to provide the value for.
quick (bool):
Enable quick mode. In quick mode, the form will reduce the
number of questions asked by using defaults wherever possible.
This can greatly reduce the number of interactions required on
the user's part, but will obviously limit the user's choices. This
should probably be enabled only by a specific user action
(like passing a ``--quick`` flag etc.).
Returns:
The user response converted to a python type using the
:py:attr:`cliform.core.Field.type` converter.
]
if call[name[callable], parameter[name[field].default]] begin[:]
variable[default] assign[=] call[name[field].default, parameter[name[self]]]
if <ast.BoolOp object at 0x7da1b10a7d00> begin[:]
return[name[default]]
call[name[shell].cprint, parameter[constant[<90>{}], name[field].help]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b10a4430> | keyword[def] identifier[get_value] ( identifier[self] , identifier[field] , identifier[quick] ):
literal[string]
keyword[if] identifier[callable] ( identifier[field] . identifier[default] ):
identifier[default] = identifier[field] . identifier[default] ( identifier[self] )
keyword[else] :
identifier[default] = identifier[field] . identifier[default]
keyword[if] identifier[quick] keyword[and] identifier[default] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[default]
identifier[shell] . identifier[cprint] ( literal[string] , identifier[field] . identifier[help] )
keyword[while] keyword[True] :
keyword[try] :
identifier[answer] = identifier[click] . identifier[prompt] ( identifier[field] . identifier[pretty_prompt] , identifier[default] = identifier[default] )
keyword[return] identifier[field] . identifier[type] ( identifier[answer] )
keyword[except] identifier[ValueError] :
identifier[shell] . identifier[cprint] ( literal[string] ) | def get_value(self, field, quick):
# type: (Field, bool) -> Any
" Ask user the question represented by this instance.\n\n Args:\n field (Field):\n The field we're asking the user to provide the value for.\n quick (bool):\n Enable quick mode. In quick mode, the form will reduce the\n number of question asked by using defaults wherever possible.\n This can greatly reduce the number of interactions required on\n the user part, but will obviously limit the user choices. This\n should probably be enabled only by a specific user action\n (like passing a ``--quick`` flag etc.).\n\n Returns:\n The user response converted to a python type using the\n :py:attr:`cliform.core.Field.type` converter.\n "
if callable(field.default):
default = field.default(self) # depends on [control=['if'], data=[]]
else:
default = field.default
if quick and default is not None:
return default # depends on [control=['if'], data=[]]
shell.cprint('<90>{}', field.help)
while True:
try:
answer = click.prompt(field.pretty_prompt, default=default)
return field.type(answer) # depends on [control=['try'], data=[]]
except ValueError:
shell.cprint('<31>Unsupported value') # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
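The essence of get_value is a retry-until-convertible prompt; the same loop without the Field plumbing, assuming click is installed (the call is left commented out because it blocks on stdin):

import click

def prompt_typed(prompt, type_, default=None):
    # Re-ask until type_() accepts the answer, mirroring get_value above.
    while True:
        try:
            return type_(click.prompt(prompt, default=default))
        except ValueError:
            click.echo("Unsupported value")

# workers = prompt_typed("Number of workers", int, default=3)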
def read_stats(self):
""" Read current statistics from chassis.
:return: dictionary {stream: {'tx': {stat name: stat value}, 'rx': {tpld: {stat group: {stat name: stat value}}}}}
"""
self.tx_statistics = TgnObjectsDict()
for port in self.session.ports.values():
for stream in port.streams.values():
self.tx_statistics[stream] = stream.read_stats()
tpld_statistics = XenaTpldsStats(self.session).read_stats()
self.statistics = TgnObjectsDict()
for stream, stream_stats in self.tx_statistics.items():
self.statistics[stream] = OrderedDict()
self.statistics[stream]['tx'] = stream_stats
self.statistics[stream]['rx'] = OrderedDict()
stream_tpld = stream.get_attribute('ps_tpldid')
for tpld, tpld_stats in tpld_statistics.items():
if tpld.id == stream_tpld:
self.statistics[stream]['rx'][tpld] = tpld_stats
return self.statistics | def function[read_stats, parameter[self]]:
constant[ Read current statistics from chassis.
:return: dictionary {stream: {'tx': {stat name: stat value}, 'rx': {tpld: {stat group: {stat name: stat value}}}}}
]
name[self].tx_statistics assign[=] call[name[TgnObjectsDict], parameter[]]
for taget[name[port]] in starred[call[name[self].session.ports.values, parameter[]]] begin[:]
for taget[name[stream]] in starred[call[name[port].streams.values, parameter[]]] begin[:]
call[name[self].tx_statistics][name[stream]] assign[=] call[name[stream].read_stats, parameter[]]
variable[tpld_statistics] assign[=] call[call[name[XenaTpldsStats], parameter[name[self].session]].read_stats, parameter[]]
name[self].statistics assign[=] call[name[TgnObjectsDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c76d420>, <ast.Name object at 0x7da20c76d630>]]] in starred[call[name[self].tx_statistics.items, parameter[]]] begin[:]
call[name[self].statistics][name[stream]] assign[=] call[name[OrderedDict], parameter[]]
call[call[name[self].statistics][name[stream]]][constant[tx]] assign[=] name[stream_stats]
call[call[name[self].statistics][name[stream]]][constant[rx]] assign[=] call[name[OrderedDict], parameter[]]
variable[stream_tpld] assign[=] call[name[stream].get_attribute, parameter[constant[ps_tpldid]]]
for taget[tuple[[<ast.Name object at 0x7da1b10424a0>, <ast.Name object at 0x7da1b1042a40>]]] in starred[call[name[tpld_statistics].items, parameter[]]] begin[:]
if compare[name[tpld].id equal[==] name[stream_tpld]] begin[:]
call[call[call[name[self].statistics][name[stream]]][constant[rx]]][name[tpld]] assign[=] name[tpld_stats]
return[name[self].statistics] | keyword[def] identifier[read_stats] ( identifier[self] ):
literal[string]
identifier[self] . identifier[tx_statistics] = identifier[TgnObjectsDict] ()
keyword[for] identifier[port] keyword[in] identifier[self] . identifier[session] . identifier[ports] . identifier[values] ():
keyword[for] identifier[stream] keyword[in] identifier[port] . identifier[streams] . identifier[values] ():
identifier[self] . identifier[tx_statistics] [ identifier[stream] ]= identifier[stream] . identifier[read_stats] ()
identifier[tpld_statistics] = identifier[XenaTpldsStats] ( identifier[self] . identifier[session] ). identifier[read_stats] ()
identifier[self] . identifier[statistics] = identifier[TgnObjectsDict] ()
keyword[for] identifier[stream] , identifier[stream_stats] keyword[in] identifier[self] . identifier[tx_statistics] . identifier[items] ():
identifier[self] . identifier[statistics] [ identifier[stream] ]= identifier[OrderedDict] ()
identifier[self] . identifier[statistics] [ identifier[stream] ][ literal[string] ]= identifier[stream_stats]
identifier[self] . identifier[statistics] [ identifier[stream] ][ literal[string] ]= identifier[OrderedDict] ()
identifier[stream_tpld] = identifier[stream] . identifier[get_attribute] ( literal[string] )
keyword[for] identifier[tpld] , identifier[tpld_stats] keyword[in] identifier[tpld_statistics] . identifier[items] ():
keyword[if] identifier[tpld] . identifier[id] == identifier[stream_tpld] :
identifier[self] . identifier[statistics] [ identifier[stream] ][ literal[string] ][ identifier[tpld] ]= identifier[tpld_stats]
keyword[return] identifier[self] . identifier[statistics] | def read_stats(self):
""" Read current statistics from chassis.
:return: dictionary {stream: {'tx': {stat name: stat value}, 'rx': {tpld: {stat group: {stat name: stat value}}}}}
"""
self.tx_statistics = TgnObjectsDict()
for port in self.session.ports.values():
for stream in port.streams.values():
self.tx_statistics[stream] = stream.read_stats() # depends on [control=['for'], data=['stream']] # depends on [control=['for'], data=['port']]
tpld_statistics = XenaTpldsStats(self.session).read_stats()
self.statistics = TgnObjectsDict()
for (stream, stream_stats) in self.tx_statistics.items():
self.statistics[stream] = OrderedDict()
self.statistics[stream]['tx'] = stream_stats
self.statistics[stream]['rx'] = OrderedDict()
stream_tpld = stream.get_attribute('ps_tpldid')
for (tpld, tpld_stats) in tpld_statistics.items():
if tpld.id == stream_tpld:
self.statistics[stream]['rx'][tpld] = tpld_stats # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return self.statistics |
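A hypothetical walk over the nested result, assuming stats_obj is an instance of the class above bound to a live Xena session:

stats = stats_obj.read_stats()
for stream, per_dir in stats.items():
    print(stream, per_dir['tx'])          # per-stream TX counters
    for tpld, groups in per_dir['rx'].items():
        print('  ', tpld, groups)         # RX stat groups per matching TPLD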
def get_contents_sig(self):
"""
A helper method for get_cachedir_bsig.
It computes and returns the signature for this
node's contents.
"""
try:
return self.contentsig
except AttributeError:
pass
executor = self.get_executor()
result = self.contentsig = SCons.Util.MD5signature(executor.get_contents())
return result | def function[get_contents_sig, parameter[self]]:
constant[
A helper method for get_cachedir_bsig.
It computes and returns the signature for this
node's contents.
]
<ast.Try object at 0x7da2041da830>
variable[executor] assign[=] call[name[self].get_executor, parameter[]]
variable[result] assign[=] call[name[SCons].Util.MD5signature, parameter[call[name[executor].get_contents, parameter[]]]]
return[name[result]] | keyword[def] identifier[get_contents_sig] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[contentsig]
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[executor] = identifier[self] . identifier[get_executor] ()
identifier[result] = identifier[self] . identifier[contentsig] = identifier[SCons] . identifier[Util] . identifier[MD5signature] ( identifier[executor] . identifier[get_contents] ())
keyword[return] identifier[result] | def get_contents_sig(self):
"""
A helper method for get_cachedir_bsig.
It computes and returns the signature for this
node's contents.
"""
try:
return self.contentsig # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
executor = self.get_executor()
result = self.contentsig = SCons.Util.MD5signature(executor.get_contents())
return result |
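The same compute-once-then-cache-on-the-instance idiom, shown standalone with hashlib.md5 standing in for SCons.Util.MD5signature:

import hashlib

class Node(object):
    def __init__(self, contents):
        self._contents = contents
    def get_contents(self):
        return self._contents
    def get_contents_sig(self):
        try:
            return self.contentsig  # hit: computed on an earlier call
        except AttributeError:
            pass
        self.contentsig = hashlib.md5(self.get_contents()).hexdigest()
        return self.contentsig

n = Node(b"hello")
assert n.get_contents_sig() == n.get_contents_sig()  # second call uses the cache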
def generate_warning_text(self):
"""
generates warnings for the current specimen then adds them to the
current warning text for the GUI which will be rendered on a call to
update_warning_box.
"""
self.warning_text = ""
if self.s in list(self.pmag_results_data['specimens'].keys()):
for fit in self.pmag_results_data['specimens'][self.s]:
beg_pca, end_pca = self.get_indices(
fit, fit.tmin, fit.tmax, self.s)
if beg_pca == None or end_pca == None:
self.warning_text += "%s to %s are invalid bounds, to fit %s.\n" % (
fit.tmin, fit.tmax, fit.name)
elif end_pca - beg_pca < 2:
self.warning_text += "there are not enough points between %s to %s, on fit %s.\n" % (
fit.tmin, fit.tmax, fit.name)
else:
check_duplicates = []
warning_issued = [] # keep track of warnings issued to avoid redundant warnings
# if within range, attempt to go one additional step beyond
# tmax so that duplicates at the upper bound are caught
if (end_pca + 2) < len(self.Data[self.s]['zijdblock_steps']):
check_endpoint = end_pca + 2
else:
check_endpoint = end_pca + 1
for s, f in zip(self.Data[self.s]['zijdblock_steps'][beg_pca:check_endpoint],
self.Data[self.s]['measurement_flag'][beg_pca:check_endpoint]):
if f == 'g' and [s, 'g'] in check_duplicates:
if s == fit.tmin and s not in warning_issued:
self.warning_text += ("There are multiple good %s " +
"steps at the upper bound of Fit %s. The first " +
"measurement will be used as the lower bound.\n") % (
s, fit.name)
# warning_issued_low.append(s)
warning_issued.append(s)
elif s == fit.tmax and s not in warning_issued:
self.warning_text += ("There are multiple good %s " +
"steps at the upper bound of Fit %s. The first " +
"measurement will be used as the upper bound.\n") % (
s, fit.name)
# warning_issued_high.append(s)
warning_issued.append(s)
elif s not in warning_issued:
self.warning_text += ("Within Fit %s, there are " +
"multiple good measurements at the %s step. All " +
"good measurements are included in the fit.\n") % (
fit.name, s)
warning_issued.append(s)
else:
pass
else:
check_duplicates.append([s, f])
if self.s in list(self.Data.keys()):
if not self.Data[self.s]['zijdblock_geo']:
self.warning_text += "There is no geographic data for this specimen.\n"
if not self.Data[self.s]['zijdblock_tilt']:
self.warning_text += "There is no tilt-corrected data for this specimen.\n" | def function[generate_warning_text, parameter[self]]:
constant[
generates warnings for the current specimen then adds them to the
current warning text for the GUI which will be rendered on a call to
update_warning_box.
]
name[self].warning_text assign[=] constant[]
if compare[name[self].s in call[name[list], parameter[call[call[name[self].pmag_results_data][constant[specimens]].keys, parameter[]]]]] begin[:]
for taget[name[fit]] in starred[call[call[name[self].pmag_results_data][constant[specimens]]][name[self].s]] begin[:]
<ast.Tuple object at 0x7da2044c31f0> assign[=] call[name[self].get_indices, parameter[name[fit], name[fit].tmin, name[fit].tmax, name[self].s]]
if <ast.BoolOp object at 0x7da2044c0a90> begin[:]
<ast.AugAssign object at 0x7da2044c3040>
if compare[name[self].s in call[name[list], parameter[call[name[self].Data.keys, parameter[]]]]] begin[:]
if <ast.UnaryOp object at 0x7da2044c2d40> begin[:]
<ast.AugAssign object at 0x7da2044c39a0>
if <ast.UnaryOp object at 0x7da2044c0820> begin[:]
<ast.AugAssign object at 0x7da2044c1cf0> | keyword[def] identifier[generate_warning_text] ( identifier[self] ):
literal[string]
identifier[self] . identifier[warning_text] = literal[string]
keyword[if] identifier[self] . identifier[s] keyword[in] identifier[list] ( identifier[self] . identifier[pmag_results_data] [ literal[string] ]. identifier[keys] ()):
keyword[for] identifier[fit] keyword[in] identifier[self] . identifier[pmag_results_data] [ literal[string] ][ identifier[self] . identifier[s] ]:
identifier[beg_pca] , identifier[end_pca] = identifier[self] . identifier[get_indices] (
identifier[fit] , identifier[fit] . identifier[tmin] , identifier[fit] . identifier[tmax] , identifier[self] . identifier[s] )
keyword[if] identifier[beg_pca] == keyword[None] keyword[or] identifier[end_pca] == keyword[None] :
identifier[self] . identifier[warning_text] += literal[string] %(
identifier[fit] . identifier[tmin] , identifier[fit] . identifier[tmax] , identifier[fit] . identifier[name] )
keyword[elif] identifier[end_pca] - identifier[beg_pca] < literal[int] :
identifier[self] . identifier[warning_text] += literal[string] %(
identifier[fit] . identifier[tmin] , identifier[fit] . identifier[tmax] , identifier[fit] . identifier[name] )
keyword[else] :
identifier[check_duplicates] =[]
identifier[warning_issued] =[]
keyword[if] ( identifier[end_pca] + literal[int] )< identifier[len] ( identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ]):
identifier[check_endpoint] = identifier[end_pca] + literal[int]
keyword[else] :
identifier[check_endpoint] = identifier[end_pca] + literal[int]
keyword[for] identifier[s] , identifier[f] keyword[in] identifier[zip] ( identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ][ identifier[beg_pca] : identifier[check_endpoint] ],
identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ][ identifier[beg_pca] : identifier[check_endpoint] ]):
keyword[if] identifier[f] == literal[string] keyword[and] [ identifier[s] , literal[string] ] keyword[in] identifier[check_duplicates] :
keyword[if] identifier[s] == identifier[fit] . identifier[tmin] keyword[and] identifier[s] keyword[not] keyword[in] identifier[warning_issued] :
identifier[self] . identifier[warning_text] +=( literal[string] +
literal[string] +
literal[string] )%(
identifier[s] , identifier[fit] . identifier[name] )
identifier[warning_issued] . identifier[append] ( identifier[s] )
keyword[elif] identifier[s] == identifier[fit] . identifier[tmax] keyword[and] identifier[s] keyword[not] keyword[in] identifier[warning_issued] :
identifier[self] . identifier[warning_text] +=( literal[string] +
literal[string] +
literal[string] )%(
identifier[s] , identifier[fit] . identifier[name] )
identifier[warning_issued] . identifier[append] ( identifier[s] )
keyword[elif] identifier[s] keyword[not] keyword[in] identifier[warning_issued] :
identifier[self] . identifier[warning_text] +=( literal[string] +
literal[string] +
literal[string] )%(
identifier[fit] . identifier[name] , identifier[s] )
identifier[warning_issued] . identifier[append] ( identifier[s] )
keyword[else] :
keyword[pass]
keyword[else] :
identifier[check_duplicates] . identifier[append] ([ identifier[s] , identifier[f] ])
keyword[if] identifier[self] . identifier[s] keyword[in] identifier[list] ( identifier[self] . identifier[Data] . identifier[keys] ()):
keyword[if] keyword[not] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ]:
identifier[self] . identifier[warning_text] += literal[string]
keyword[if] keyword[not] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ]:
identifier[self] . identifier[warning_text] += literal[string] | def generate_warning_text(self):
"""
generates warnings for the current specimen then adds them to the
current warning text for the GUI which will be rendered on a call to
update_warning_box.
"""
self.warning_text = ''
if self.s in list(self.pmag_results_data['specimens'].keys()):
for fit in self.pmag_results_data['specimens'][self.s]:
(beg_pca, end_pca) = self.get_indices(fit, fit.tmin, fit.tmax, self.s)
if beg_pca == None or end_pca == None:
self.warning_text += '%s to %s are invalid bounds, to fit %s.\n' % (fit.tmin, fit.tmax, fit.name) # depends on [control=['if'], data=[]]
elif end_pca - beg_pca < 2:
self.warning_text += 'there are not enough points between %s to %s, on fit %s.\n' % (fit.tmin, fit.tmax, fit.name) # depends on [control=['if'], data=[]]
else:
check_duplicates = []
warning_issued = [] # keep track of warnings issued to avoid redundant warnings
# if within range, attempt to go one additional step beyond
# tmax so that duplicates at the upper bound are caught
if end_pca + 2 < len(self.Data[self.s]['zijdblock_steps']):
check_endpoint = end_pca + 2 # depends on [control=['if'], data=[]]
else:
check_endpoint = end_pca + 1
for (s, f) in zip(self.Data[self.s]['zijdblock_steps'][beg_pca:check_endpoint], self.Data[self.s]['measurement_flag'][beg_pca:check_endpoint]):
if f == 'g' and [s, 'g'] in check_duplicates:
if s == fit.tmin and s not in warning_issued:
self.warning_text += ('There are multiple good %s ' + 'steps at the upper bound of Fit %s. The first ' + 'measurement will be used as the lower bound.\n') % (s, fit.name)
# warning_issued_low.append(s)
warning_issued.append(s) # depends on [control=['if'], data=[]]
elif s == fit.tmax and s not in warning_issued:
self.warning_text += ('There are multiple good %s ' + 'steps at the upper bound of Fit %s. The first ' + 'measurement will be used as the upper bound.\n') % (s, fit.name)
# warning_issued_high.append(s)
warning_issued.append(s) # depends on [control=['if'], data=[]]
elif s not in warning_issued:
self.warning_text += ('Within Fit %s, there are ' + 'multiple good measurements at the %s step. All ' + 'good measurements are included in the fit.\n') % (fit.name, s)
warning_issued.append(s) # depends on [control=['if'], data=['s', 'warning_issued']]
else:
pass # depends on [control=['if'], data=[]]
else:
check_duplicates.append([s, f]) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['fit']] # depends on [control=['if'], data=[]]
if self.s in list(self.Data.keys()):
if not self.Data[self.s]['zijdblock_geo']:
self.warning_text += 'There is no geographic data for this specimen.\n' # depends on [control=['if'], data=[]]
if not self.Data[self.s]['zijdblock_tilt']:
self.warning_text += 'There is no tilt-corrected data for this specimen.\n' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
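The check_endpoint arithmetic above exists because Python slices exclude the stop index: stepping two past end_pca lets a duplicate measurement sitting exactly at the upper bound fall inside the zip(). A small illustration with assumed demagnetization step labels:

steps = ['NRM', '50mT', '50mT', '60mT']   # assumed step labels
beg_pca, end_pca = 0, 1                   # fit bounds at NRM..50mT
steps[beg_pca:end_pca + 1]                # ['NRM', '50mT'] -- duplicate missed
steps[beg_pca:end_pca + 2]                # ['NRM', '50mT', '50mT'] -- caught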
def in_collision_other(self, other_manager,
return_names=False, return_data=False):
"""
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
"""
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(
request=fcl.CollisionRequest(
num_max_contacts=100000,
enable_contact=True))
self._manager.collide(other_manager._manager,
cdata,
fcl.defaultCollisionCallback)
result = cdata.result.is_collision
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
reverse = False
names = (self._extract_name(contact.o1),
other_manager._extract_name(contact.o2))
if names[0] is None:
names = (self._extract_name(contact.o2),
other_manager._extract_name(contact.o1))
reverse = True
if return_names:
objs_in_collision.add(names)
if return_data:
if reverse:
names = reversed(names)
contact_data.append(ContactData(names, contact))
if return_names and return_data:
return result, objs_in_collision, contact_data
elif return_names:
return result, objs_in_collision
elif return_data:
return result, contact_data
else:
return result | def function[in_collision_other, parameter[self, other_manager, return_names, return_data]]:
constant[
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
]
variable[cdata] assign[=] call[name[fcl].CollisionData, parameter[]]
if <ast.BoolOp object at 0x7da2044c1540> begin[:]
variable[cdata] assign[=] call[name[fcl].CollisionData, parameter[]]
call[name[self]._manager.collide, parameter[name[other_manager]._manager, name[cdata], name[fcl].defaultCollisionCallback]]
variable[result] assign[=] name[cdata].result.is_collision
variable[objs_in_collision] assign[=] call[name[set], parameter[]]
variable[contact_data] assign[=] list[[]]
if <ast.BoolOp object at 0x7da2044c1930> begin[:]
for taget[name[contact]] in starred[name[cdata].result.contacts] begin[:]
variable[reverse] assign[=] constant[False]
variable[names] assign[=] tuple[[<ast.Call object at 0x7da1b23e5ba0>, <ast.Call object at 0x7da1b23e5f30>]]
if compare[call[name[names]][constant[0]] is constant[None]] begin[:]
variable[names] assign[=] tuple[[<ast.Call object at 0x7da2054a5e70>, <ast.Call object at 0x7da2054a7fa0>]]
variable[reverse] assign[=] constant[True]
if name[return_names] begin[:]
call[name[objs_in_collision].add, parameter[name[names]]]
if name[return_data] begin[:]
if name[reverse] begin[:]
variable[names] assign[=] call[name[reversed], parameter[name[names]]]
call[name[contact_data].append, parameter[call[name[ContactData], parameter[name[names], name[contact]]]]]
if <ast.BoolOp object at 0x7da2054a4460> begin[:]
return[tuple[[<ast.Name object at 0x7da2054a5b40>, <ast.Name object at 0x7da2054a7070>, <ast.Name object at 0x7da2054a52d0>]]] | keyword[def] identifier[in_collision_other] ( identifier[self] , identifier[other_manager] ,
identifier[return_names] = keyword[False] , identifier[return_data] = keyword[False] ):
literal[string]
identifier[cdata] = identifier[fcl] . identifier[CollisionData] ()
keyword[if] identifier[return_names] keyword[or] identifier[return_data] :
identifier[cdata] = identifier[fcl] . identifier[CollisionData] (
identifier[request] = identifier[fcl] . identifier[CollisionRequest] (
identifier[num_max_contacts] = literal[int] ,
identifier[enable_contact] = keyword[True] ))
identifier[self] . identifier[_manager] . identifier[collide] ( identifier[other_manager] . identifier[_manager] ,
identifier[cdata] ,
identifier[fcl] . identifier[defaultCollisionCallback] )
identifier[result] = identifier[cdata] . identifier[result] . identifier[is_collision]
identifier[objs_in_collision] = identifier[set] ()
identifier[contact_data] =[]
keyword[if] identifier[return_names] keyword[or] identifier[return_data] :
keyword[for] identifier[contact] keyword[in] identifier[cdata] . identifier[result] . identifier[contacts] :
identifier[reverse] = keyword[False]
identifier[names] =( identifier[self] . identifier[_extract_name] ( identifier[contact] . identifier[o1] ),
identifier[other_manager] . identifier[_extract_name] ( identifier[contact] . identifier[o2] ))
keyword[if] identifier[names] [ literal[int] ] keyword[is] keyword[None] :
identifier[names] =( identifier[self] . identifier[_extract_name] ( identifier[contact] . identifier[o2] ),
identifier[other_manager] . identifier[_extract_name] ( identifier[contact] . identifier[o1] ))
identifier[reverse] = keyword[True]
keyword[if] identifier[return_names] :
identifier[objs_in_collision] . identifier[add] ( identifier[names] )
keyword[if] identifier[return_data] :
keyword[if] identifier[reverse] :
identifier[names] = identifier[reversed] ( identifier[names] )
identifier[contact_data] . identifier[append] ( identifier[ContactData] ( identifier[names] , identifier[contact] ))
keyword[if] identifier[return_names] keyword[and] identifier[return_data] :
keyword[return] identifier[result] , identifier[objs_in_collision] , identifier[contact_data]
keyword[elif] identifier[return_names] :
keyword[return] identifier[result] , identifier[objs_in_collision]
keyword[elif] identifier[return_data] :
keyword[return] identifier[result] , identifier[contact_data]
keyword[else] :
keyword[return] identifier[result] | def in_collision_other(self, other_manager, return_names=False, return_data=False):
"""
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
"""
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(request=fcl.CollisionRequest(num_max_contacts=100000, enable_contact=True)) # depends on [control=['if'], data=[]]
self._manager.collide(other_manager._manager, cdata, fcl.defaultCollisionCallback)
result = cdata.result.is_collision
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
reverse = False
names = (self._extract_name(contact.o1), other_manager._extract_name(contact.o2))
if names[0] is None:
names = (self._extract_name(contact.o2), other_manager._extract_name(contact.o1))
reverse = True # depends on [control=['if'], data=[]]
if return_names:
objs_in_collision.add(names) # depends on [control=['if'], data=[]]
if return_data:
if reverse:
names = reversed(names) # depends on [control=['if'], data=[]]
contact_data.append(ContactData(names, contact)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['contact']] # depends on [control=['if'], data=[]]
if return_names and return_data:
return (result, objs_in_collision, contact_data) # depends on [control=['if'], data=[]]
elif return_names:
return (result, objs_in_collision) # depends on [control=['if'], data=[]]
elif return_data:
return (result, contact_data) # depends on [control=['if'], data=[]]
else:
return result |
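A hedged usage sketch of the pairwise check (the trimesh-style manager construction is an assumption based on the API above, and python-fcl must be installed for the collide call to work):

import trimesh
from trimesh.collision import CollisionManager

a = CollisionManager()
a.add_object('box', trimesh.creation.box())

b = CollisionManager()
b.add_object('ball', trimesh.creation.icosphere())

hit, pairs = a.in_collision_other(b, return_names=True)
# hit   -> True, both meshes are centered at the origin
# pairs -> {('box', 'ball')}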
def _dsp2dot_option(arg):
"""Used to convert the :dmap: option to auto directives."""
# noinspection PyUnusedLocal
def map_args(*args, **kwargs):
from schedula.utils.base import Base
a = inspect.signature(Base.plot).bind(None, *args, **kwargs).arguments
a.popitem(last=False)
return a
kw = eval('map_args(%s)' % arg)
return kw if kw else PLOT | def function[_dsp2dot_option, parameter[arg]]:
constant[Used to convert the :dmap: option to auto directives.]
def function[map_args, parameter[]]:
from relative_module[schedula.utils.base] import module[Base]
variable[a] assign[=] call[call[name[inspect].signature, parameter[name[Base].plot]].bind, parameter[constant[None], <ast.Starred object at 0x7da18c4cc400>]].arguments
call[name[a].popitem, parameter[]]
return[name[a]]
variable[kw] assign[=] call[name[eval], parameter[binary_operation[constant[map_args(%s)] <ast.Mod object at 0x7da2590d6920> name[arg]]]]
return[<ast.IfExp object at 0x7da18f09db70>] | keyword[def] identifier[_dsp2dot_option] ( identifier[arg] ):
literal[string]
keyword[def] identifier[map_args] (* identifier[args] ,** identifier[kwargs] ):
keyword[from] identifier[schedula] . identifier[utils] . identifier[base] keyword[import] identifier[Base]
identifier[a] = identifier[inspect] . identifier[signature] ( identifier[Base] . identifier[plot] ). identifier[bind] ( keyword[None] ,* identifier[args] ,** identifier[kwargs] ). identifier[arguments]
identifier[a] . identifier[popitem] ( identifier[last] = keyword[False] )
keyword[return] identifier[a]
identifier[kw] = identifier[eval] ( literal[string] % identifier[arg] )
keyword[return] identifier[kw] keyword[if] identifier[kw] keyword[else] identifier[PLOT] | def _dsp2dot_option(arg):
"""Used to convert the :dmap: option to auto directives."""
# noinspection PyUnusedLocal
def map_args(*args, **kwargs):
from schedula.utils.base import Base
a = inspect.signature(Base.plot).bind(None, *args, **kwargs).arguments
a.popitem(last=False)
return a
kw = eval('map_args(%s)' % arg)
return kw if kw else PLOT |
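In effect the directive option string becomes the keyword arguments of schedula's Base.plot, with the bound placeholder for self dropped. A reduced illustration of the binding trick against a plain stand-in function (the OrderedDict wrapper is added here because BoundArguments.arguments is a plain dict on Python 3.9+):

import inspect
from collections import OrderedDict

def plot(self, workflow=False, depth=-1, **kwargs):
    ...

def map_args(*args, **kwargs):
    a = OrderedDict(inspect.signature(plot).bind(None, *args, **kwargs).arguments)
    a.popitem(last=False)                 # drop the bound 'self' placeholder
    return a

eval('map_args(%s)' % 'workflow=True, depth=1')
# -> OrderedDict([('workflow', True), ('depth', 1)])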
def start_datetime(self) -> Optional[datetime.datetime]:
"""
Returns the start date of the set of intervals, or ``None`` if empty.
"""
if not self.intervals:
return None
return self.intervals[0].start | def function[start_datetime, parameter[self]]:
constant[
Returns the start date of the set of intervals, or ``None`` if empty.
]
if <ast.UnaryOp object at 0x7da1b18e6650> begin[:]
return[constant[None]]
return[call[name[self].intervals][constant[0]].start] | keyword[def] identifier[start_datetime] ( identifier[self] )-> identifier[Optional] [ identifier[datetime] . identifier[datetime] ]:
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[intervals] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[intervals] [ literal[int] ]. identifier[start] | def start_datetime(self) -> Optional[datetime.datetime]:
"""
Returns the start date of the set of intervals, or ``None`` if empty.
"""
if not self.intervals:
return None # depends on [control=['if'], data=[]]
return self.intervals[0].start |
def delete_api_key(self, api_key, **kwargs): # noqa: E501
"""Delete API key. # noqa: E501
An endpoint for deleting the API key. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/{apikey-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_api_key(api_key, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str api_key: The ID of the API key to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_api_key_with_http_info(api_key, **kwargs) # noqa: E501
else:
(data) = self.delete_api_key_with_http_info(api_key, **kwargs) # noqa: E501
return data | def function[delete_api_key, parameter[self, api_key]]:
constant[Delete API key. # noqa: E501
An endpoint for deleting the API key. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/{apikey-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_api_key(api_key, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str api_key: The ID of the API key to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
return[call[name[self].delete_api_key_with_http_info, parameter[name[api_key]]]] | keyword[def] identifier[delete_api_key] ( identifier[self] , identifier[api_key] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[delete_api_key_with_http_info] ( identifier[api_key] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[delete_api_key_with_http_info] ( identifier[api_key] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_api_key(self, api_key, **kwargs): # noqa: E501
"Delete API key. # noqa: E501\n\n An endpoint for deleting the API key. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/{apikey-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.delete_api_key(api_key, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str api_key: The ID of the API key to be deleted. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_api_key_with_http_info(api_key, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.delete_api_key_with_http_info(api_key, **kwargs) # noqa: E501
return data |
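A hedged usage sketch; the client class name is hypothetical, only the method shown above is taken from this file:

api = DeveloperApi()                                 # hypothetical client
api.delete_api_key('ak_0161234567890abcdef')        # blocks, returns None

thread = api.delete_api_key('ak_0161234567890abcdef', asynchronous=True)
thread.get()                                         # resolves when the DELETE completes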
def addParts(self, part_id, parent_id, part_relationship=None):
"""
This will add a has_part (or subproperty) relationship between
a parent_id and the supplied part.
By default the relationship will be BFO:has_part,
but any relationship could be given here.
:param part_id:
:param parent_id:
:param part_relationship:
:return:
"""
        if part_relationship is None:
            part_relationship = self.globaltt['has_part']
        # Fail loudly if parent or child identifiers are None
        if parent_id is None:
            raise TypeError('Attempt to pass None as parent')
        if part_id is None:
            raise TypeError('Attempt to pass None as child')
self.graph.addTriple(parent_id, part_relationship, part_id)
return | def function[addParts, parameter[self, part_id, parent_id, part_relationship]]:
constant[
This will add a has_part (or subproperty) relationship between
a parent_id and the supplied part.
By default the relationship will be BFO:has_part,
but any relationship could be given here.
:param part_id:
:param parent_id:
:param part_relationship:
:return:
]
if compare[name[part_relationship] is constant[None]] begin[:]
variable[part_relationship] assign[=] call[name[self].globaltt][constant[has_part]]
if compare[name[parent_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc04310>
call[name[self].graph.addTriple, parameter[name[parent_id], name[part_relationship], name[part_id]]]
return[None] | keyword[def] identifier[addParts] ( identifier[self] , identifier[part_id] , identifier[parent_id] , identifier[part_relationship] = keyword[None] ):
literal[string]
keyword[if] identifier[part_relationship] keyword[is] keyword[None] :
identifier[part_relationship] = identifier[self] . identifier[globaltt] [ literal[string] ]
keyword[if] identifier[parent_id] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[elif] identifier[part_id] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[elif] identifier[part_relationship] keyword[is] keyword[None] :
identifier[part_relationship] = identifier[self] . identifier[globaltt] [ literal[string] ]
identifier[self] . identifier[graph] . identifier[addTriple] ( identifier[parent_id] , identifier[part_relationship] , identifier[part_id] )
keyword[return] | def addParts(self, part_id, parent_id, part_relationship=None):
"""
This will add a has_part (or subproperty) relationship between
a parent_id and the supplied part.
By default the relationship will be BFO:has_part,
but any relationship could be given here.
:param part_id:
:param parent_id:
:param part_relationship:
:return:
"""
if part_relationship is None:
part_relationship = self.globaltt['has_part'] # depends on [control=['if'], data=['part_relationship']]
# Fail loudly if parent or child identifiers are None
if parent_id is None:
raise TypeError('Attempt to pass None as parent') # depends on [control=['if'], data=[]]
elif part_id is None:
raise TypeError('Attempt to pass None as child') # depends on [control=['if'], data=[]]
elif part_relationship is None:
part_relationship = self.globaltt['has_part'] # depends on [control=['if'], data=['part_relationship']]
self.graph.addTriple(parent_id, part_relationship, part_id)
return |
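A usage sketch under assumed identifiers (the model instance and CURIEs are illustrative; only the has_part default comes from the code above):

model.addParts('GENO:0000002', 'NCBIGene:1234')      # defaults to has_part
model.addParts('GENO:0000002', 'NCBIGene:1234',
               model.globaltt['has_part'])           # or pass the relationship explicitly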
def get_old_sha(diff_part):
"""
Returns the SHA for the original file that was changed in a diff part.
"""
r = re.compile(r'index ([a-fA-F\d]*)')
return r.search(diff_part).groups()[0] | def function[get_old_sha, parameter[diff_part]]:
constant[
Returns the SHA for the original file that was changed in a diff part.
]
variable[r] assign[=] call[name[re].compile, parameter[constant[index ([a-fA-F\d]*)]]]
return[call[call[call[name[r].search, parameter[name[diff_part]]].groups, parameter[]]][constant[0]]] | keyword[def] identifier[get_old_sha] ( identifier[diff_part] ):
literal[string]
identifier[r] = identifier[re] . identifier[compile] ( literal[string] )
keyword[return] identifier[r] . identifier[search] ( identifier[diff_part] ). identifier[groups] ()[ literal[int] ] | def get_old_sha(diff_part):
"""
Returns the SHA for the original file that was changed in a diff part.
"""
r = re.compile('index ([a-fA-F\\d]*)')
return r.search(diff_part).groups()[0] |
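Given a typical git diff header, the capture group picks up the pre-image blob hash:

part = 'diff --git a/f.py b/f.py\nindex 1f3a9c0..8d2b771 100644'
get_old_sha(part)   # -> '1f3a9c0'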
def on_electrode_states_set(self, states):
'''
Render and draw updated **static** electrode actuations layer on
canvas.
'''
if (self.canvas_slave.electrode_states
.equals(states['electrode_states'])):
return
self.canvas_slave.electrode_states = states['electrode_states']
surface = self.canvas_slave.render_static_electrode_state_shapes()
self.canvas_slave.set_surface('static_electrode_state_shapes', surface)
self.canvas_slave.cairo_surface = flatten_surfaces(self.canvas_slave
.df_surfaces)
gobject.idle_add(self.canvas_slave.draw) | def function[on_electrode_states_set, parameter[self, states]]:
constant[
Render and draw updated **static** electrode actuations layer on
canvas.
]
if call[name[self].canvas_slave.electrode_states.equals, parameter[call[name[states]][constant[electrode_states]]]] begin[:]
return[None]
name[self].canvas_slave.electrode_states assign[=] call[name[states]][constant[electrode_states]]
variable[surface] assign[=] call[name[self].canvas_slave.render_static_electrode_state_shapes, parameter[]]
call[name[self].canvas_slave.set_surface, parameter[constant[static_electrode_state_shapes], name[surface]]]
name[self].canvas_slave.cairo_surface assign[=] call[name[flatten_surfaces], parameter[name[self].canvas_slave.df_surfaces]]
call[name[gobject].idle_add, parameter[name[self].canvas_slave.draw]] | keyword[def] identifier[on_electrode_states_set] ( identifier[self] , identifier[states] ):
literal[string]
keyword[if] ( identifier[self] . identifier[canvas_slave] . identifier[electrode_states]
. identifier[equals] ( identifier[states] [ literal[string] ])):
keyword[return]
identifier[self] . identifier[canvas_slave] . identifier[electrode_states] = identifier[states] [ literal[string] ]
identifier[surface] = identifier[self] . identifier[canvas_slave] . identifier[render_static_electrode_state_shapes] ()
identifier[self] . identifier[canvas_slave] . identifier[set_surface] ( literal[string] , identifier[surface] )
identifier[self] . identifier[canvas_slave] . identifier[cairo_surface] = identifier[flatten_surfaces] ( identifier[self] . identifier[canvas_slave]
. identifier[df_surfaces] )
identifier[gobject] . identifier[idle_add] ( identifier[self] . identifier[canvas_slave] . identifier[draw] ) | def on_electrode_states_set(self, states):
"""
Render and draw updated **static** electrode actuations layer on
canvas.
"""
if self.canvas_slave.electrode_states.equals(states['electrode_states']):
return # depends on [control=['if'], data=[]]
self.canvas_slave.electrode_states = states['electrode_states']
surface = self.canvas_slave.render_static_electrode_state_shapes()
self.canvas_slave.set_surface('static_electrode_state_shapes', surface)
self.canvas_slave.cairo_surface = flatten_surfaces(self.canvas_slave.df_surfaces)
gobject.idle_add(self.canvas_slave.draw) |
def get_nowait(self):
"""Returns a value from the queue without waiting.
Raises ``QueueEmpty`` if no values are available right now.
"""
new_get = Future()
with self._lock:
if not self._get.done():
raise QueueEmpty
get, self._get = self._get, new_get
hole = get.result()
if not hole.done():
# Restore the unfinished hole.
new_get.set_result(hole)
raise QueueEmpty
node = hole.result()
value = node.value
new_hole, node.next = node.next, None
new_get.set_result(new_hole)
return value | def function[get_nowait, parameter[self]]:
constant[Returns a value from the queue without waiting.
Raises ``QueueEmpty`` if no values are available right now.
]
variable[new_get] assign[=] call[name[Future], parameter[]]
with name[self]._lock begin[:]
if <ast.UnaryOp object at 0x7da20c6e56f0> begin[:]
<ast.Raise object at 0x7da20c6e59f0>
<ast.Tuple object at 0x7da20c6e4790> assign[=] tuple[[<ast.Attribute object at 0x7da20c6e6dd0>, <ast.Name object at 0x7da20c6e7130>]]
variable[hole] assign[=] call[name[get].result, parameter[]]
if <ast.UnaryOp object at 0x7da20c6e4c40> begin[:]
call[name[new_get].set_result, parameter[name[hole]]]
<ast.Raise object at 0x7da20c6e6d40>
variable[node] assign[=] call[name[hole].result, parameter[]]
variable[value] assign[=] name[node].value
<ast.Tuple object at 0x7da20c6e5e10> assign[=] tuple[[<ast.Attribute object at 0x7da20c6e5690>, <ast.Constant object at 0x7da20c6e7730>]]
call[name[new_get].set_result, parameter[name[new_hole]]]
return[name[value]] | keyword[def] identifier[get_nowait] ( identifier[self] ):
literal[string]
identifier[new_get] = identifier[Future] ()
keyword[with] identifier[self] . identifier[_lock] :
keyword[if] keyword[not] identifier[self] . identifier[_get] . identifier[done] ():
keyword[raise] identifier[QueueEmpty]
identifier[get] , identifier[self] . identifier[_get] = identifier[self] . identifier[_get] , identifier[new_get]
identifier[hole] = identifier[get] . identifier[result] ()
keyword[if] keyword[not] identifier[hole] . identifier[done] ():
identifier[new_get] . identifier[set_result] ( identifier[hole] )
keyword[raise] identifier[QueueEmpty]
identifier[node] = identifier[hole] . identifier[result] ()
identifier[value] = identifier[node] . identifier[value]
identifier[new_hole] , identifier[node] . identifier[next] = identifier[node] . identifier[next] , keyword[None]
identifier[new_get] . identifier[set_result] ( identifier[new_hole] )
keyword[return] identifier[value] | def get_nowait(self):
"""Returns a value from the queue without waiting.
Raises ``QueueEmpty`` if no values are available right now.
"""
new_get = Future()
with self._lock:
if not self._get.done():
raise QueueEmpty # depends on [control=['if'], data=[]]
(get, self._get) = (self._get, new_get) # depends on [control=['with'], data=[]]
hole = get.result()
if not hole.done():
# Restore the unfinished hole.
new_get.set_result(hole)
raise QueueEmpty # depends on [control=['if'], data=[]]
node = hole.result()
value = node.value
(new_hole, node.next) = (node.next, None)
new_get.set_result(new_hole)
return value |
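The queue above is a linked list of futures: _get always resolves to the current "hole"; a finished hole carries the next node, an unfinished one means nothing is queued yet. A minimal standalone sketch of that hand-off (the put side is assumed, since only get_nowait is shown here):

from concurrent.futures import Future

class Node:
    def __init__(self, value):
        self.value = value
        self.next = Future()          # the hole that follows this node

get = Future()
hole = Future()
get.set_result(hole)                  # empty queue: a hole exists, unfinished

hole.set_result(Node(42))             # put(42): fill the hole with a node

node = get.result().result()          # get_nowait: hole is done, take the node
new_get = Future()
new_get.set_result(node.next)         # advance past the consumed node
assert node.value == 42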
def get_fields(brain_or_object):
"""Get the list of fields from the object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: List of fields
:rtype: list
"""
obj = get_object(brain_or_object)
# The portal object has no schema
if is_root(obj):
return {}
schema = get_schema(obj)
if is_dexterity_content(obj):
names = schema.names()
fields = map(lambda name: schema.get(name), names)
schema_fields = dict(zip(names, fields))
# update with behavior fields
schema_fields.update(get_behaviors(obj))
return schema_fields
return dict(zip(schema.keys(), schema.fields())) | def function[get_fields, parameter[brain_or_object]]:
constant[Get the list of fields from the object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: List of fields
:rtype: list
]
variable[obj] assign[=] call[name[get_object], parameter[name[brain_or_object]]]
if call[name[is_root], parameter[name[obj]]] begin[:]
return[dictionary[[], []]]
variable[schema] assign[=] call[name[get_schema], parameter[name[obj]]]
if call[name[is_dexterity_content], parameter[name[obj]]] begin[:]
variable[names] assign[=] call[name[schema].names, parameter[]]
variable[fields] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b26a7a60>, name[names]]]
variable[schema_fields] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[names], name[fields]]]]]
call[name[schema_fields].update, parameter[call[name[get_behaviors], parameter[name[obj]]]]]
return[name[schema_fields]]
return[call[name[dict], parameter[call[name[zip], parameter[call[name[schema].keys, parameter[]], call[name[schema].fields, parameter[]]]]]]] | keyword[def] identifier[get_fields] ( identifier[brain_or_object] ):
literal[string]
identifier[obj] = identifier[get_object] ( identifier[brain_or_object] )
keyword[if] identifier[is_root] ( identifier[obj] ):
keyword[return] {}
identifier[schema] = identifier[get_schema] ( identifier[obj] )
keyword[if] identifier[is_dexterity_content] ( identifier[obj] ):
identifier[names] = identifier[schema] . identifier[names] ()
identifier[fields] = identifier[map] ( keyword[lambda] identifier[name] : identifier[schema] . identifier[get] ( identifier[name] ), identifier[names] )
identifier[schema_fields] = identifier[dict] ( identifier[zip] ( identifier[names] , identifier[fields] ))
identifier[schema_fields] . identifier[update] ( identifier[get_behaviors] ( identifier[obj] ))
keyword[return] identifier[schema_fields]
keyword[return] identifier[dict] ( identifier[zip] ( identifier[schema] . identifier[keys] (), identifier[schema] . identifier[fields] ())) | def get_fields(brain_or_object):
"""Get the list of fields from the object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: List of fields
:rtype: list
"""
obj = get_object(brain_or_object)
# The portal object has no schema
if is_root(obj):
return {} # depends on [control=['if'], data=[]]
schema = get_schema(obj)
if is_dexterity_content(obj):
names = schema.names()
fields = map(lambda name: schema.get(name), names)
schema_fields = dict(zip(names, fields))
# update with behavior fields
schema_fields.update(get_behaviors(obj))
return schema_fields # depends on [control=['if'], data=[]]
return dict(zip(schema.keys(), schema.fields())) |
def format_spec_to_regex(field_name, format_spec):
"""Make an attempt at converting a format spec to a regular expression."""
# NOTE: remove escaped backslashes so regex matches
regex_match = fmt_spec_regex.match(format_spec.replace('\\', ''))
if regex_match is None:
raise ValueError("Invalid format specification: '{}'".format(format_spec))
regex_dict = regex_match.groupdict()
fill = regex_dict['fill']
ftype = regex_dict['type']
width = regex_dict['width']
align = regex_dict['align']
# NOTE: does not properly handle `=` alignment
if fill is None:
if width is not None and width[0] == '0':
fill = '0'
elif ftype in ['s', 'd']:
fill = ' '
char_type = spec_regexes[ftype]
if ftype == 's' and align and align.endswith('='):
raise ValueError("Invalid format specification: '{}'".format(format_spec))
final_regex = char_type
if ftype in allow_multiple and (not width or width == '0'):
final_regex += r'*'
elif width and width != '0':
if not fill:
# we know we have exactly this many characters
final_regex += r'{{{}}}'.format(int(width))
elif fill:
# we don't know how many fill characters we have compared to
# field characters so just match all characters and sort it out
# later during type conversion.
final_regex = r'.{{{}}}'.format(int(width))
elif ftype in allow_multiple:
final_regex += r'*'
return r'(?P<{}>{})'.format(field_name, final_regex) | def function[format_spec_to_regex, parameter[field_name, format_spec]]:
constant[Make an attempt at converting a format spec to a regular expression.]
variable[regex_match] assign[=] call[name[fmt_spec_regex].match, parameter[call[name[format_spec].replace, parameter[constant[\], constant[]]]]]
if compare[name[regex_match] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b25ee800>
variable[regex_dict] assign[=] call[name[regex_match].groupdict, parameter[]]
variable[fill] assign[=] call[name[regex_dict]][constant[fill]]
variable[ftype] assign[=] call[name[regex_dict]][constant[type]]
variable[width] assign[=] call[name[regex_dict]][constant[width]]
variable[align] assign[=] call[name[regex_dict]][constant[align]]
if compare[name[fill] is constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b25ee7d0> begin[:]
variable[fill] assign[=] constant[0]
variable[char_type] assign[=] call[name[spec_regexes]][name[ftype]]
if <ast.BoolOp object at 0x7da1b25ef520> begin[:]
<ast.Raise object at 0x7da1b25eded0>
variable[final_regex] assign[=] name[char_type]
if <ast.BoolOp object at 0x7da1b25eddb0> begin[:]
<ast.AugAssign object at 0x7da1b25ef5e0>
return[call[constant[(?P<{}>{})].format, parameter[name[field_name], name[final_regex]]]] | keyword[def] identifier[format_spec_to_regex] ( identifier[field_name] , identifier[format_spec] ):
literal[string]
identifier[regex_match] = identifier[fmt_spec_regex] . identifier[match] ( identifier[format_spec] . identifier[replace] ( literal[string] , literal[string] ))
keyword[if] identifier[regex_match] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[format_spec] ))
identifier[regex_dict] = identifier[regex_match] . identifier[groupdict] ()
identifier[fill] = identifier[regex_dict] [ literal[string] ]
identifier[ftype] = identifier[regex_dict] [ literal[string] ]
identifier[width] = identifier[regex_dict] [ literal[string] ]
identifier[align] = identifier[regex_dict] [ literal[string] ]
keyword[if] identifier[fill] keyword[is] keyword[None] :
keyword[if] identifier[width] keyword[is] keyword[not] keyword[None] keyword[and] identifier[width] [ literal[int] ]== literal[string] :
identifier[fill] = literal[string]
keyword[elif] identifier[ftype] keyword[in] [ literal[string] , literal[string] ]:
identifier[fill] = literal[string]
identifier[char_type] = identifier[spec_regexes] [ identifier[ftype] ]
keyword[if] identifier[ftype] == literal[string] keyword[and] identifier[align] keyword[and] identifier[align] . identifier[endswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[format_spec] ))
identifier[final_regex] = identifier[char_type]
keyword[if] identifier[ftype] keyword[in] identifier[allow_multiple] keyword[and] ( keyword[not] identifier[width] keyword[or] identifier[width] == literal[string] ):
identifier[final_regex] += literal[string]
keyword[elif] identifier[width] keyword[and] identifier[width] != literal[string] :
keyword[if] keyword[not] identifier[fill] :
identifier[final_regex] += literal[string] . identifier[format] ( identifier[int] ( identifier[width] ))
keyword[elif] identifier[fill] :
identifier[final_regex] = literal[string] . identifier[format] ( identifier[int] ( identifier[width] ))
keyword[elif] identifier[ftype] keyword[in] identifier[allow_multiple] :
identifier[final_regex] += literal[string]
keyword[return] literal[string] . identifier[format] ( identifier[field_name] , identifier[final_regex] ) | def format_spec_to_regex(field_name, format_spec):
"""Make an attempt at converting a format spec to a regular expression."""
# NOTE: remove escaped backslashes so regex matches
regex_match = fmt_spec_regex.match(format_spec.replace('\\', ''))
if regex_match is None:
raise ValueError("Invalid format specification: '{}'".format(format_spec)) # depends on [control=['if'], data=[]]
regex_dict = regex_match.groupdict()
fill = regex_dict['fill']
ftype = regex_dict['type']
width = regex_dict['width']
align = regex_dict['align']
# NOTE: does not properly handle `=` alignment
if fill is None:
if width is not None and width[0] == '0':
fill = '0' # depends on [control=['if'], data=[]]
elif ftype in ['s', 'd']:
fill = ' ' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['fill']]
char_type = spec_regexes[ftype]
if ftype == 's' and align and align.endswith('='):
raise ValueError("Invalid format specification: '{}'".format(format_spec)) # depends on [control=['if'], data=[]]
final_regex = char_type
if ftype in allow_multiple and (not width or width == '0'):
final_regex += '*' # depends on [control=['if'], data=[]]
elif width and width != '0':
if not fill:
# we know we have exactly this many characters
final_regex += '{{{}}}'.format(int(width)) # depends on [control=['if'], data=[]]
elif fill:
# we don't know how many fill characters we have compared to
# field characters so just match all characters and sort it out
# later during type conversion.
final_regex = '.{{{}}}'.format(int(width)) # depends on [control=['if'], data=[]]
elif ftype in allow_multiple:
final_regex += '*' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return '(?P<{}>{})'.format(field_name, final_regex) |
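The module-level tables (fmt_spec_regex, spec_regexes, allow_multiple) are defined elsewhere, so the following is a reduced sketch of the same mapping with an assumed two-entry type table:

import re

spec_regexes = {'d': r'\d', 's': r'\S'}       # assumed subset

def to_regex(field_name, width, ftype):
    body = spec_regexes[ftype]
    body += '{%d}' % int(width) if width else '*'
    return '(?P<%s>%s)' % (field_name, body)

pattern = re.compile(to_regex('year', '4', 'd') + to_regex('rest', '', 's'))
pattern.match('2024-v1').groupdict()
# -> {'year': '2024', 'rest': '-v1'}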
def tablify(*args):
r"""
>>> tablify(range(3), range(10, 13))
[[0, 10], [1, 11], [2, 12]]
"""
table = []
args = [listify(arg) for arg in args]
for row in zip(*args):
r = []
for x in row:
r += listify(x)
table += [r]
return table
return [sum([listify(el) for el in row]) for row in zip(*args)] | def function[tablify, parameter[]]:
constant[
>>> tablify(range(3), range(10, 13))
[[0, 10], [1, 11], [2, 12]]
]
variable[table] assign[=] list[[]]
variable[args] assign[=] <ast.ListComp object at 0x7da18c4ce650>
for taget[name[row]] in starred[call[name[zip], parameter[<ast.Starred object at 0x7da18c4cd630>]]] begin[:]
variable[r] assign[=] list[[]]
for taget[name[x]] in starred[name[row]] begin[:]
<ast.AugAssign object at 0x7da18c4cc550>
<ast.AugAssign object at 0x7da18c4cedd0>
return[name[table]]
return[<ast.ListComp object at 0x7da18c4ccb80>] | keyword[def] identifier[tablify] (* identifier[args] ):
literal[string]
identifier[table] =[]
identifier[args] =[ identifier[listify] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[args] ]
keyword[for] identifier[row] keyword[in] identifier[zip] (* identifier[args] ):
identifier[r] =[]
keyword[for] identifier[x] keyword[in] identifier[row] :
identifier[r] += identifier[listify] ( identifier[x] )
identifier[table] +=[ identifier[r] ]
keyword[return] identifier[table]
keyword[return] [ identifier[sum] ([ identifier[listify] ( identifier[el] ) keyword[for] identifier[el] keyword[in] identifier[row] ]) keyword[for] identifier[row] keyword[in] identifier[zip] (* identifier[args] )] | def tablify(*args):
"""
>>> tablify(range(3), range(10, 13))
[[0, 10], [1, 11], [2, 12]]
"""
table = []
args = [listify(arg) for arg in args]
for row in zip(*args):
r = []
for x in row:
r += listify(x) # depends on [control=['for'], data=['x']]
table += [r] # depends on [control=['for'], data=['row']]
return table
return [sum([listify(el) for el in row]) for row in zip(*args)] |
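tablify leans on a listify helper that is not shown in this file; a plausible definition and the row flattening it produces (the helper body is an assumption):

def listify(x):
    return list(x) if isinstance(x, (list, tuple, range)) else [x]

tablify(range(2), [('a', 'b'), ('c', 'd')])
# -> [[0, 'a', 'b'], [1, 'c', 'd']]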
def _import_plugins(self) -> None:
"""
        Import and register plugins with the plugin manager.
        The pluggy library is used as the plugin manager.
"""
logger.debug('Importing plugins')
self._pm = pluggy.PluginManager('sirbot')
self._pm.add_hookspecs(hookspecs)
for plugin in self.config['sirbot']['plugins']:
try:
p = importlib.import_module(plugin)
except (ModuleNotFoundError, ):
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
p = importlib.import_module(plugin)
else:
raise
self._pm.register(p) | def function[_import_plugins, parameter[self]]:
constant[
Import and register plugin in the plugin manager.
The pluggy library is used as plugin manager.
]
call[name[logger].debug, parameter[constant[Importing plugins]]]
name[self]._pm assign[=] call[name[pluggy].PluginManager, parameter[constant[sirbot]]]
call[name[self]._pm.add_hookspecs, parameter[name[hookspecs]]]
for taget[name[plugin]] in starred[call[call[name[self].config][constant[sirbot]]][constant[plugins]]] begin[:]
<ast.Try object at 0x7da20c6c4be0>
call[name[self]._pm.register, parameter[name[p]]] | keyword[def] identifier[_import_plugins] ( identifier[self] )-> keyword[None] :
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_pm] = identifier[pluggy] . identifier[PluginManager] ( literal[string] )
identifier[self] . identifier[_pm] . identifier[add_hookspecs] ( identifier[hookspecs] )
keyword[for] identifier[plugin] keyword[in] identifier[self] . identifier[config] [ literal[string] ][ literal[string] ]:
keyword[try] :
identifier[p] = identifier[importlib] . identifier[import_module] ( identifier[plugin] )
keyword[except] ( identifier[ModuleNotFoundError] ,):
keyword[if] identifier[os] . identifier[getcwd] () keyword[not] keyword[in] identifier[sys] . identifier[path] :
identifier[sys] . identifier[path] . identifier[append] ( identifier[os] . identifier[getcwd] ())
identifier[p] = identifier[importlib] . identifier[import_module] ( identifier[plugin] )
keyword[else] :
keyword[raise]
identifier[self] . identifier[_pm] . identifier[register] ( identifier[p] ) | def _import_plugins(self) -> None:
"""
Import and register plugin in the plugin manager.
The pluggy library is used as plugin manager.
"""
logger.debug('Importing plugins')
self._pm = pluggy.PluginManager('sirbot')
self._pm.add_hookspecs(hookspecs)
for plugin in self.config['sirbot']['plugins']:
try:
p = importlib.import_module(plugin) # depends on [control=['try'], data=[]]
except (ModuleNotFoundError,):
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
p = importlib.import_module(plugin) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=[]]
self._pm.register(p) # depends on [control=['for'], data=['plugin']] |
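A hedged sketch of what the loader expects: dotted module paths in the config and modules that pluggy can register (the hook name and signature are assumptions; the real contract lives in sirbot's hookspecs):

config = {'sirbot': {'plugins': ['sirbot.plugins.slack', 'myplugins.echo']}}

# myplugins/echo.py
import pluggy

hookimpl = pluggy.HookimplMarker('sirbot')   # must match PluginManager('sirbot')

@hookimpl
def plugins(loop):                           # assumed hook signature
    ...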
def _parse_image_name(self, image, retry=True):
        '''starting with an image string in either of the following formats:
            job_id,collection
            job_id,collection,job_name
           Parse the job_name, job_id, and collection uri from it. If the user
           provides the first option, we use the job_name set by the client
           (default is build).
           Parameters
           ==========
           image: the string to parse, with values separated by commas
           retry: the client can call itself recursively once, providing the
                  default job_name if the user doesn't.
        '''
try:
job_id, collection, job_name = image.split(',')
        except ValueError:
# Retry and add job_name
if retry:
return self._parse_image_name("%s,%s" %(image, self.job),
retry=False)
# Or fail
bot.exit('''Malformed image string! Please provide:
job_id,collection (or)
job_id,collection,job_name''')
return job_id, collection, job_name | def function[_parse_image_name, parameter[self, image, retry]]:
constant[starting with an image string in either of the following formats:
job_id|collection
job_id|collection|job_name
Parse the job_name, job_id, and collection uri from it. If the user
provides the first option, we use the job_name set by the client
(default is build).
Parameters
==========
image: the string to parse, with values separated by |
retry: the client can call itself recursively once, providing the
default job_name if the user doesn't.
]
<ast.Try object at 0x7da1b0388eb0>
return[tuple[[<ast.Name object at 0x7da1b03b9690>, <ast.Name object at 0x7da1b03b9ae0>, <ast.Name object at 0x7da1b03b8940>]]] | keyword[def] identifier[_parse_image_name] ( identifier[self] , identifier[image] , identifier[retry] = keyword[True] ):
literal[string]
keyword[try] :
identifier[job_id] , identifier[collection] , identifier[job_name] = identifier[image] . identifier[split] ( literal[string] )
keyword[except] :
keyword[if] identifier[retry] :
keyword[return] identifier[self] . identifier[_parse_image_name] ( literal[string] %( identifier[image] , identifier[self] . identifier[job] ),
identifier[retry] = keyword[False] )
identifier[bot] . identifier[exit] ( literal[string] )
keyword[return] identifier[job_id] , identifier[collection] , identifier[job_name] | def _parse_image_name(self, image, retry=True):
"""starting with an image string in either of the following formats:
job_id|collection
job_id|collection|job_name
Parse the job_name, job_id, and collection uri from it. If the user
provides the first option, we use the job_name set by the client
(default is build).
Parameters
==========
image: the string to parse, with values separated by |
retry: the client can call itself recursively once, providing the
default job_name if the user doesn't.
"""
try:
(job_id, collection, job_name) = image.split(',') # depends on [control=['try'], data=[]]
except:
# Retry and add job_name
if retry:
return self._parse_image_name('%s,%s' % (image, self.job), retry=False) # depends on [control=['if'], data=[]]
# Or fail
bot.exit('Malformed image string! Please provide:\n job_id,collection (or)\n job_id,collection,job_name') # depends on [control=['except'], data=[]]
return (job_id, collection, job_name) |
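Assumed round trips for the two accepted forms (client setup elided; self.job is taken to default to 'build'):

client._parse_image_name('1234,mycollection')
# retried once as '1234,mycollection,build'
# -> ('1234', 'mycollection', 'build')

client._parse_image_name('1234,mycollection,deploy')
# -> ('1234', 'mycollection', 'deploy')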
async def generate_psk(self, security_key):
"""Generate and set a psk from the security key."""
if not self._psk:
PatchedDTLSSecurityStore.IDENTITY = 'Client_identity'.encode(
'utf-8')
PatchedDTLSSecurityStore.KEY = security_key.encode('utf-8')
command = Gateway().generate_psk(self._psk_id)
self._psk = await self.request(command)
PatchedDTLSSecurityStore.IDENTITY = self._psk_id.encode('utf-8')
PatchedDTLSSecurityStore.KEY = self._psk.encode('utf-8')
# aiocoap has now cached our psk, so it must be reset.
# We also no longer need the protocol, so this will clean that up.
await self._reset_protocol()
return self._psk | <ast.AsyncFunctionDef object at 0x7da18ede69b0> | keyword[async] keyword[def] identifier[generate_psk] ( identifier[self] , identifier[security_key] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_psk] :
identifier[PatchedDTLSSecurityStore] . identifier[IDENTITY] = literal[string] . identifier[encode] (
literal[string] )
identifier[PatchedDTLSSecurityStore] . identifier[KEY] = identifier[security_key] . identifier[encode] ( literal[string] )
identifier[command] = identifier[Gateway] (). identifier[generate_psk] ( identifier[self] . identifier[_psk_id] )
identifier[self] . identifier[_psk] = keyword[await] identifier[self] . identifier[request] ( identifier[command] )
identifier[PatchedDTLSSecurityStore] . identifier[IDENTITY] = identifier[self] . identifier[_psk_id] . identifier[encode] ( literal[string] )
identifier[PatchedDTLSSecurityStore] . identifier[KEY] = identifier[self] . identifier[_psk] . identifier[encode] ( literal[string] )
keyword[await] identifier[self] . identifier[_reset_protocol] ()
keyword[return] identifier[self] . identifier[_psk] | async def generate_psk(self, security_key):
"""Generate and set a psk from the security key."""
if not self._psk:
PatchedDTLSSecurityStore.IDENTITY = 'Client_identity'.encode('utf-8')
PatchedDTLSSecurityStore.KEY = security_key.encode('utf-8')
command = Gateway().generate_psk(self._psk_id)
self._psk = await self.request(command)
PatchedDTLSSecurityStore.IDENTITY = self._psk_id.encode('utf-8')
PatchedDTLSSecurityStore.KEY = self._psk.encode('utf-8')
# aiocoap has now cached our psk, so it must be reset.
# We also no longer need the protocol, so this will clean that up.
await self._reset_protocol() # depends on [control=['if'], data=[]]
return self._psk |
def evaluate_inline(self, groups):
"""Evaluate inline comments on their own lines."""
        # Consecutive lines containing only comments, with the same leading
        # whitespace, will be captured as a single block.
if self.lines:
if (
self.group_comments and
self.line_num == self.prev_line + 1 and
groups['leading_space'] == self.leading
):
self.line_comments[-1][0] += '\n' + groups['line'][2:].replace('\\\n', '')
else:
self.line_comments.append(
[groups['line'][2:].replace('\\\n', ''), self.line_num, self.current_encoding]
)
self.leading = groups['leading_space']
self.prev_line = self.line_num | def function[evaluate_inline, parameter[self, groups]]:
constant[Evaluate inline comments on their own lines.]
if name[self].lines begin[:]
if <ast.BoolOp object at 0x7da2054a56f0> begin[:]
<ast.AugAssign object at 0x7da18c4ccd30>
name[self].leading assign[=] call[name[groups]][constant[leading_space]]
name[self].prev_line assign[=] name[self].line_num | keyword[def] identifier[evaluate_inline] ( identifier[self] , identifier[groups] ):
literal[string]
keyword[if] identifier[self] . identifier[lines] :
keyword[if] (
identifier[self] . identifier[group_comments] keyword[and]
identifier[self] . identifier[line_num] == identifier[self] . identifier[prev_line] + literal[int] keyword[and]
identifier[groups] [ literal[string] ]== identifier[self] . identifier[leading]
):
identifier[self] . identifier[line_comments] [- literal[int] ][ literal[int] ]+= literal[string] + identifier[groups] [ literal[string] ][ literal[int] :]. identifier[replace] ( literal[string] , literal[string] )
keyword[else] :
identifier[self] . identifier[line_comments] . identifier[append] (
[ identifier[groups] [ literal[string] ][ literal[int] :]. identifier[replace] ( literal[string] , literal[string] ), identifier[self] . identifier[line_num] , identifier[self] . identifier[current_encoding] ]
)
identifier[self] . identifier[leading] = identifier[groups] [ literal[string] ]
identifier[self] . identifier[prev_line] = identifier[self] . identifier[line_num] | def evaluate_inline(self, groups):
"""Evaluate inline comments on their own lines."""
# Consecutive lines with only comments with same leading whitespace
# will be captured as a single block.
if self.lines:
if self.group_comments and self.line_num == self.prev_line + 1 and (groups['leading_space'] == self.leading):
self.line_comments[-1][0] += '\n' + groups['line'][2:].replace('\\\n', '') # depends on [control=['if'], data=[]]
else:
self.line_comments.append([groups['line'][2:].replace('\\\n', ''), self.line_num, self.current_encoding])
self.leading = groups['leading_space']
self.prev_line = self.line_num # depends on [control=['if'], data=[]] |
def report_by_type_stats(sect, stats, _):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError:
raise exceptions.EmptyReportError()
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
lines += (
node_type,
str(new),
"NC",
"NC",
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1)) | def function[report_by_type_stats, parameter[sect, stats, _]]:
constant[make a report of
* percentage of different types documented
* percentage of different types with a bad name
]
variable[nice_stats] assign[=] dictionary[[], []]
for taget[name[node_type]] in starred[tuple[[<ast.Constant object at 0x7da1b02969e0>, <ast.Constant object at 0x7da1b0295a20>, <ast.Constant object at 0x7da1b0294430>, <ast.Constant object at 0x7da1b02960b0>]]] begin[:]
<ast.Try object at 0x7da1b0297eb0>
call[name[nice_stats]][name[node_type]] assign[=] dictionary[[], []]
if compare[name[total] not_equal[!=] constant[0]] begin[:]
<ast.Try object at 0x7da1b059fa60>
<ast.Try object at 0x7da1b059ce20>
variable[lines] assign[=] tuple[[<ast.Constant object at 0x7da1b0314250>, <ast.Constant object at 0x7da1b03146a0>, <ast.Constant object at 0x7da1b03156f0>, <ast.Constant object at 0x7da1b0317280>, <ast.Constant object at 0x7da1b03157e0>, <ast.Constant object at 0x7da1b0317b20>]]
for taget[name[node_type]] in starred[tuple[[<ast.Constant object at 0x7da1b0317490>, <ast.Constant object at 0x7da1b0316770>, <ast.Constant object at 0x7da1b03151b0>, <ast.Constant object at 0x7da1b0315210>]]] begin[:]
variable[new] assign[=] call[name[stats]][name[node_type]]
<ast.AugAssign object at 0x7da1b0315240>
call[name[sect].append, parameter[call[name[reporter_nodes].Table, parameter[]]]] | keyword[def] identifier[report_by_type_stats] ( identifier[sect] , identifier[stats] , identifier[_] ):
literal[string]
identifier[nice_stats] ={}
keyword[for] identifier[node_type] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[try] :
identifier[total] = identifier[stats] [ identifier[node_type] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[exceptions] . identifier[EmptyReportError] ()
identifier[nice_stats] [ identifier[node_type] ]={}
keyword[if] identifier[total] != literal[int] :
keyword[try] :
identifier[documented] = identifier[total] - identifier[stats] [ literal[string] + identifier[node_type] ]
identifier[percent] =( identifier[documented] * literal[int] )/ identifier[total]
identifier[nice_stats] [ identifier[node_type] ][ literal[string] ]= literal[string] % identifier[percent]
keyword[except] identifier[KeyError] :
identifier[nice_stats] [ identifier[node_type] ][ literal[string] ]= literal[string]
keyword[try] :
identifier[percent] =( identifier[stats] [ literal[string] + identifier[node_type] ]* literal[int] )/ identifier[total]
identifier[nice_stats] [ identifier[node_type] ][ literal[string] ]= literal[string] % identifier[percent]
keyword[except] identifier[KeyError] :
identifier[nice_stats] [ identifier[node_type] ][ literal[string] ]= literal[string]
identifier[lines] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
keyword[for] identifier[node_type] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[new] = identifier[stats] [ identifier[node_type] ]
identifier[lines] +=(
identifier[node_type] ,
identifier[str] ( identifier[new] ),
literal[string] ,
literal[string] ,
identifier[nice_stats] [ identifier[node_type] ]. identifier[get] ( literal[string] , literal[string] ),
identifier[nice_stats] [ identifier[node_type] ]. identifier[get] ( literal[string] , literal[string] ),
)
identifier[sect] . identifier[append] ( identifier[reporter_nodes] . identifier[Table] ( identifier[children] = identifier[lines] , identifier[cols] = literal[int] , identifier[rheaders] = literal[int] )) | def report_by_type_stats(sect, stats, _):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ('module', 'class', 'method', 'function'):
try:
total = stats[node_type] # depends on [control=['try'], data=[]]
except KeyError:
raise exceptions.EmptyReportError() # depends on [control=['except'], data=[]]
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats['undocumented_' + node_type]
percent = documented * 100.0 / total
nice_stats[node_type]['percent_documented'] = '%.2f' % percent # depends on [control=['try'], data=[]]
except KeyError:
nice_stats[node_type]['percent_documented'] = 'NC' # depends on [control=['except'], data=[]]
try:
percent = stats['badname_' + node_type] * 100.0 / total
nice_stats[node_type]['percent_badname'] = '%.2f' % percent # depends on [control=['try'], data=[]]
except KeyError:
nice_stats[node_type]['percent_badname'] = 'NC' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['total']] # depends on [control=['for'], data=['node_type']]
lines = ('type', 'number', 'old number', 'difference', '%documented', '%badname')
for node_type in ('module', 'class', 'method', 'function'):
new = stats[node_type]
lines += (node_type, str(new), 'NC', 'NC', nice_stats[node_type].get('percent_documented', '0'), nice_stats[node_type].get('percent_badname', '0')) # depends on [control=['for'], data=['node_type']]
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1)) |
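A small sketch of the `stats` shape this report expects and the percentage arithmetic it applies; the key names follow the code above, the numbers are made up.

stats = {'module': 4, 'undocumented_module': 1, 'badname_module': 2}
total = stats['module']
documented = total - stats['undocumented_module']
print('%.2f' % (documented * 100.0 / total))               # 75.00
print('%.2f' % (stats['badname_module'] * 100.0 / total))  # 50.00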
def _generate_barcode_ids(info_iter):
"""Create unique barcode IDs assigned to sequences
"""
bc_type = "SampleSheet"
barcodes = list(set([x[-1] for x in info_iter]))
barcodes.sort()
barcode_ids = {}
for i, bc in enumerate(barcodes):
barcode_ids[bc] = (bc_type, i+1)
return barcode_ids | def function[_generate_barcode_ids, parameter[info_iter]]:
constant[Create unique barcode IDs assigned to sequences
]
variable[bc_type] assign[=] constant[SampleSheet]
variable[barcodes] assign[=] call[name[list], parameter[call[name[set], parameter[<ast.ListComp object at 0x7da1b17b4a60>]]]]
call[name[barcodes].sort, parameter[]]
variable[barcode_ids] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b178f3d0>, <ast.Name object at 0x7da1b178c6d0>]]] in starred[call[name[enumerate], parameter[name[barcodes]]]] begin[:]
call[name[barcode_ids]][name[bc]] assign[=] tuple[[<ast.Name object at 0x7da1b178ce50>, <ast.BinOp object at 0x7da1b178e980>]]
return[name[barcode_ids]] | keyword[def] identifier[_generate_barcode_ids] ( identifier[info_iter] ):
literal[string]
identifier[bc_type] = literal[string]
identifier[barcodes] = identifier[list] ( identifier[set] ([ identifier[x] [- literal[int] ] keyword[for] identifier[x] keyword[in] identifier[info_iter] ]))
identifier[barcodes] . identifier[sort] ()
identifier[barcode_ids] ={}
keyword[for] identifier[i] , identifier[bc] keyword[in] identifier[enumerate] ( identifier[barcodes] ):
identifier[barcode_ids] [ identifier[bc] ]=( identifier[bc_type] , identifier[i] + literal[int] )
keyword[return] identifier[barcode_ids] | def _generate_barcode_ids(info_iter):
"""Create unique barcode IDs assigned to sequences
"""
bc_type = 'SampleSheet'
barcodes = list(set([x[-1] for x in info_iter]))
barcodes.sort()
barcode_ids = {}
for (i, bc) in enumerate(barcodes):
barcode_ids[bc] = (bc_type, i + 1) # depends on [control=['for'], data=[]]
return barcode_ids |
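Assuming `_generate_barcode_ids` from above is in scope, a quick usage sketch: the barcode is the last element of each info tuple, duplicates collapse, and IDs are assigned in sorted order starting at 1.

info = [('lane1', 'sampleA', 'TTGA'),
        ('lane1', 'sampleB', 'ACGT'),
        ('lane2', 'sampleC', 'ACGT')]
print(_generate_barcode_ids(info))
# {'ACGT': ('SampleSheet', 1), 'TTGA': ('SampleSheet', 2)}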
def drawPath(self, pointList):
"""
Draws a series of lines on the current :py:class:`Layer` with the current :py:class:`Brush`.
        No interpolation is applied to these points and :py:meth:`drawLine` will be used to connect all the points linearly.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param pointList: A list of point like :code:`[(0, 0), (100, 100), (100, 200)]`.
:rtype: Nothing.
"""
self.drawLine(pointList[0][0], pointList[0][1], pointList[1][0], pointList[1][1])
i = 1
while i<len(pointList)-1:
self.drawLine(pointList[i][0], pointList[i][1], pointList[i+1][0], pointList[i+1][1])
i+=1 | def function[drawPath, parameter[self, pointList]]:
constant[
Draws a series of lines on the current :py:class:`Layer` with the current :py:class:`Brush`.
        No interpolation is applied to these points and :py:meth:`drawLine` will be used to connect all the points linearly.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param pointList: A list of point like :code:`[(0, 0), (100, 100), (100, 200)]`.
:rtype: Nothing.
]
call[name[self].drawLine, parameter[call[call[name[pointList]][constant[0]]][constant[0]], call[call[name[pointList]][constant[0]]][constant[1]], call[call[name[pointList]][constant[1]]][constant[0]], call[call[name[pointList]][constant[1]]][constant[1]]]]
variable[i] assign[=] constant[1]
while compare[name[i] less[<] binary_operation[call[name[len], parameter[name[pointList]]] - constant[1]]] begin[:]
call[name[self].drawLine, parameter[call[call[name[pointList]][name[i]]][constant[0]], call[call[name[pointList]][name[i]]][constant[1]], call[call[name[pointList]][binary_operation[name[i] + constant[1]]]][constant[0]], call[call[name[pointList]][binary_operation[name[i] + constant[1]]]][constant[1]]]]
<ast.AugAssign object at 0x7da18f09f850> | keyword[def] identifier[drawPath] ( identifier[self] , identifier[pointList] ):
literal[string]
identifier[self] . identifier[drawLine] ( identifier[pointList] [ literal[int] ][ literal[int] ], identifier[pointList] [ literal[int] ][ literal[int] ], identifier[pointList] [ literal[int] ][ literal[int] ], identifier[pointList] [ literal[int] ][ literal[int] ])
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[pointList] )- literal[int] :
identifier[self] . identifier[drawLine] ( identifier[pointList] [ identifier[i] ][ literal[int] ], identifier[pointList] [ identifier[i] ][ literal[int] ], identifier[pointList] [ identifier[i] + literal[int] ][ literal[int] ], identifier[pointList] [ identifier[i] + literal[int] ][ literal[int] ])
identifier[i] += literal[int] | def drawPath(self, pointList):
"""
Draws a series of lines on the current :py:class:`Layer` with the current :py:class:`Brush`.
        No interpolation is applied to these points and :py:meth:`drawLine` will be used to connect all the points linearly.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param pointList: A list of point like :code:`[(0, 0), (100, 100), (100, 200)]`.
:rtype: Nothing.
"""
self.drawLine(pointList[0][0], pointList[0][1], pointList[1][0], pointList[1][1])
i = 1
while i < len(pointList) - 1:
self.drawLine(pointList[i][0], pointList[i][1], pointList[i + 1][0], pointList[i + 1][1])
i += 1 # depends on [control=['while'], data=['i']] |
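The same pairwise traversal can be expressed with `zip`; a standalone sketch where `draw_line` is a stand-in for the Layer/Brush call above.

def draw_path(points, draw_line):
    # Connect each consecutive pair of points with a straight segment.
    for (x0, y0), (x1, y1) in zip(points, points[1:]):
        draw_line(x0, y0, x1, y1)

draw_path([(0, 0), (100, 100), (100, 200)],
          lambda x0, y0, x1, y1: print('segment', (x0, y0), '->', (x1, y1)))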
def prepare_argparser():
"""Prepare argparser object. New options will be added in this function first."""
description = "%(prog)s -- Gene Set Enrichment Analysis in Python"
epilog = "For command line options of each command, type: %(prog)s COMMAND -h"
# top-level parser
argparser = ap.ArgumentParser(description=description, epilog=epilog)
argparser.add_argument("--version", action="version", version="%(prog)s "+ __version__)
subparsers = argparser.add_subparsers(dest='subcommand_name') #help="sub-command help")
# command for 'gsea'
add_gsea_parser(subparsers)
# command for 'prerank'
add_prerank_parser(subparsers)
# command for 'ssgsea'
add_singlesample_parser(subparsers)
# command for 'plot'
add_plot_parser(subparsers)
# command for 'enrichr'
add_enrichr_parser(subparsers)
# command for 'biomart'
add_biomart_parser(subparsers)
return argparser | def function[prepare_argparser, parameter[]]:
constant[Prepare argparser object. New options will be added in this function first.]
variable[description] assign[=] constant[%(prog)s -- Gene Set Enrichment Analysis in Python]
variable[epilog] assign[=] constant[For command line options of each command, type: %(prog)s COMMAND -h]
variable[argparser] assign[=] call[name[ap].ArgumentParser, parameter[]]
call[name[argparser].add_argument, parameter[constant[--version]]]
variable[subparsers] assign[=] call[name[argparser].add_subparsers, parameter[]]
call[name[add_gsea_parser], parameter[name[subparsers]]]
call[name[add_prerank_parser], parameter[name[subparsers]]]
call[name[add_singlesample_parser], parameter[name[subparsers]]]
call[name[add_plot_parser], parameter[name[subparsers]]]
call[name[add_enrichr_parser], parameter[name[subparsers]]]
call[name[add_biomart_parser], parameter[name[subparsers]]]
return[name[argparser]] | keyword[def] identifier[prepare_argparser] ():
literal[string]
identifier[description] = literal[string]
identifier[epilog] = literal[string]
identifier[argparser] = identifier[ap] . identifier[ArgumentParser] ( identifier[description] = identifier[description] , identifier[epilog] = identifier[epilog] )
identifier[argparser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[version] = literal[string] + identifier[__version__] )
identifier[subparsers] = identifier[argparser] . identifier[add_subparsers] ( identifier[dest] = literal[string] )
identifier[add_gsea_parser] ( identifier[subparsers] )
identifier[add_prerank_parser] ( identifier[subparsers] )
identifier[add_singlesample_parser] ( identifier[subparsers] )
identifier[add_plot_parser] ( identifier[subparsers] )
identifier[add_enrichr_parser] ( identifier[subparsers] )
identifier[add_biomart_parser] ( identifier[subparsers] )
keyword[return] identifier[argparser] | def prepare_argparser():
"""Prepare argparser object. New options will be added in this function first."""
description = '%(prog)s -- Gene Set Enrichment Analysis in Python'
epilog = 'For command line options of each command, type: %(prog)s COMMAND -h'
# top-level parser
argparser = ap.ArgumentParser(description=description, epilog=epilog)
argparser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
subparsers = argparser.add_subparsers(dest='subcommand_name') #help="sub-command help")
# command for 'gsea'
add_gsea_parser(subparsers)
# command for 'prerank'
add_prerank_parser(subparsers)
# command for 'ssgsea'
add_singlesample_parser(subparsers)
# command for 'plot'
add_plot_parser(subparsers)
# command for 'enrichr'
add_enrichr_parser(subparsers)
# command for 'biomart'
add_biomart_parser(subparsers)
return argparser |
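A hedged usage sketch, assuming the add_*_parser helpers referenced above are importable: with no subcommand on the command line, `subcommand_name` is None, which a caller can use to print help.

argparser = prepare_argparser()
args = argparser.parse_args([])   # empty command line
if args.subcommand_name is None:
    argparser.print_help()        # no subcommand was chosen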
def transformer_moe_8k():
"""Hyper parameters specifics for long sequence generation."""
hparams = transformer_moe_base()
hparams.batch_size = 8192
hparams.max_length = 0 # max_length == batch_size
hparams.eval_drop_long_sequences = True
hparams.min_length_bucket = 256 # Avoid cyclic problems for big batches
hparams.default_ff = "sep"
hparams.hidden_size = 1024
return hparams | def function[transformer_moe_8k, parameter[]]:
    constant[Hyperparameter specifics for long sequence generation.]
variable[hparams] assign[=] call[name[transformer_moe_base], parameter[]]
name[hparams].batch_size assign[=] constant[8192]
name[hparams].max_length assign[=] constant[0]
name[hparams].eval_drop_long_sequences assign[=] constant[True]
name[hparams].min_length_bucket assign[=] constant[256]
name[hparams].default_ff assign[=] constant[sep]
name[hparams].hidden_size assign[=] constant[1024]
return[name[hparams]] | keyword[def] identifier[transformer_moe_8k] ():
literal[string]
identifier[hparams] = identifier[transformer_moe_base] ()
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[max_length] = literal[int]
identifier[hparams] . identifier[eval_drop_long_sequences] = keyword[True]
identifier[hparams] . identifier[min_length_bucket] = literal[int]
identifier[hparams] . identifier[default_ff] = literal[string]
identifier[hparams] . identifier[hidden_size] = literal[int]
keyword[return] identifier[hparams] | def transformer_moe_8k():
"""Hyper parameters specifics for long sequence generation."""
hparams = transformer_moe_base()
hparams.batch_size = 8192
hparams.max_length = 0 # max_length == batch_size
hparams.eval_drop_long_sequences = True
hparams.min_length_bucket = 256 # Avoid cyclic problems for big batches
hparams.default_ff = 'sep'
hparams.hidden_size = 1024
return hparams |
def on_open(self, ws):
"""
Callback executed when a connection is opened to the server.
Handles streaming of audio to the server.
:param ws: Websocket client
"""
self.callback.on_connected()
# Send initialization message
init_data = self.build_start_message(self.options)
self.ws_client.send(json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT) | def function[on_open, parameter[self, ws]]:
constant[
Callback executed when a connection is opened to the server.
Handles streaming of audio to the server.
:param ws: Websocket client
]
call[name[self].callback.on_connected, parameter[]]
variable[init_data] assign[=] call[name[self].build_start_message, parameter[name[self].options]]
call[name[self].ws_client.send, parameter[call[call[name[json].dumps, parameter[name[init_data]]].encode, parameter[constant[utf8]]], name[websocket].ABNF.OPCODE_TEXT]] | keyword[def] identifier[on_open] ( identifier[self] , identifier[ws] ):
literal[string]
identifier[self] . identifier[callback] . identifier[on_connected] ()
identifier[init_data] = identifier[self] . identifier[build_start_message] ( identifier[self] . identifier[options] )
identifier[self] . identifier[ws_client] . identifier[send] ( identifier[json] . identifier[dumps] ( identifier[init_data] ). identifier[encode] ( literal[string] ), identifier[websocket] . identifier[ABNF] . identifier[OPCODE_TEXT] ) | def on_open(self, ws):
"""
Callback executed when a connection is opened to the server.
Handles streaming of audio to the server.
:param ws: Websocket client
"""
self.callback.on_connected()
# Send initialization message
init_data = self.build_start_message(self.options)
self.ws_client.send(json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT) |
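A rough standalone sketch of the same send-on-open pattern using the websocket-client package; the endpoint URL and message fields here are hypothetical.

import json
import websocket  # pip install websocket-client

ws = websocket.WebSocket()
ws.connect('wss://example.com/v1/recognize')  # hypothetical endpoint
init_data = {'action': 'start', 'content-type': 'audio/l16;rate=16000'}
ws.send(json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT)
ws.close()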
def chunks(arr, size):
"""Splits a list into chunks
:param arr: list to split
:type arr: :class:`list`
:param size: number of elements in each chunk
:type size: :class:`int`
:return: generator object
:rtype: :class:`generator`
"""
for i in _range(0, len(arr), size):
yield arr[i:i+size] | def function[chunks, parameter[arr, size]]:
constant[Splits a list into chunks
:param arr: list to split
:type arr: :class:`list`
:param size: number of elements in each chunk
:type size: :class:`int`
:return: generator object
:rtype: :class:`generator`
]
for taget[name[i]] in starred[call[name[_range], parameter[constant[0], call[name[len], parameter[name[arr]]], name[size]]]] begin[:]
<ast.Yield object at 0x7da1b2313820> | keyword[def] identifier[chunks] ( identifier[arr] , identifier[size] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[_range] ( literal[int] , identifier[len] ( identifier[arr] ), identifier[size] ):
keyword[yield] identifier[arr] [ identifier[i] : identifier[i] + identifier[size] ] | def chunks(arr, size):
"""Splits a list into chunks
:param arr: list to split
:type arr: :class:`list`
:param size: number of elements in each chunk
:type size: :class:`int`
:return: generator object
:rtype: :class:`generator`
"""
for i in _range(0, len(arr), size):
yield arr[i:i + size] # depends on [control=['for'], data=['i']] |
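Usage sketch, assuming the generator above with `_range` bound to the builtin `range` in the same module: the last chunk may be shorter than `size`.

_range = range  # stand-in for the module's Python 2/3 alias
print(list(chunks([1, 2, 3, 4, 5], 2)))
# [[1, 2], [3, 4], [5]]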
def _merge_dicts(*args):
'''
Shallow copy and merge dicts together, giving precedence to last in.
'''
ret = dict()
for arg in args:
ret.update(arg)
return ret | def function[_merge_dicts, parameter[]]:
constant[
Shallow copy and merge dicts together, giving precedence to last in.
]
variable[ret] assign[=] call[name[dict], parameter[]]
for taget[name[arg]] in starred[name[args]] begin[:]
call[name[ret].update, parameter[name[arg]]]
return[name[ret]] | keyword[def] identifier[_merge_dicts] (* identifier[args] ):
literal[string]
identifier[ret] = identifier[dict] ()
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[ret] . identifier[update] ( identifier[arg] )
keyword[return] identifier[ret] | def _merge_dicts(*args):
"""
Shallow copy and merge dicts together, giving precedence to last in.
"""
ret = dict()
for arg in args:
ret.update(arg) # depends on [control=['for'], data=['arg']]
return ret |
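Usage sketch, assuming the function above is in scope: later arguments win on key collisions, and the inputs are left untouched.

a, b = {'x': 1, 'y': 2}, {'y': 20, 'z': 3}
print(_merge_dicts(a, b))  # {'x': 1, 'y': 20, 'z': 3}
print(a)                   # {'x': 1, 'y': 2}  (unchanged)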
def load(self, filename):
"""
        Updates the settings from a config file in JSON format.
:param filename: filename of the local JSON settings file. If None, the local config file is used.
"""
if filename is None:
filename = LOCALCONFIG
with open(filename, 'r') as fid:
self._conf.update(json.load(fid))
self.add_history('Updated from config file: %s' % filename) | def function[load, parameter[self, filename]]:
constant[
        Updates the settings from a config file in JSON format.
:param filename: filename of the local JSON settings file. If None, the local config file is used.
]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] name[LOCALCONFIG]
with call[name[open], parameter[name[filename], constant[r]]] begin[:]
call[name[self]._conf.update, parameter[call[name[json].load, parameter[name[fid]]]]]
call[name[self].add_history, parameter[binary_operation[constant[Updated from config file: %s] <ast.Mod object at 0x7da2590d6920> name[filename]]]] | keyword[def] identifier[load] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = identifier[LOCALCONFIG]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fid] :
identifier[self] . identifier[_conf] . identifier[update] ( identifier[json] . identifier[load] ( identifier[fid] ))
identifier[self] . identifier[add_history] ( literal[string] % identifier[filename] ) | def load(self, filename):
"""
        Updates the settings from a config file in JSON format.
:param filename: filename of the local JSON settings file. If None, the local config file is used.
"""
if filename is None:
filename = LOCALCONFIG # depends on [control=['if'], data=['filename']]
with open(filename, 'r') as fid:
self._conf.update(json.load(fid)) # depends on [control=['with'], data=['fid']]
self.add_history('Updated from config file: %s' % filename) |
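The core update-from-JSON pattern in isolation, a minimal sketch; the class name is illustrative and the history bookkeeping is omitted.

import json

class Settings(object):
    def __init__(self):
        self._conf = {'debug': False}

    def load(self, filename):
        # Merge keys from the JSON file over the current configuration.
        with open(filename) as fid:
            self._conf.update(json.load(fid))

# Given settings.json containing {"debug": true}:
#   s = Settings(); s.load('settings.json'); s._conf -> {'debug': True}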
def plot_fit(self, **kwargs):
""" Plots the fit of the model
Returns
----------
None (plots data and the fit)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
plt.figure(figsize=figsize)
date_index = self.index[max(self.p, self.q):]
t_params = self.transform_z()
sigma2, Y, ___ = self._model(self.latent_variables.get_z_values())
plt.plot(date_index, np.abs(Y-t_params[-1]), label=self.data_name + ' Absolute Demeaned Values')
plt.plot(date_index, np.power(sigma2,0.5), label='GARCH(' + str(self.p) + ',' + str(self.q) + ') Conditional Volatility',c='black')
plt.title(self.data_name + " Volatility Plot")
plt.legend(loc=2)
plt.show() | def function[plot_fit, parameter[self]]:
constant[ Plots the fit of the model
Returns
----------
None (plots data and the fit)
]
import module[matplotlib.pyplot] as alias[plt]
import module[seaborn] as alias[sns]
variable[figsize] assign[=] call[name[kwargs].get, parameter[constant[figsize], tuple[[<ast.Constant object at 0x7da2045668f0>, <ast.Constant object at 0x7da204564940>]]]]
if compare[name[self].latent_variables.estimated is constant[False]] begin[:]
<ast.Raise object at 0x7da2045640a0> | keyword[def] identifier[plot_fit] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
keyword[import] identifier[seaborn] keyword[as] identifier[sns]
identifier[figsize] = identifier[kwargs] . identifier[get] ( literal[string] ,( literal[int] , literal[int] ))
keyword[if] identifier[self] . identifier[latent_variables] . identifier[estimated] keyword[is] keyword[False] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[else] :
identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[figsize] )
identifier[date_index] = identifier[self] . identifier[index] [ identifier[max] ( identifier[self] . identifier[p] , identifier[self] . identifier[q] ):]
identifier[t_params] = identifier[self] . identifier[transform_z] ()
identifier[sigma2] , identifier[Y] , identifier[___] = identifier[self] . identifier[_model] ( identifier[self] . identifier[latent_variables] . identifier[get_z_values] ())
identifier[plt] . identifier[plot] ( identifier[date_index] , identifier[np] . identifier[abs] ( identifier[Y] - identifier[t_params] [- literal[int] ]), identifier[label] = identifier[self] . identifier[data_name] + literal[string] )
identifier[plt] . identifier[plot] ( identifier[date_index] , identifier[np] . identifier[power] ( identifier[sigma2] , literal[int] ), identifier[label] = literal[string] + identifier[str] ( identifier[self] . identifier[p] )+ literal[string] + identifier[str] ( identifier[self] . identifier[q] )+ literal[string] , identifier[c] = literal[string] )
identifier[plt] . identifier[title] ( identifier[self] . identifier[data_name] + literal[string] )
identifier[plt] . identifier[legend] ( identifier[loc] = literal[int] )
identifier[plt] . identifier[show] () | def plot_fit(self, **kwargs):
""" Plots the fit of the model
Returns
----------
None (plots data and the fit)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize', (10, 7))
if self.latent_variables.estimated is False:
raise Exception('No latent variables estimated!') # depends on [control=['if'], data=[]]
else:
plt.figure(figsize=figsize)
date_index = self.index[max(self.p, self.q):]
t_params = self.transform_z()
(sigma2, Y, ___) = self._model(self.latent_variables.get_z_values())
plt.plot(date_index, np.abs(Y - t_params[-1]), label=self.data_name + ' Absolute Demeaned Values')
plt.plot(date_index, np.power(sigma2, 0.5), label='GARCH(' + str(self.p) + ',' + str(self.q) + ') Conditional Volatility', c='black')
plt.title(self.data_name + ' Volatility Plot')
plt.legend(loc=2)
plt.show() |
def change_numbering(self, new_index=None):
"""Change numbering to a new index.
Changes the numbering of index and all dependent numbering
(bond_with...) to a new_index.
The user has to make sure that the new_index consists of distinct
elements.
Args:
new_index (list): If None the new_index is taken from 1 to the
number of atoms.
Returns:
Zmat: Reindexed version of the zmatrix.
"""
if (new_index is None):
new_index = range(len(self))
elif len(new_index) != len(self):
raise ValueError('len(new_index) has to be the same as len(self)')
c_table = self.loc[:, ['b', 'a', 'd']]
# Strange bug in pandas where .replace is transitive for object columns
# and non-transitive for all other types.
# (Remember that string columns are just object columns)
# Example:
# A = {1: 2, 2: 3}
        # Transitive [1].replace(A) gives [3]
        # Non-Transitive [1].replace(A) gives [2]
# https://github.com/pandas-dev/pandas/issues/5338
# https://github.com/pandas-dev/pandas/issues/16051
# https://github.com/pandas-dev/pandas/issues/5541
# For this reason convert to int and replace then.
c_table = c_table.replace(constants.int_label)
try:
c_table = c_table.astype('i8')
except ValueError:
raise ValueError('Due to a bug in pandas it is necessary to have '
'integer columns')
c_table = c_table.replace(self.index, new_index)
c_table = c_table.replace(
{v: k for k, v in constants.int_label.items()})
out = self.copy()
out.unsafe_loc[:, ['b', 'a', 'd']] = c_table
out._frame.index = new_index
return out | def function[change_numbering, parameter[self, new_index]]:
constant[Change numbering to a new index.
Changes the numbering of index and all dependent numbering
(bond_with...) to a new_index.
The user has to make sure that the new_index consists of distinct
elements.
Args:
new_index (list): If None the new_index is taken from 1 to the
number of atoms.
Returns:
Zmat: Reindexed version of the zmatrix.
]
if compare[name[new_index] is constant[None]] begin[:]
variable[new_index] assign[=] call[name[range], parameter[call[name[len], parameter[name[self]]]]]
variable[c_table] assign[=] call[name[self].loc][tuple[[<ast.Slice object at 0x7da1b27bb760>, <ast.List object at 0x7da1b27bb070>]]]
variable[c_table] assign[=] call[name[c_table].replace, parameter[name[constants].int_label]]
<ast.Try object at 0x7da1b27b9cc0>
variable[c_table] assign[=] call[name[c_table].replace, parameter[name[self].index, name[new_index]]]
variable[c_table] assign[=] call[name[c_table].replace, parameter[<ast.DictComp object at 0x7da18c4cfb50>]]
variable[out] assign[=] call[name[self].copy, parameter[]]
call[name[out].unsafe_loc][tuple[[<ast.Slice object at 0x7da20c76c760>, <ast.List object at 0x7da20c76eda0>]]] assign[=] name[c_table]
name[out]._frame.index assign[=] name[new_index]
return[name[out]] | keyword[def] identifier[change_numbering] ( identifier[self] , identifier[new_index] = keyword[None] ):
literal[string]
keyword[if] ( identifier[new_index] keyword[is] keyword[None] ):
identifier[new_index] = identifier[range] ( identifier[len] ( identifier[self] ))
keyword[elif] identifier[len] ( identifier[new_index] )!= identifier[len] ( identifier[self] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[c_table] = identifier[self] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] ]]
identifier[c_table] = identifier[c_table] . identifier[replace] ( identifier[constants] . identifier[int_label] )
keyword[try] :
identifier[c_table] = identifier[c_table] . identifier[astype] ( literal[string] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[c_table] = identifier[c_table] . identifier[replace] ( identifier[self] . identifier[index] , identifier[new_index] )
identifier[c_table] = identifier[c_table] . identifier[replace] (
{ identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[constants] . identifier[int_label] . identifier[items] ()})
identifier[out] = identifier[self] . identifier[copy] ()
identifier[out] . identifier[unsafe_loc] [:,[ literal[string] , literal[string] , literal[string] ]]= identifier[c_table]
identifier[out] . identifier[_frame] . identifier[index] = identifier[new_index]
keyword[return] identifier[out] | def change_numbering(self, new_index=None):
"""Change numbering to a new index.
Changes the numbering of index and all dependent numbering
(bond_with...) to a new_index.
The user has to make sure that the new_index consists of distinct
elements.
Args:
new_index (list): If None the new_index is taken from 1 to the
number of atoms.
Returns:
Zmat: Reindexed version of the zmatrix.
"""
if new_index is None:
new_index = range(len(self)) # depends on [control=['if'], data=['new_index']]
elif len(new_index) != len(self):
raise ValueError('len(new_index) has to be the same as len(self)') # depends on [control=['if'], data=[]]
c_table = self.loc[:, ['b', 'a', 'd']]
# Strange bug in pandas where .replace is transitive for object columns
# and non-transitive for all other types.
# (Remember that string columns are just object columns)
# Example:
# A = {1: 2, 2: 3}
        # Transitive [1].replace(A) gives [3]
        # Non-Transitive [1].replace(A) gives [2]
# https://github.com/pandas-dev/pandas/issues/5338
# https://github.com/pandas-dev/pandas/issues/16051
# https://github.com/pandas-dev/pandas/issues/5541
# For this reason convert to int and replace then.
c_table = c_table.replace(constants.int_label)
try:
c_table = c_table.astype('i8') # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Due to a bug in pandas it is necessary to have integer columns') # depends on [control=['except'], data=[]]
c_table = c_table.replace(self.index, new_index)
c_table = c_table.replace({v: k for (k, v) in constants.int_label.items()})
out = self.copy()
out.unsafe_loc[:, ['b', 'a', 'd']] = c_table
out._frame.index = new_index
return out |
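The remapping itself is plain pandas `replace` with parallel old/new lists on integer columns; a tiny sketch on a made-up connectivity table.

import pandas as pd

c_table = pd.DataFrame({'b': [10, 10], 'a': [20, 30]}, dtype='i8')
old_index, new_index = [10, 20, 30], [0, 1, 2]
print(c_table.replace(old_index, new_index))
#    b  a
# 0  0  1
# 1  0  2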
def uemify(self, reference, hypothesis, uem=None, collar=0.,
skip_overlap=False, returns_uem=False, returns_timeline=False):
"""Crop 'reference' and 'hypothesis' to 'uem' support
Parameters
----------
reference, hypothesis : Annotation
Reference and hypothesis annotations.
uem : Timeline, optional
Evaluation map.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
returns_uem : bool, optional
Set to True to return extruded uem as well.
Defaults to False (i.e. only return reference and hypothesis)
returns_timeline : bool, optional
Set to True to oversegment reference and hypothesis so that they
share the same internal timeline.
Returns
-------
reference, hypothesis : Annotation
Extruded reference and hypothesis annotations
uem : Timeline
Extruded uem (returned only when 'returns_uem' is True)
timeline : Timeline:
Common timeline (returned only when 'returns_timeline' is True)
"""
# when uem is not provided, use the union of reference and hypothesis
# extents -- and warn the user about that.
if uem is None:
r_extent = reference.get_timeline().extent()
h_extent = hypothesis.get_timeline().extent()
extent = r_extent | h_extent
uem = Timeline(segments=[extent] if extent else [],
uri=reference.uri)
warnings.warn(
"'uem' was approximated by the union of 'reference' "
"and 'hypothesis' extents.")
# extrude collars (and overlap regions) from uem
uem = self.extrude(uem, reference, collar=collar,
skip_overlap=skip_overlap)
# extrude regions outside of uem
reference = reference.crop(uem, mode='intersection')
hypothesis = hypothesis.crop(uem, mode='intersection')
# project reference and hypothesis on common timeline
if returns_timeline:
timeline = self.common_timeline(reference, hypothesis)
reference = self.project(reference, timeline)
hypothesis = self.project(hypothesis, timeline)
result = (reference, hypothesis)
if returns_uem:
result += (uem, )
if returns_timeline:
result += (timeline, )
return result | def function[uemify, parameter[self, reference, hypothesis, uem, collar, skip_overlap, returns_uem, returns_timeline]]:
constant[Crop 'reference' and 'hypothesis' to 'uem' support
Parameters
----------
reference, hypothesis : Annotation
Reference and hypothesis annotations.
uem : Timeline, optional
Evaluation map.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
returns_uem : bool, optional
Set to True to return extruded uem as well.
Defaults to False (i.e. only return reference and hypothesis)
returns_timeline : bool, optional
Set to True to oversegment reference and hypothesis so that they
share the same internal timeline.
Returns
-------
reference, hypothesis : Annotation
Extruded reference and hypothesis annotations
uem : Timeline
Extruded uem (returned only when 'returns_uem' is True)
timeline : Timeline:
Common timeline (returned only when 'returns_timeline' is True)
]
if compare[name[uem] is constant[None]] begin[:]
variable[r_extent] assign[=] call[call[name[reference].get_timeline, parameter[]].extent, parameter[]]
variable[h_extent] assign[=] call[call[name[hypothesis].get_timeline, parameter[]].extent, parameter[]]
variable[extent] assign[=] binary_operation[name[r_extent] <ast.BitOr object at 0x7da2590d6aa0> name[h_extent]]
variable[uem] assign[=] call[name[Timeline], parameter[]]
call[name[warnings].warn, parameter[constant['uem' was approximated by the union of 'reference' and 'hypothesis' extents.]]]
variable[uem] assign[=] call[name[self].extrude, parameter[name[uem], name[reference]]]
variable[reference] assign[=] call[name[reference].crop, parameter[name[uem]]]
variable[hypothesis] assign[=] call[name[hypothesis].crop, parameter[name[uem]]]
if name[returns_timeline] begin[:]
variable[timeline] assign[=] call[name[self].common_timeline, parameter[name[reference], name[hypothesis]]]
variable[reference] assign[=] call[name[self].project, parameter[name[reference], name[timeline]]]
variable[hypothesis] assign[=] call[name[self].project, parameter[name[hypothesis], name[timeline]]]
variable[result] assign[=] tuple[[<ast.Name object at 0x7da1b12bb940>, <ast.Name object at 0x7da1b12b8190>]]
if name[returns_uem] begin[:]
<ast.AugAssign object at 0x7da1b12ba8c0>
if name[returns_timeline] begin[:]
<ast.AugAssign object at 0x7da1b12b98d0>
return[name[result]] | keyword[def] identifier[uemify] ( identifier[self] , identifier[reference] , identifier[hypothesis] , identifier[uem] = keyword[None] , identifier[collar] = literal[int] ,
identifier[skip_overlap] = keyword[False] , identifier[returns_uem] = keyword[False] , identifier[returns_timeline] = keyword[False] ):
literal[string]
keyword[if] identifier[uem] keyword[is] keyword[None] :
identifier[r_extent] = identifier[reference] . identifier[get_timeline] (). identifier[extent] ()
identifier[h_extent] = identifier[hypothesis] . identifier[get_timeline] (). identifier[extent] ()
identifier[extent] = identifier[r_extent] | identifier[h_extent]
identifier[uem] = identifier[Timeline] ( identifier[segments] =[ identifier[extent] ] keyword[if] identifier[extent] keyword[else] [],
identifier[uri] = identifier[reference] . identifier[uri] )
identifier[warnings] . identifier[warn] (
literal[string]
literal[string] )
identifier[uem] = identifier[self] . identifier[extrude] ( identifier[uem] , identifier[reference] , identifier[collar] = identifier[collar] ,
identifier[skip_overlap] = identifier[skip_overlap] )
identifier[reference] = identifier[reference] . identifier[crop] ( identifier[uem] , identifier[mode] = literal[string] )
identifier[hypothesis] = identifier[hypothesis] . identifier[crop] ( identifier[uem] , identifier[mode] = literal[string] )
keyword[if] identifier[returns_timeline] :
identifier[timeline] = identifier[self] . identifier[common_timeline] ( identifier[reference] , identifier[hypothesis] )
identifier[reference] = identifier[self] . identifier[project] ( identifier[reference] , identifier[timeline] )
identifier[hypothesis] = identifier[self] . identifier[project] ( identifier[hypothesis] , identifier[timeline] )
identifier[result] =( identifier[reference] , identifier[hypothesis] )
keyword[if] identifier[returns_uem] :
identifier[result] +=( identifier[uem] ,)
keyword[if] identifier[returns_timeline] :
identifier[result] +=( identifier[timeline] ,)
keyword[return] identifier[result] | def uemify(self, reference, hypothesis, uem=None, collar=0.0, skip_overlap=False, returns_uem=False, returns_timeline=False):
"""Crop 'reference' and 'hypothesis' to 'uem' support
Parameters
----------
reference, hypothesis : Annotation
Reference and hypothesis annotations.
uem : Timeline, optional
Evaluation map.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
returns_uem : bool, optional
Set to True to return extruded uem as well.
Defaults to False (i.e. only return reference and hypothesis)
returns_timeline : bool, optional
Set to True to oversegment reference and hypothesis so that they
share the same internal timeline.
Returns
-------
reference, hypothesis : Annotation
Extruded reference and hypothesis annotations
uem : Timeline
Extruded uem (returned only when 'returns_uem' is True)
timeline : Timeline:
Common timeline (returned only when 'returns_timeline' is True)
"""
# when uem is not provided, use the union of reference and hypothesis
# extents -- and warn the user about that.
if uem is None:
r_extent = reference.get_timeline().extent()
h_extent = hypothesis.get_timeline().extent()
extent = r_extent | h_extent
uem = Timeline(segments=[extent] if extent else [], uri=reference.uri)
warnings.warn("'uem' was approximated by the union of 'reference' and 'hypothesis' extents.") # depends on [control=['if'], data=['uem']]
# extrude collars (and overlap regions) from uem
uem = self.extrude(uem, reference, collar=collar, skip_overlap=skip_overlap)
# extrude regions outside of uem
reference = reference.crop(uem, mode='intersection')
hypothesis = hypothesis.crop(uem, mode='intersection')
# project reference and hypothesis on common timeline
if returns_timeline:
timeline = self.common_timeline(reference, hypothesis)
reference = self.project(reference, timeline)
hypothesis = self.project(hypothesis, timeline) # depends on [control=['if'], data=[]]
result = (reference, hypothesis)
if returns_uem:
result += (uem,) # depends on [control=['if'], data=[]]
if returns_timeline:
result += (timeline,) # depends on [control=['if'], data=[]]
return result |
def _get_warped_array(
input_file=None,
indexes=None,
dst_bounds=None,
dst_shape=None,
dst_crs=None,
resampling=None,
src_nodata=None,
dst_nodata=None
):
"""Extract a numpy array from a raster file."""
try:
return _rasterio_read(
input_file=input_file,
indexes=indexes,
dst_bounds=dst_bounds,
dst_shape=dst_shape,
dst_crs=dst_crs,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata
)
except Exception as e:
logger.exception("error while reading file %s: %s", input_file, e)
raise | def function[_get_warped_array, parameter[input_file, indexes, dst_bounds, dst_shape, dst_crs, resampling, src_nodata, dst_nodata]]:
constant[Extract a numpy array from a raster file.]
<ast.Try object at 0x7da1b012f9a0> | keyword[def] identifier[_get_warped_array] (
identifier[input_file] = keyword[None] ,
identifier[indexes] = keyword[None] ,
identifier[dst_bounds] = keyword[None] ,
identifier[dst_shape] = keyword[None] ,
identifier[dst_crs] = keyword[None] ,
identifier[resampling] = keyword[None] ,
identifier[src_nodata] = keyword[None] ,
identifier[dst_nodata] = keyword[None]
):
literal[string]
keyword[try] :
keyword[return] identifier[_rasterio_read] (
identifier[input_file] = identifier[input_file] ,
identifier[indexes] = identifier[indexes] ,
identifier[dst_bounds] = identifier[dst_bounds] ,
identifier[dst_shape] = identifier[dst_shape] ,
identifier[dst_crs] = identifier[dst_crs] ,
identifier[resampling] = identifier[resampling] ,
identifier[src_nodata] = identifier[src_nodata] ,
identifier[dst_nodata] = identifier[dst_nodata]
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[exception] ( literal[string] , identifier[input_file] , identifier[e] )
keyword[raise] | def _get_warped_array(input_file=None, indexes=None, dst_bounds=None, dst_shape=None, dst_crs=None, resampling=None, src_nodata=None, dst_nodata=None):
"""Extract a numpy array from a raster file."""
try:
return _rasterio_read(input_file=input_file, indexes=indexes, dst_bounds=dst_bounds, dst_shape=dst_shape, dst_crs=dst_crs, resampling=resampling, src_nodata=src_nodata, dst_nodata=dst_nodata) # depends on [control=['try'], data=[]]
except Exception as e:
logger.exception('error while reading file %s: %s', input_file, e)
raise # depends on [control=['except'], data=['e']] |
def as_file(self):
"""If obj[%data_key_name] exists, return name of a file with base64
decoded obj[%data_key_name] content otherwise obj[%file_key_name]."""
use_data_if_no_file = not self._file and self._data
if use_data_if_no_file:
if self._base64_file_content:
if isinstance(self._data, str):
content = self._data.encode()
else:
content = self._data
self._file = _create_temp_file_with_content(
base64.standard_b64decode(content))
else:
self._file = _create_temp_file_with_content(self._data)
if self._file and not os.path.isfile(self._file):
raise ConfigException("File does not exists: %s" % self._file)
return self._file | def function[as_file, parameter[self]]:
constant[If obj[%data_key_name] exists, return name of a file with base64
decoded obj[%data_key_name] content otherwise obj[%file_key_name].]
variable[use_data_if_no_file] assign[=] <ast.BoolOp object at 0x7da20c6c4e50>
if name[use_data_if_no_file] begin[:]
if name[self]._base64_file_content begin[:]
if call[name[isinstance], parameter[name[self]._data, name[str]]] begin[:]
variable[content] assign[=] call[name[self]._data.encode, parameter[]]
name[self]._file assign[=] call[name[_create_temp_file_with_content], parameter[call[name[base64].standard_b64decode, parameter[name[content]]]]]
if <ast.BoolOp object at 0x7da20c6c4430> begin[:]
<ast.Raise object at 0x7da20c6c5060>
return[name[self]._file] | keyword[def] identifier[as_file] ( identifier[self] ):
literal[string]
identifier[use_data_if_no_file] = keyword[not] identifier[self] . identifier[_file] keyword[and] identifier[self] . identifier[_data]
keyword[if] identifier[use_data_if_no_file] :
keyword[if] identifier[self] . identifier[_base64_file_content] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_data] , identifier[str] ):
identifier[content] = identifier[self] . identifier[_data] . identifier[encode] ()
keyword[else] :
identifier[content] = identifier[self] . identifier[_data]
identifier[self] . identifier[_file] = identifier[_create_temp_file_with_content] (
identifier[base64] . identifier[standard_b64decode] ( identifier[content] ))
keyword[else] :
identifier[self] . identifier[_file] = identifier[_create_temp_file_with_content] ( identifier[self] . identifier[_data] )
keyword[if] identifier[self] . identifier[_file] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[_file] ):
keyword[raise] identifier[ConfigException] ( literal[string] % identifier[self] . identifier[_file] )
keyword[return] identifier[self] . identifier[_file] | def as_file(self):
"""If obj[%data_key_name] exists, return name of a file with base64
decoded obj[%data_key_name] content otherwise obj[%file_key_name]."""
use_data_if_no_file = not self._file and self._data
if use_data_if_no_file:
if self._base64_file_content:
if isinstance(self._data, str):
content = self._data.encode() # depends on [control=['if'], data=[]]
else:
content = self._data
self._file = _create_temp_file_with_content(base64.standard_b64decode(content)) # depends on [control=['if'], data=[]]
else:
self._file = _create_temp_file_with_content(self._data) # depends on [control=['if'], data=[]]
if self._file and (not os.path.isfile(self._file)):
            raise ConfigException('File does not exist: %s' % self._file) # depends on [control=['if'], data=[]]
return self._file |
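The decode-and-persist step in isolation, a hedged sketch; the original module's `_create_temp_file_with_content` helper is replaced here with an explicit tempfile call.

import base64
import tempfile

def write_b64_to_temp(data):
    # Accept str or bytes, base64-decode, and write to a named temp file.
    if isinstance(data, str):
        data = data.encode()
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(base64.standard_b64decode(data))
        return f.name

path = write_b64_to_temp('aGVsbG8=')  # file now contains b'hello'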
def _q(self, number, precision='.00001'):
"""quantiztion of BSSEval values"""
if np.isinf(number):
return np.nan
else:
return D(D(number).quantize(D(precision))) | def function[_q, parameter[self, number, precision]]:
    constant[quantization of BSSEval values]
if call[name[np].isinf, parameter[name[number]]] begin[:]
return[name[np].nan] | keyword[def] identifier[_q] ( identifier[self] , identifier[number] , identifier[precision] = literal[string] ):
literal[string]
keyword[if] identifier[np] . identifier[isinf] ( identifier[number] ):
keyword[return] identifier[np] . identifier[nan]
keyword[else] :
keyword[return] identifier[D] ( identifier[D] ( identifier[number] ). identifier[quantize] ( identifier[D] ( identifier[precision] ))) | def _q(self, number, precision='.00001'):
"""quantiztion of BSSEval values"""
if np.isinf(number):
return np.nan # depends on [control=['if'], data=[]]
else:
return D(D(number).quantize(D(precision))) |
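The quantization above is plain `Decimal.quantize` at five decimal places, with infinities mapped to NaN; a runnable sketch.

import numpy as np
from decimal import Decimal as D

print(D(D(3.14159265).quantize(D('.00001'))))  # 3.14159
print(np.isinf(np.inf))                        # True, so _q would return np.nan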
def obfuscate(
obfuscate_globals=False, shadow_funcname=False, reserved_keywords=()):
"""
    An example, barebones name obfuscation ruleset
obfuscate_globals
If true, identifier names on the global scope will also be
obfuscated. Default is False.
shadow_funcname
If True, obfuscated function names will be shadowed. Default is
False.
reserved_keywords
A tuple of strings that should not be generated as obfuscated
identifiers.
"""
def name_obfuscation_rules():
inst = Obfuscator(
obfuscate_globals=obfuscate_globals,
shadow_funcname=shadow_funcname,
reserved_keywords=reserved_keywords,
)
return {
'token_handler': token_handler_unobfuscate,
'deferrable_handlers': {
Resolve: inst.resolve,
},
'prewalk_hooks': [
inst.prewalk_hook,
],
}
return name_obfuscation_rules | def function[obfuscate, parameter[obfuscate_globals, shadow_funcname, reserved_keywords]]:
constant[
    An example, barebones name obfuscation ruleset
obfuscate_globals
If true, identifier names on the global scope will also be
obfuscated. Default is False.
shadow_funcname
If True, obfuscated function names will be shadowed. Default is
False.
reserved_keywords
A tuple of strings that should not be generated as obfuscated
identifiers.
]
def function[name_obfuscation_rules, parameter[]]:
variable[inst] assign[=] call[name[Obfuscator], parameter[]]
return[dictionary[[<ast.Constant object at 0x7da18ede5ab0>, <ast.Constant object at 0x7da18ede5390>, <ast.Constant object at 0x7da18ede4b80>], [<ast.Name object at 0x7da18ede7310>, <ast.Dict object at 0x7da18ede7c10>, <ast.List object at 0x7da18ede6530>]]]
return[name[name_obfuscation_rules]] | keyword[def] identifier[obfuscate] (
identifier[obfuscate_globals] = keyword[False] , identifier[shadow_funcname] = keyword[False] , identifier[reserved_keywords] =()):
literal[string]
keyword[def] identifier[name_obfuscation_rules] ():
identifier[inst] = identifier[Obfuscator] (
identifier[obfuscate_globals] = identifier[obfuscate_globals] ,
identifier[shadow_funcname] = identifier[shadow_funcname] ,
identifier[reserved_keywords] = identifier[reserved_keywords] ,
)
keyword[return] {
literal[string] : identifier[token_handler_unobfuscate] ,
literal[string] :{
identifier[Resolve] : identifier[inst] . identifier[resolve] ,
},
literal[string] :[
identifier[inst] . identifier[prewalk_hook] ,
],
}
keyword[return] identifier[name_obfuscation_rules] | def obfuscate(obfuscate_globals=False, shadow_funcname=False, reserved_keywords=()):
"""
    An example, barebones name obfuscation ruleset
obfuscate_globals
If true, identifier names on the global scope will also be
obfuscated. Default is False.
shadow_funcname
If True, obfuscated function names will be shadowed. Default is
False.
reserved_keywords
A tuple of strings that should not be generated as obfuscated
identifiers.
"""
def name_obfuscation_rules():
inst = Obfuscator(obfuscate_globals=obfuscate_globals, shadow_funcname=shadow_funcname, reserved_keywords=reserved_keywords)
return {'token_handler': token_handler_unobfuscate, 'deferrable_handlers': {Resolve: inst.resolve}, 'prewalk_hooks': [inst.prewalk_hook]}
return name_obfuscation_rules |
def cleanString(someText):
"""
remove special characters and spaces from string
and convert to lowercase
"""
ret = ''
if someText is not None:
ret = filter(unicode.isalnum, someText.lower())
return ret | def function[cleanString, parameter[someText]]:
constant[
remove special characters and spaces from string
and convert to lowercase
]
variable[ret] assign[=] constant[]
if compare[name[someText] is_not constant[None]] begin[:]
variable[ret] assign[=] call[name[filter], parameter[name[unicode].isalnum, call[name[someText].lower, parameter[]]]]
return[name[ret]] | keyword[def] identifier[cleanString] ( identifier[someText] ):
literal[string]
identifier[ret] = literal[string]
keyword[if] identifier[someText] keyword[is] keyword[not] keyword[None] :
identifier[ret] = identifier[filter] ( identifier[unicode] . identifier[isalnum] , identifier[someText] . identifier[lower] ())
keyword[return] identifier[ret] | def cleanString(someText):
"""
remove special characters and spaces from string
and convert to lowercase
"""
ret = ''
if someText is not None:
ret = filter(unicode.isalnum, someText.lower()) # depends on [control=['if'], data=['someText']]
return ret |
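The code above is Python 2 (`unicode`, string-returning `filter`); an equivalent Python 3 sketch:

def clean_string(some_text):
    if some_text is None:
        return ''
    # Keep only alphanumeric characters, lowercased.
    return ''.join(ch for ch in some_text.lower() if ch.isalnum())

print(clean_string('Hello, World! 42'))  # helloworld42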
def _AugAssign(self, t):
""" +=,-=,*=,/=,**=, etc. operations
"""
self._fill()
self._dispatch(t.node)
self._write(' '+t.op+' ')
self._dispatch(t.expr)
if not self._do_indent:
self._write(';') | def function[_AugAssign, parameter[self, t]]:
constant[ +=,-=,*=,/=,**=, etc. operations
]
call[name[self]._fill, parameter[]]
call[name[self]._dispatch, parameter[name[t].node]]
call[name[self]._write, parameter[binary_operation[binary_operation[constant[ ] + name[t].op] + constant[ ]]]]
call[name[self]._dispatch, parameter[name[t].expr]]
if <ast.UnaryOp object at 0x7da1b12da080> begin[:]
call[name[self]._write, parameter[constant[;]]] | keyword[def] identifier[_AugAssign] ( identifier[self] , identifier[t] ):
literal[string]
identifier[self] . identifier[_fill] ()
identifier[self] . identifier[_dispatch] ( identifier[t] . identifier[node] )
identifier[self] . identifier[_write] ( literal[string] + identifier[t] . identifier[op] + literal[string] )
identifier[self] . identifier[_dispatch] ( identifier[t] . identifier[expr] )
keyword[if] keyword[not] identifier[self] . identifier[_do_indent] :
identifier[self] . identifier[_write] ( literal[string] ) | def _AugAssign(self, t):
""" +=,-=,*=,/=,**=, etc. operations
"""
self._fill()
self._dispatch(t.node)
self._write(' ' + t.op + ' ')
self._dispatch(t.expr)
if not self._do_indent:
self._write(';') # depends on [control=['if'], data=[]] |
def new(self, interchange_level=1, sys_ident='', vol_ident='', set_size=1,
seqnum=1, log_block_size=2048, vol_set_ident=' ', pub_ident_str='',
preparer_ident_str='', app_ident_str='', copyright_file='',
abstract_file='', bibli_file='', vol_expire_date=None, app_use='',
joliet=None, rock_ridge=None, xa=False, udf=None):
# type: (int, str, str, int, int, int, str, str, str, str, str, str, str, Optional[float], str, Optional[int], Optional[str], bool, Optional[str]) -> None
'''
Create a new ISO from scratch.
Parameters:
interchange_level - The ISO9660 interchange level to use; this dictates
the rules on the names of files. Levels 1, 2, 3,
and 4 are supported. Level 1 is the most
conservative, and is the default, but level 3 is
recommended.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
        joliet - An integer that can have the value 1, 2, or 3 for Joliet
levels 1, 2, or 3 (3 is by far the most common), or None for
no Joliet support (the default). For legacy reasons, this
parameter also accepts a boolean, where the value of 'False'
means no Joliet and a value of 'True' means level 3.
rock_ridge - Whether to make this ISO have the Rock Ridge extensions or
not. The default value of None does not add Rock Ridge
extensions. A string value of '1.09', '1.10', or '1.12'
adds the specified Rock Ridge version to the ISO. If
unsure, pass '1.09' to ensure maximum compatibility.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
udf - Whether to add UDF support to this ISO. If it is None (the
default), no UDF support is added. If it is "2.60", version 2.60
of the UDF spec is used. All other values are disallowed.
Returns:
Nothing.
'''
# Start out with argument checking.
if self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object')
if interchange_level < 1 or interchange_level > 4:
raise pycdlibexception.PyCdlibInvalidInput('Invalid interchange level (must be between 1 and 4)')
if rock_ridge and rock_ridge not in ['1.09', '1.10', '1.12']:
raise pycdlibexception.PyCdlibInvalidInput('Rock Ridge value must be None (no Rock Ridge), 1.09, 1.10, or 1.12')
if udf and udf != '2.60':
raise pycdlibexception.PyCdlibInvalidInput('UDF value must be empty (no UDF), or 2.60')
# Now save off the arguments we need to keep around.
if not app_ident_str:
app_ident_str = 'PyCdlib (C) 2015-2018 Chris Lalancette'
self.interchange_level = interchange_level
self.xa = xa
if isinstance(joliet, bool):
if joliet:
joliet = 3
else:
joliet = None
if rock_ridge:
self.rock_ridge = rock_ridge
sys_ident_bytes = sys_ident.encode('utf-8')
vol_ident_bytes = vol_ident.encode('utf-8')
vol_set_ident_bytes = vol_set_ident.encode('utf-8')
pub_ident_bytes = pub_ident_str.encode('utf-8')
preparer_ident_bytes = preparer_ident_str.encode('utf-8')
app_ident_bytes = app_ident_str.encode('utf-8')
copyright_file_bytes = copyright_file.encode('utf-8')
abstract_file_bytes = abstract_file.encode('utf-8')
bibli_file_bytes = bibli_file.encode('utf-8')
app_use_bytes = app_use.encode('utf-8')
if vol_expire_date is None:
real_vol_expire_date = 0.0
else:
real_vol_expire_date = vol_expire_date
# Now start creating the ISO.
self.pvd = headervd.pvd_factory(sys_ident_bytes, vol_ident_bytes,
set_size, seqnum, log_block_size,
vol_set_ident_bytes, pub_ident_bytes,
preparer_ident_bytes, app_ident_bytes,
copyright_file_bytes,
abstract_file_bytes, bibli_file_bytes,
real_vol_expire_date, app_use_bytes, xa)
self.pvds.append(self.pvd)
pvd_log_block_size = self.pvd.logical_block_size()
num_bytes_to_add = 0
if self.interchange_level == 4:
self.enhanced_vd = headervd.enhanced_vd_factory(sys_ident_bytes,
vol_ident_bytes,
set_size, seqnum,
log_block_size,
vol_set_ident_bytes,
pub_ident_bytes,
preparer_ident_bytes,
app_ident_bytes,
copyright_file_bytes,
abstract_file_bytes,
bibli_file_bytes,
real_vol_expire_date,
app_use_bytes, xa)
self.svds.append(self.enhanced_vd)
num_bytes_to_add += self.enhanced_vd.logical_block_size()
if joliet is not None:
self.joliet_vd = headervd.joliet_vd_factory(joliet, sys_ident_bytes,
vol_ident_bytes, set_size,
seqnum, log_block_size,
vol_set_ident_bytes,
pub_ident_bytes,
preparer_ident_bytes,
app_ident_bytes,
copyright_file_bytes,
abstract_file_bytes,
bibli_file_bytes,
real_vol_expire_date,
app_use_bytes, xa)
self.svds.append(self.joliet_vd)
# Now that we have added joliet, we need to add the new space to the
# PVD for the VD itself.
num_bytes_to_add += self.joliet_vd.logical_block_size()
self.vdsts.append(headervd.vdst_factory())
num_bytes_to_add += pvd_log_block_size
if udf:
self._has_udf = True
# Create the Bridge Recognition Volume Sequence
self.udf_bea.new()
self.udf_nsr.new(2)
self.udf_tea.new()
num_bytes_to_add += 3 * pvd_log_block_size
# We always create an empty version volume descriptor
self.version_vd = headervd.version_vd_factory(pvd_log_block_size)
num_bytes_to_add += pvd_log_block_size
if udf:
# We need to pad out to extent 32. The padding should be the
# distance between the current PVD space size and 32.
additional_extents = 32 - (self.pvd.space_size + (num_bytes_to_add // pvd_log_block_size))
num_bytes_to_add += additional_extents * pvd_log_block_size
# Create the Main Volume Descriptor Sequence
self.udf_main_descs.pvd.new()
self.udf_main_descs.impl_use.new()
self.udf_main_descs.partition.new()
self.udf_main_descs.logical_volume.new()
self.udf_main_descs.unallocated_space.new()
self.udf_main_descs.terminator.new()
num_bytes_to_add += 16 * pvd_log_block_size
# Create the Reserve Volume Descriptor Sequence
self.udf_reserve_descs.pvd.new()
self.udf_reserve_descs.impl_use.new()
self.udf_reserve_descs.partition.new()
self.udf_reserve_descs.logical_volume.new()
self.udf_reserve_descs.unallocated_space.new()
self.udf_reserve_descs.terminator.new()
num_bytes_to_add += 16 * pvd_log_block_size
# Create the Logical Volume Integrity Sequence
self.udf_logical_volume_integrity.new()
self.udf_logical_volume_integrity_terminator.new()
num_bytes_to_add += 192 * pvd_log_block_size
# Create the Anchor
anchor1 = udfmod.UDFAnchorVolumeStructure()
anchor1.new()
self.udf_anchors.append(anchor1)
num_bytes_to_add += pvd_log_block_size
# Create the File Set
self.udf_file_set.new()
self.udf_file_set_terminator.new()
num_bytes_to_add += 2 * pvd_log_block_size
# Create the root directory, and the 'parent' entry inside.
self.udf_root = udfmod.UDFFileEntry()
self.udf_root.new(0, 'dir', None, pvd_log_block_size)
num_bytes_to_add += pvd_log_block_size
parent = udfmod.UDFFileIdentifierDescriptor()
parent.new(True, True, b'', None)
num_new_extents = self.udf_root.add_file_ident_desc(parent, pvd_log_block_size)
num_bytes_to_add += num_new_extents * pvd_log_block_size
num_partition_bytes_to_add = 0
    # Create the PTR, and add the 4 extents that comprise the LE PTR and
    # BE PTR to the number of bytes to add.
ptr = path_table_record.PathTableRecord()
ptr.new_root()
self.pvd.root_directory_record().set_ptr(ptr)
num_partition_bytes_to_add += 4 * pvd_log_block_size
# Also add one extent to the size for the root directory record.
num_partition_bytes_to_add += pvd_log_block_size
self._create_dot(self.pvd, self.pvd.root_directory_record(),
self.rock_ridge, self.xa, 0o040555)
self._create_dotdot(self.pvd, self.pvd.root_directory_record(),
self.rock_ridge, False, self.xa, 0o040555)
if self.joliet_vd is not None:
        # Create the PTR, and add the 4 extents that comprise the LE PTR and
        # BE PTR to the number of bytes to add.
ptr = path_table_record.PathTableRecord()
ptr.new_root()
self.joliet_vd.root_directory_record().set_ptr(ptr)
num_partition_bytes_to_add += 4 * pvd_log_block_size
# Also add one extent to the size for the root directory record.
num_partition_bytes_to_add += pvd_log_block_size
self._create_dot(self.joliet_vd,
self.joliet_vd.root_directory_record(), '',
False, -1)
self._create_dotdot(self.joliet_vd,
self.joliet_vd.root_directory_record(), '',
False, False, -1)
if self.rock_ridge:
num_partition_bytes_to_add += pvd_log_block_size
if udf:
anchor2 = udfmod.UDFAnchorVolumeStructure()
anchor2.new()
self.udf_anchors.append(anchor2)
num_partition_bytes_to_add += pvd_log_block_size
self._finish_add(num_bytes_to_add, num_partition_bytes_to_add)
self._initialized = True | def function[new, parameter[self, interchange_level, sys_ident, vol_ident, set_size, seqnum, log_block_size, vol_set_ident, pub_ident_str, preparer_ident_str, app_ident_str, copyright_file, abstract_file, bibli_file, vol_expire_date, app_use, joliet, rock_ridge, xa, udf]]:
constant[
Create a new ISO from scratch.
Parameters:
interchange_level - The ISO9660 interchange level to use; this dictates
the rules on the names of files. Levels 1, 2, 3,
and 4 are supported. Level 1 is the most
conservative, and is the default, but level 3 is
recommended.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
joliet - A integer that can have the value 1, 2, or 3 for Joliet
levels 1, 2, or 3 (3 is by far the most common), or None for
no Joliet support (the default). For legacy reasons, this
parameter also accepts a boolean, where the value of 'False'
means no Joliet and a value of 'True' means level 3.
rock_ridge - Whether to make this ISO have the Rock Ridge extensions or
not. The default value of None does not add Rock Ridge
extensions. A string value of '1.09', '1.10', or '1.12'
adds the specified Rock Ridge version to the ISO. If
unsure, pass '1.09' to ensure maximum compatibility.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
udf - Whether to add UDF support to this ISO. If it is None (the
default), no UDF support is added. If it is "2.60", version 2.60
of the UDF spec is used. All other values are disallowed.
Returns:
Nothing.
]
if name[self]._initialized begin[:]
<ast.Raise object at 0x7da1b0d0c100>
if <ast.BoolOp object at 0x7da1b0d0eb90> begin[:]
<ast.Raise object at 0x7da1b0d0f7c0>
if <ast.BoolOp object at 0x7da1b0d0e2c0> begin[:]
<ast.Raise object at 0x7da1b0d0f3d0>
if <ast.BoolOp object at 0x7da1b0d0fd90> begin[:]
<ast.Raise object at 0x7da1b0d0e8f0>
if <ast.UnaryOp object at 0x7da1b0d0d900> begin[:]
variable[app_ident_str] assign[=] constant[PyCdlib (C) 2015-2018 Chris Lalancette]
name[self].interchange_level assign[=] name[interchange_level]
name[self].xa assign[=] name[xa]
if call[name[isinstance], parameter[name[joliet], name[bool]]] begin[:]
if name[joliet] begin[:]
variable[joliet] assign[=] constant[3]
if name[rock_ridge] begin[:]
name[self].rock_ridge assign[=] name[rock_ridge]
variable[sys_ident_bytes] assign[=] call[name[sys_ident].encode, parameter[constant[utf-8]]]
variable[vol_ident_bytes] assign[=] call[name[vol_ident].encode, parameter[constant[utf-8]]]
variable[vol_set_ident_bytes] assign[=] call[name[vol_set_ident].encode, parameter[constant[utf-8]]]
variable[pub_ident_bytes] assign[=] call[name[pub_ident_str].encode, parameter[constant[utf-8]]]
variable[preparer_ident_bytes] assign[=] call[name[preparer_ident_str].encode, parameter[constant[utf-8]]]
variable[app_ident_bytes] assign[=] call[name[app_ident_str].encode, parameter[constant[utf-8]]]
variable[copyright_file_bytes] assign[=] call[name[copyright_file].encode, parameter[constant[utf-8]]]
variable[abstract_file_bytes] assign[=] call[name[abstract_file].encode, parameter[constant[utf-8]]]
variable[bibli_file_bytes] assign[=] call[name[bibli_file].encode, parameter[constant[utf-8]]]
variable[app_use_bytes] assign[=] call[name[app_use].encode, parameter[constant[utf-8]]]
if compare[name[vol_expire_date] is constant[None]] begin[:]
variable[real_vol_expire_date] assign[=] constant[0.0]
name[self].pvd assign[=] call[name[headervd].pvd_factory, parameter[name[sys_ident_bytes], name[vol_ident_bytes], name[set_size], name[seqnum], name[log_block_size], name[vol_set_ident_bytes], name[pub_ident_bytes], name[preparer_ident_bytes], name[app_ident_bytes], name[copyright_file_bytes], name[abstract_file_bytes], name[bibli_file_bytes], name[real_vol_expire_date], name[app_use_bytes], name[xa]]]
call[name[self].pvds.append, parameter[name[self].pvd]]
variable[pvd_log_block_size] assign[=] call[name[self].pvd.logical_block_size, parameter[]]
variable[num_bytes_to_add] assign[=] constant[0]
if compare[name[self].interchange_level equal[==] constant[4]] begin[:]
name[self].enhanced_vd assign[=] call[name[headervd].enhanced_vd_factory, parameter[name[sys_ident_bytes], name[vol_ident_bytes], name[set_size], name[seqnum], name[log_block_size], name[vol_set_ident_bytes], name[pub_ident_bytes], name[preparer_ident_bytes], name[app_ident_bytes], name[copyright_file_bytes], name[abstract_file_bytes], name[bibli_file_bytes], name[real_vol_expire_date], name[app_use_bytes], name[xa]]]
call[name[self].svds.append, parameter[name[self].enhanced_vd]]
<ast.AugAssign object at 0x7da1b0d99450>
if compare[name[joliet] is_not constant[None]] begin[:]
name[self].joliet_vd assign[=] call[name[headervd].joliet_vd_factory, parameter[name[joliet], name[sys_ident_bytes], name[vol_ident_bytes], name[set_size], name[seqnum], name[log_block_size], name[vol_set_ident_bytes], name[pub_ident_bytes], name[preparer_ident_bytes], name[app_ident_bytes], name[copyright_file_bytes], name[abstract_file_bytes], name[bibli_file_bytes], name[real_vol_expire_date], name[app_use_bytes], name[xa]]]
call[name[self].svds.append, parameter[name[self].joliet_vd]]
<ast.AugAssign object at 0x7da1b0d99bd0>
call[name[self].vdsts.append, parameter[call[name[headervd].vdst_factory, parameter[]]]]
<ast.AugAssign object at 0x7da1b0f18ee0>
if name[udf] begin[:]
name[self]._has_udf assign[=] constant[True]
call[name[self].udf_bea.new, parameter[]]
call[name[self].udf_nsr.new, parameter[constant[2]]]
call[name[self].udf_tea.new, parameter[]]
<ast.AugAssign object at 0x7da18f721ab0>
name[self].version_vd assign[=] call[name[headervd].version_vd_factory, parameter[name[pvd_log_block_size]]]
<ast.AugAssign object at 0x7da18f723fd0>
if name[udf] begin[:]
variable[additional_extents] assign[=] binary_operation[constant[32] - binary_operation[name[self].pvd.space_size + binary_operation[name[num_bytes_to_add] <ast.FloorDiv object at 0x7da2590d6bc0> name[pvd_log_block_size]]]]
<ast.AugAssign object at 0x7da1b0f0e500>
call[name[self].udf_main_descs.pvd.new, parameter[]]
call[name[self].udf_main_descs.impl_use.new, parameter[]]
call[name[self].udf_main_descs.partition.new, parameter[]]
call[name[self].udf_main_descs.logical_volume.new, parameter[]]
call[name[self].udf_main_descs.unallocated_space.new, parameter[]]
call[name[self].udf_main_descs.terminator.new, parameter[]]
<ast.AugAssign object at 0x7da1b0f0d990>
call[name[self].udf_reserve_descs.pvd.new, parameter[]]
call[name[self].udf_reserve_descs.impl_use.new, parameter[]]
call[name[self].udf_reserve_descs.partition.new, parameter[]]
call[name[self].udf_reserve_descs.logical_volume.new, parameter[]]
call[name[self].udf_reserve_descs.unallocated_space.new, parameter[]]
call[name[self].udf_reserve_descs.terminator.new, parameter[]]
<ast.AugAssign object at 0x7da1b0f0e7a0>
call[name[self].udf_logical_volume_integrity.new, parameter[]]
call[name[self].udf_logical_volume_integrity_terminator.new, parameter[]]
<ast.AugAssign object at 0x7da1b0f0f5e0>
variable[anchor1] assign[=] call[name[udfmod].UDFAnchorVolumeStructure, parameter[]]
call[name[anchor1].new, parameter[]]
call[name[self].udf_anchors.append, parameter[name[anchor1]]]
<ast.AugAssign object at 0x7da1b0f0cd30>
call[name[self].udf_file_set.new, parameter[]]
call[name[self].udf_file_set_terminator.new, parameter[]]
<ast.AugAssign object at 0x7da1b0f0fc40>
name[self].udf_root assign[=] call[name[udfmod].UDFFileEntry, parameter[]]
call[name[self].udf_root.new, parameter[constant[0], constant[dir], constant[None], name[pvd_log_block_size]]]
<ast.AugAssign object at 0x7da1b0f0cd60>
variable[parent] assign[=] call[name[udfmod].UDFFileIdentifierDescriptor, parameter[]]
call[name[parent].new, parameter[constant[True], constant[True], constant[b''], constant[None]]]
variable[num_new_extents] assign[=] call[name[self].udf_root.add_file_ident_desc, parameter[name[parent], name[pvd_log_block_size]]]
<ast.AugAssign object at 0x7da1b0f0e9b0>
variable[num_partition_bytes_to_add] assign[=] constant[0]
variable[ptr] assign[=] call[name[path_table_record].PathTableRecord, parameter[]]
call[name[ptr].new_root, parameter[]]
call[call[name[self].pvd.root_directory_record, parameter[]].set_ptr, parameter[name[ptr]]]
<ast.AugAssign object at 0x7da1b0f0cd90>
<ast.AugAssign object at 0x7da1b0f0ce20>
call[name[self]._create_dot, parameter[name[self].pvd, call[name[self].pvd.root_directory_record, parameter[]], name[self].rock_ridge, name[self].xa, constant[16749]]]
call[name[self]._create_dotdot, parameter[name[self].pvd, call[name[self].pvd.root_directory_record, parameter[]], name[self].rock_ridge, constant[False], name[self].xa, constant[16749]]]
if compare[name[self].joliet_vd is_not constant[None]] begin[:]
variable[ptr] assign[=] call[name[path_table_record].PathTableRecord, parameter[]]
call[name[ptr].new_root, parameter[]]
call[call[name[self].joliet_vd.root_directory_record, parameter[]].set_ptr, parameter[name[ptr]]]
<ast.AugAssign object at 0x7da1b0fcec50>
<ast.AugAssign object at 0x7da1b0fcc820>
call[name[self]._create_dot, parameter[name[self].joliet_vd, call[name[self].joliet_vd.root_directory_record, parameter[]], constant[], constant[False], <ast.UnaryOp object at 0x7da1b0fcdcc0>]]
call[name[self]._create_dotdot, parameter[name[self].joliet_vd, call[name[self].joliet_vd.root_directory_record, parameter[]], constant[], constant[False], constant[False], <ast.UnaryOp object at 0x7da1b0fcc790>]]
if name[self].rock_ridge begin[:]
<ast.AugAssign object at 0x7da20e957070>
if name[udf] begin[:]
variable[anchor2] assign[=] call[name[udfmod].UDFAnchorVolumeStructure, parameter[]]
call[name[anchor2].new, parameter[]]
call[name[self].udf_anchors.append, parameter[name[anchor2]]]
<ast.AugAssign object at 0x7da20e9567a0>
call[name[self]._finish_add, parameter[name[num_bytes_to_add], name[num_partition_bytes_to_add]]]
name[self]._initialized assign[=] constant[True] | keyword[def] identifier[new] ( identifier[self] , identifier[interchange_level] = literal[int] , identifier[sys_ident] = literal[string] , identifier[vol_ident] = literal[string] , identifier[set_size] = literal[int] ,
identifier[seqnum] = literal[int] , identifier[log_block_size] = literal[int] , identifier[vol_set_ident] = literal[string] , identifier[pub_ident_str] = literal[string] ,
identifier[preparer_ident_str] = literal[string] , identifier[app_ident_str] = literal[string] , identifier[copyright_file] = literal[string] ,
identifier[abstract_file] = literal[string] , identifier[bibli_file] = literal[string] , identifier[vol_expire_date] = keyword[None] , identifier[app_use] = literal[string] ,
identifier[joliet] = keyword[None] , identifier[rock_ridge] = keyword[None] , identifier[xa] = keyword[False] , identifier[udf] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidInput] ( literal[string] )
keyword[if] identifier[interchange_level] < literal[int] keyword[or] identifier[interchange_level] > literal[int] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidInput] ( literal[string] )
keyword[if] identifier[rock_ridge] keyword[and] identifier[rock_ridge] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidInput] ( literal[string] )
keyword[if] identifier[udf] keyword[and] identifier[udf] != literal[string] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidInput] ( literal[string] )
keyword[if] keyword[not] identifier[app_ident_str] :
identifier[app_ident_str] = literal[string]
identifier[self] . identifier[interchange_level] = identifier[interchange_level]
identifier[self] . identifier[xa] = identifier[xa]
keyword[if] identifier[isinstance] ( identifier[joliet] , identifier[bool] ):
keyword[if] identifier[joliet] :
identifier[joliet] = literal[int]
keyword[else] :
identifier[joliet] = keyword[None]
keyword[if] identifier[rock_ridge] :
identifier[self] . identifier[rock_ridge] = identifier[rock_ridge]
identifier[sys_ident_bytes] = identifier[sys_ident] . identifier[encode] ( literal[string] )
identifier[vol_ident_bytes] = identifier[vol_ident] . identifier[encode] ( literal[string] )
identifier[vol_set_ident_bytes] = identifier[vol_set_ident] . identifier[encode] ( literal[string] )
identifier[pub_ident_bytes] = identifier[pub_ident_str] . identifier[encode] ( literal[string] )
identifier[preparer_ident_bytes] = identifier[preparer_ident_str] . identifier[encode] ( literal[string] )
identifier[app_ident_bytes] = identifier[app_ident_str] . identifier[encode] ( literal[string] )
identifier[copyright_file_bytes] = identifier[copyright_file] . identifier[encode] ( literal[string] )
identifier[abstract_file_bytes] = identifier[abstract_file] . identifier[encode] ( literal[string] )
identifier[bibli_file_bytes] = identifier[bibli_file] . identifier[encode] ( literal[string] )
identifier[app_use_bytes] = identifier[app_use] . identifier[encode] ( literal[string] )
keyword[if] identifier[vol_expire_date] keyword[is] keyword[None] :
identifier[real_vol_expire_date] = literal[int]
keyword[else] :
identifier[real_vol_expire_date] = identifier[vol_expire_date]
identifier[self] . identifier[pvd] = identifier[headervd] . identifier[pvd_factory] ( identifier[sys_ident_bytes] , identifier[vol_ident_bytes] ,
identifier[set_size] , identifier[seqnum] , identifier[log_block_size] ,
identifier[vol_set_ident_bytes] , identifier[pub_ident_bytes] ,
identifier[preparer_ident_bytes] , identifier[app_ident_bytes] ,
identifier[copyright_file_bytes] ,
identifier[abstract_file_bytes] , identifier[bibli_file_bytes] ,
identifier[real_vol_expire_date] , identifier[app_use_bytes] , identifier[xa] )
identifier[self] . identifier[pvds] . identifier[append] ( identifier[self] . identifier[pvd] )
identifier[pvd_log_block_size] = identifier[self] . identifier[pvd] . identifier[logical_block_size] ()
identifier[num_bytes_to_add] = literal[int]
keyword[if] identifier[self] . identifier[interchange_level] == literal[int] :
identifier[self] . identifier[enhanced_vd] = identifier[headervd] . identifier[enhanced_vd_factory] ( identifier[sys_ident_bytes] ,
identifier[vol_ident_bytes] ,
identifier[set_size] , identifier[seqnum] ,
identifier[log_block_size] ,
identifier[vol_set_ident_bytes] ,
identifier[pub_ident_bytes] ,
identifier[preparer_ident_bytes] ,
identifier[app_ident_bytes] ,
identifier[copyright_file_bytes] ,
identifier[abstract_file_bytes] ,
identifier[bibli_file_bytes] ,
identifier[real_vol_expire_date] ,
identifier[app_use_bytes] , identifier[xa] )
identifier[self] . identifier[svds] . identifier[append] ( identifier[self] . identifier[enhanced_vd] )
identifier[num_bytes_to_add] += identifier[self] . identifier[enhanced_vd] . identifier[logical_block_size] ()
keyword[if] identifier[joliet] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[joliet_vd] = identifier[headervd] . identifier[joliet_vd_factory] ( identifier[joliet] , identifier[sys_ident_bytes] ,
identifier[vol_ident_bytes] , identifier[set_size] ,
identifier[seqnum] , identifier[log_block_size] ,
identifier[vol_set_ident_bytes] ,
identifier[pub_ident_bytes] ,
identifier[preparer_ident_bytes] ,
identifier[app_ident_bytes] ,
identifier[copyright_file_bytes] ,
identifier[abstract_file_bytes] ,
identifier[bibli_file_bytes] ,
identifier[real_vol_expire_date] ,
identifier[app_use_bytes] , identifier[xa] )
identifier[self] . identifier[svds] . identifier[append] ( identifier[self] . identifier[joliet_vd] )
identifier[num_bytes_to_add] += identifier[self] . identifier[joliet_vd] . identifier[logical_block_size] ()
identifier[self] . identifier[vdsts] . identifier[append] ( identifier[headervd] . identifier[vdst_factory] ())
identifier[num_bytes_to_add] += identifier[pvd_log_block_size]
keyword[if] identifier[udf] :
identifier[self] . identifier[_has_udf] = keyword[True]
identifier[self] . identifier[udf_bea] . identifier[new] ()
identifier[self] . identifier[udf_nsr] . identifier[new] ( literal[int] )
identifier[self] . identifier[udf_tea] . identifier[new] ()
identifier[num_bytes_to_add] += literal[int] * identifier[pvd_log_block_size]
identifier[self] . identifier[version_vd] = identifier[headervd] . identifier[version_vd_factory] ( identifier[pvd_log_block_size] )
identifier[num_bytes_to_add] += identifier[pvd_log_block_size]
keyword[if] identifier[udf] :
identifier[additional_extents] = literal[int] -( identifier[self] . identifier[pvd] . identifier[space_size] +( identifier[num_bytes_to_add] // identifier[pvd_log_block_size] ))
identifier[num_bytes_to_add] += identifier[additional_extents] * identifier[pvd_log_block_size]
identifier[self] . identifier[udf_main_descs] . identifier[pvd] . identifier[new] ()
identifier[self] . identifier[udf_main_descs] . identifier[impl_use] . identifier[new] ()
identifier[self] . identifier[udf_main_descs] . identifier[partition] . identifier[new] ()
identifier[self] . identifier[udf_main_descs] . identifier[logical_volume] . identifier[new] ()
identifier[self] . identifier[udf_main_descs] . identifier[unallocated_space] . identifier[new] ()
identifier[self] . identifier[udf_main_descs] . identifier[terminator] . identifier[new] ()
identifier[num_bytes_to_add] += literal[int] * identifier[pvd_log_block_size]
identifier[self] . identifier[udf_reserve_descs] . identifier[pvd] . identifier[new] ()
identifier[self] . identifier[udf_reserve_descs] . identifier[impl_use] . identifier[new] ()
identifier[self] . identifier[udf_reserve_descs] . identifier[partition] . identifier[new] ()
identifier[self] . identifier[udf_reserve_descs] . identifier[logical_volume] . identifier[new] ()
identifier[self] . identifier[udf_reserve_descs] . identifier[unallocated_space] . identifier[new] ()
identifier[self] . identifier[udf_reserve_descs] . identifier[terminator] . identifier[new] ()
identifier[num_bytes_to_add] += literal[int] * identifier[pvd_log_block_size]
identifier[self] . identifier[udf_logical_volume_integrity] . identifier[new] ()
identifier[self] . identifier[udf_logical_volume_integrity_terminator] . identifier[new] ()
identifier[num_bytes_to_add] += literal[int] * identifier[pvd_log_block_size]
identifier[anchor1] = identifier[udfmod] . identifier[UDFAnchorVolumeStructure] ()
identifier[anchor1] . identifier[new] ()
identifier[self] . identifier[udf_anchors] . identifier[append] ( identifier[anchor1] )
identifier[num_bytes_to_add] += identifier[pvd_log_block_size]
identifier[self] . identifier[udf_file_set] . identifier[new] ()
identifier[self] . identifier[udf_file_set_terminator] . identifier[new] ()
identifier[num_bytes_to_add] += literal[int] * identifier[pvd_log_block_size]
identifier[self] . identifier[udf_root] = identifier[udfmod] . identifier[UDFFileEntry] ()
identifier[self] . identifier[udf_root] . identifier[new] ( literal[int] , literal[string] , keyword[None] , identifier[pvd_log_block_size] )
identifier[num_bytes_to_add] += identifier[pvd_log_block_size]
identifier[parent] = identifier[udfmod] . identifier[UDFFileIdentifierDescriptor] ()
identifier[parent] . identifier[new] ( keyword[True] , keyword[True] , literal[string] , keyword[None] )
identifier[num_new_extents] = identifier[self] . identifier[udf_root] . identifier[add_file_ident_desc] ( identifier[parent] , identifier[pvd_log_block_size] )
identifier[num_bytes_to_add] += identifier[num_new_extents] * identifier[pvd_log_block_size]
identifier[num_partition_bytes_to_add] = literal[int]
identifier[ptr] = identifier[path_table_record] . identifier[PathTableRecord] ()
identifier[ptr] . identifier[new_root] ()
identifier[self] . identifier[pvd] . identifier[root_directory_record] (). identifier[set_ptr] ( identifier[ptr] )
identifier[num_partition_bytes_to_add] += literal[int] * identifier[pvd_log_block_size]
identifier[num_partition_bytes_to_add] += identifier[pvd_log_block_size]
identifier[self] . identifier[_create_dot] ( identifier[self] . identifier[pvd] , identifier[self] . identifier[pvd] . identifier[root_directory_record] (),
identifier[self] . identifier[rock_ridge] , identifier[self] . identifier[xa] , literal[int] )
identifier[self] . identifier[_create_dotdot] ( identifier[self] . identifier[pvd] , identifier[self] . identifier[pvd] . identifier[root_directory_record] (),
identifier[self] . identifier[rock_ridge] , keyword[False] , identifier[self] . identifier[xa] , literal[int] )
keyword[if] identifier[self] . identifier[joliet_vd] keyword[is] keyword[not] keyword[None] :
identifier[ptr] = identifier[path_table_record] . identifier[PathTableRecord] ()
identifier[ptr] . identifier[new_root] ()
identifier[self] . identifier[joliet_vd] . identifier[root_directory_record] (). identifier[set_ptr] ( identifier[ptr] )
identifier[num_partition_bytes_to_add] += literal[int] * identifier[pvd_log_block_size]
identifier[num_partition_bytes_to_add] += identifier[pvd_log_block_size]
identifier[self] . identifier[_create_dot] ( identifier[self] . identifier[joliet_vd] ,
identifier[self] . identifier[joliet_vd] . identifier[root_directory_record] (), literal[string] ,
keyword[False] ,- literal[int] )
identifier[self] . identifier[_create_dotdot] ( identifier[self] . identifier[joliet_vd] ,
identifier[self] . identifier[joliet_vd] . identifier[root_directory_record] (), literal[string] ,
keyword[False] , keyword[False] ,- literal[int] )
keyword[if] identifier[self] . identifier[rock_ridge] :
identifier[num_partition_bytes_to_add] += identifier[pvd_log_block_size]
keyword[if] identifier[udf] :
identifier[anchor2] = identifier[udfmod] . identifier[UDFAnchorVolumeStructure] ()
identifier[anchor2] . identifier[new] ()
identifier[self] . identifier[udf_anchors] . identifier[append] ( identifier[anchor2] )
identifier[num_partition_bytes_to_add] += identifier[pvd_log_block_size]
identifier[self] . identifier[_finish_add] ( identifier[num_bytes_to_add] , identifier[num_partition_bytes_to_add] )
identifier[self] . identifier[_initialized] = keyword[True] | def new(self, interchange_level=1, sys_ident='', vol_ident='', set_size=1, seqnum=1, log_block_size=2048, vol_set_ident=' ', pub_ident_str='', preparer_ident_str='', app_ident_str='', copyright_file='', abstract_file='', bibli_file='', vol_expire_date=None, app_use='', joliet=None, rock_ridge=None, xa=False, udf=None):
# type: (int, str, str, int, int, int, str, str, str, str, str, str, str, Optional[float], str, Optional[int], Optional[str], bool, Optional[str]) -> None
'\n Create a new ISO from scratch.\n\n Parameters:\n interchange_level - The ISO9660 interchange level to use; this dictates\n the rules on the names of files. Levels 1, 2, 3,\n and 4 are supported. Level 1 is the most\n conservative, and is the default, but level 3 is\n recommended.\n sys_ident - The system identification string to use on the new ISO.\n vol_ident - The volume identification string to use on the new ISO.\n set_size - The size of the set of ISOs this ISO is a part of.\n seqnum - The sequence number of the set of this ISO.\n log_block_size - The logical block size to use for the ISO. While ISO9660\n technically supports sizes other than 2048 (the default),\n this almost certainly doesn\'t work.\n vol_set_ident - The volume set identification string to use on the new ISO.\n pub_ident_str - The publisher identification string to use on the new ISO.\n preparer_ident_str - The preparer identification string to use on the new ISO.\n app_ident_str - The application identification string to use on the new ISO.\n copyright_file - The name of a file at the root of the ISO to use as the\n copyright file.\n abstract_file - The name of a file at the root of the ISO to use as the\n abstract file.\n bibli_file - The name of a file at the root of the ISO to use as the\n bibliographic file.\n vol_expire_date - The date that this ISO will expire at.\n app_use - Arbitrary data that the application can stuff into the primary\n volume descriptor of this ISO.\n joliet - A integer that can have the value 1, 2, or 3 for Joliet\n levels 1, 2, or 3 (3 is by far the most common), or None for\n no Joliet support (the default). For legacy reasons, this\n parameter also accepts a boolean, where the value of \'False\'\n means no Joliet and a value of \'True\' means level 3.\n rock_ridge - Whether to make this ISO have the Rock Ridge extensions or\n not. The default value of None does not add Rock Ridge\n extensions. A string value of \'1.09\', \'1.10\', or \'1.12\'\n adds the specified Rock Ridge version to the ISO. If\n unsure, pass \'1.09\' to ensure maximum compatibility.\n xa - Whether to add the ISO9660 Extended Attribute extensions to this\n ISO. The default is False.\n udf - Whether to add UDF support to this ISO. If it is None (the\n default), no UDF support is added. If it is "2.60", version 2.60\n of the UDF spec is used. All other values are disallowed.\n Returns:\n Nothing.\n '
# Start out with argument checking.
if self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object') # depends on [control=['if'], data=[]]
if interchange_level < 1 or interchange_level > 4:
raise pycdlibexception.PyCdlibInvalidInput('Invalid interchange level (must be between 1 and 4)') # depends on [control=['if'], data=[]]
if rock_ridge and rock_ridge not in ['1.09', '1.10', '1.12']:
raise pycdlibexception.PyCdlibInvalidInput('Rock Ridge value must be None (no Rock Ridge), 1.09, 1.10, or 1.12') # depends on [control=['if'], data=[]]
if udf and udf != '2.60':
raise pycdlibexception.PyCdlibInvalidInput('UDF value must be empty (no UDF), or 2.60') # depends on [control=['if'], data=[]]
# Now save off the arguments we need to keep around.
if not app_ident_str:
app_ident_str = 'PyCdlib (C) 2015-2018 Chris Lalancette' # depends on [control=['if'], data=[]]
self.interchange_level = interchange_level
self.xa = xa
if isinstance(joliet, bool):
if joliet:
joliet = 3 # depends on [control=['if'], data=[]]
else:
joliet = None # depends on [control=['if'], data=[]]
if rock_ridge:
self.rock_ridge = rock_ridge # depends on [control=['if'], data=[]]
sys_ident_bytes = sys_ident.encode('utf-8')
vol_ident_bytes = vol_ident.encode('utf-8')
vol_set_ident_bytes = vol_set_ident.encode('utf-8')
pub_ident_bytes = pub_ident_str.encode('utf-8')
preparer_ident_bytes = preparer_ident_str.encode('utf-8')
app_ident_bytes = app_ident_str.encode('utf-8')
copyright_file_bytes = copyright_file.encode('utf-8')
abstract_file_bytes = abstract_file.encode('utf-8')
bibli_file_bytes = bibli_file.encode('utf-8')
app_use_bytes = app_use.encode('utf-8')
if vol_expire_date is None:
real_vol_expire_date = 0.0 # depends on [control=['if'], data=[]]
else:
real_vol_expire_date = vol_expire_date
# Now start creating the ISO.
self.pvd = headervd.pvd_factory(sys_ident_bytes, vol_ident_bytes, set_size, seqnum, log_block_size, vol_set_ident_bytes, pub_ident_bytes, preparer_ident_bytes, app_ident_bytes, copyright_file_bytes, abstract_file_bytes, bibli_file_bytes, real_vol_expire_date, app_use_bytes, xa)
self.pvds.append(self.pvd)
pvd_log_block_size = self.pvd.logical_block_size()
num_bytes_to_add = 0
if self.interchange_level == 4:
self.enhanced_vd = headervd.enhanced_vd_factory(sys_ident_bytes, vol_ident_bytes, set_size, seqnum, log_block_size, vol_set_ident_bytes, pub_ident_bytes, preparer_ident_bytes, app_ident_bytes, copyright_file_bytes, abstract_file_bytes, bibli_file_bytes, real_vol_expire_date, app_use_bytes, xa)
self.svds.append(self.enhanced_vd)
num_bytes_to_add += self.enhanced_vd.logical_block_size() # depends on [control=['if'], data=[]]
if joliet is not None:
self.joliet_vd = headervd.joliet_vd_factory(joliet, sys_ident_bytes, vol_ident_bytes, set_size, seqnum, log_block_size, vol_set_ident_bytes, pub_ident_bytes, preparer_ident_bytes, app_ident_bytes, copyright_file_bytes, abstract_file_bytes, bibli_file_bytes, real_vol_expire_date, app_use_bytes, xa)
self.svds.append(self.joliet_vd)
# Now that we have added joliet, we need to add the new space to the
# PVD for the VD itself.
num_bytes_to_add += self.joliet_vd.logical_block_size() # depends on [control=['if'], data=['joliet']]
self.vdsts.append(headervd.vdst_factory())
num_bytes_to_add += pvd_log_block_size
if udf:
self._has_udf = True
# Create the Bridge Recognition Volume Sequence
self.udf_bea.new()
self.udf_nsr.new(2)
self.udf_tea.new()
num_bytes_to_add += 3 * pvd_log_block_size # depends on [control=['if'], data=[]]
# We always create an empty version volume descriptor
self.version_vd = headervd.version_vd_factory(pvd_log_block_size)
num_bytes_to_add += pvd_log_block_size
if udf:
# We need to pad out to extent 32. The padding should be the
# distance between the current PVD space size and 32.
additional_extents = 32 - (self.pvd.space_size + num_bytes_to_add // pvd_log_block_size)
num_bytes_to_add += additional_extents * pvd_log_block_size
# Create the Main Volume Descriptor Sequence
self.udf_main_descs.pvd.new()
self.udf_main_descs.impl_use.new()
self.udf_main_descs.partition.new()
self.udf_main_descs.logical_volume.new()
self.udf_main_descs.unallocated_space.new()
self.udf_main_descs.terminator.new()
num_bytes_to_add += 16 * pvd_log_block_size
# Create the Reserve Volume Descriptor Sequence
self.udf_reserve_descs.pvd.new()
self.udf_reserve_descs.impl_use.new()
self.udf_reserve_descs.partition.new()
self.udf_reserve_descs.logical_volume.new()
self.udf_reserve_descs.unallocated_space.new()
self.udf_reserve_descs.terminator.new()
num_bytes_to_add += 16 * pvd_log_block_size
# Create the Logical Volume Integrity Sequence
self.udf_logical_volume_integrity.new()
self.udf_logical_volume_integrity_terminator.new()
num_bytes_to_add += 192 * pvd_log_block_size
# Create the Anchor
anchor1 = udfmod.UDFAnchorVolumeStructure()
anchor1.new()
self.udf_anchors.append(anchor1)
num_bytes_to_add += pvd_log_block_size
# Create the File Set
self.udf_file_set.new()
self.udf_file_set_terminator.new()
num_bytes_to_add += 2 * pvd_log_block_size
# Create the root directory, and the 'parent' entry inside.
self.udf_root = udfmod.UDFFileEntry()
self.udf_root.new(0, 'dir', None, pvd_log_block_size)
num_bytes_to_add += pvd_log_block_size
parent = udfmod.UDFFileIdentifierDescriptor()
parent.new(True, True, b'', None)
num_new_extents = self.udf_root.add_file_ident_desc(parent, pvd_log_block_size)
num_bytes_to_add += num_new_extents * pvd_log_block_size # depends on [control=['if'], data=[]]
num_partition_bytes_to_add = 0
    # Create the PTR, and add the 4 extents that comprise the LE PTR and
    # BE PTR to the number of bytes to add.
ptr = path_table_record.PathTableRecord()
ptr.new_root()
self.pvd.root_directory_record().set_ptr(ptr)
num_partition_bytes_to_add += 4 * pvd_log_block_size
# Also add one extent to the size for the root directory record.
num_partition_bytes_to_add += pvd_log_block_size
self._create_dot(self.pvd, self.pvd.root_directory_record(), self.rock_ridge, self.xa, 16749)
self._create_dotdot(self.pvd, self.pvd.root_directory_record(), self.rock_ridge, False, self.xa, 16749)
if self.joliet_vd is not None:
        # Create the PTR, and add the 4 extents that comprise the LE PTR and
        # BE PTR to the number of bytes to add.
ptr = path_table_record.PathTableRecord()
ptr.new_root()
self.joliet_vd.root_directory_record().set_ptr(ptr)
num_partition_bytes_to_add += 4 * pvd_log_block_size
# Also add one extent to the size for the root directory record.
num_partition_bytes_to_add += pvd_log_block_size
self._create_dot(self.joliet_vd, self.joliet_vd.root_directory_record(), '', False, -1)
self._create_dotdot(self.joliet_vd, self.joliet_vd.root_directory_record(), '', False, False, -1) # depends on [control=['if'], data=[]]
if self.rock_ridge:
num_partition_bytes_to_add += pvd_log_block_size # depends on [control=['if'], data=[]]
if udf:
anchor2 = udfmod.UDFAnchorVolumeStructure()
anchor2.new()
self.udf_anchors.append(anchor2)
num_partition_bytes_to_add += pvd_log_block_size # depends on [control=['if'], data=[]]
self._finish_add(num_bytes_to_add, num_partition_bytes_to_add)
self._initialized = True |
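A minimal usage sketch for the new() method above. It assumes the method lives on pycdlib.PyCdlib, as in the upstream project this row was extracted from; the output path is hypothetical.

import pycdlib

iso = pycdlib.PyCdlib()
# Level-1 ISO9660 with Joliet level 3, Rock Ridge 1.09, and UDF 2.60 on top.
iso.new(interchange_level=1, joliet=3, rock_ridge='1.09', udf='2.60')
iso.write('example.iso')  # hypothetical output path
iso.close()

Passing joliet=True instead of 3 also yields Joliet level 3, per the legacy boolean handling in the argument checks above.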
async def set_presence(self, status: str = "online", ignore_cache: bool = False):
"""
Set the online status of the user. See also: `API reference`_
Args:
status: The online status of the user. Allowed values: "online", "offline", "unavailable".
ignore_cache: Whether or not to set presence even if the cache says the presence is
already set to that value.
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#put-matrix-client-r0-presence-userid-status
"""
await self.ensure_registered()
if not ignore_cache and self.state_store.has_presence(self.mxid, status):
return
content = {
"presence": status
}
resp = await self.client.request("PUT", f"/presence/{self.mxid}/status", content)
self.state_store.set_presence(self.mxid, status) | <ast.AsyncFunctionDef object at 0x7da1b0b30c10> | keyword[async] keyword[def] identifier[set_presence] ( identifier[self] , identifier[status] : identifier[str] = literal[string] , identifier[ignore_cache] : identifier[bool] = keyword[False] ):
literal[string]
keyword[await] identifier[self] . identifier[ensure_registered] ()
keyword[if] keyword[not] identifier[ignore_cache] keyword[and] identifier[self] . identifier[state_store] . identifier[has_presence] ( identifier[self] . identifier[mxid] , identifier[status] ):
keyword[return]
identifier[content] ={
literal[string] : identifier[status]
}
identifier[resp] = keyword[await] identifier[self] . identifier[client] . identifier[request] ( literal[string] , literal[string] , identifier[content] )
identifier[self] . identifier[state_store] . identifier[set_presence] ( identifier[self] . identifier[mxid] , identifier[status] ) | async def set_presence(self, status: str='online', ignore_cache: bool=False):
"""
Set the online status of the user. See also: `API reference`_
Args:
status: The online status of the user. Allowed values: "online", "offline", "unavailable".
ignore_cache: Whether or not to set presence even if the cache says the presence is
already set to that value.
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#put-matrix-client-r0-presence-userid-status
"""
await self.ensure_registered()
if not ignore_cache and self.state_store.has_presence(self.mxid, status):
return # depends on [control=['if'], data=[]]
content = {'presence': status}
resp = await self.client.request('PUT', f'/presence/{self.mxid}/status', content)
self.state_store.set_presence(self.mxid, status) |
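A hedged sketch of driving the coroutine above; the intent object standing in for self, and the event-loop setup, are assumptions rather than part of this row.

import asyncio

async def demo(intent):
    await intent.set_presence('online')                     # PUTs to the server, then caches
    await intent.set_presence('online')                     # no-op: the cache already matches
    await intent.set_presence('online', ignore_cache=True)  # forces the PUT despite the cache

# asyncio.run(demo(intent)) would run this once a concrete object exists.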
def delete_country_by_id(cls, country_id, **kwargs):
"""Delete Country
Delete an instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()
    :param bool async: whether to make the HTTP request asynchronously
:param str country_id: ID of country to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_country_by_id_with_http_info(country_id, **kwargs)
else:
(data) = cls._delete_country_by_id_with_http_info(country_id, **kwargs)
return data | def function[delete_country_by_id, parameter[cls, country_id]]:
constant[Delete Country
Delete an instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._delete_country_by_id_with_http_info, parameter[name[country_id]]]] | keyword[def] identifier[delete_country_by_id] ( identifier[cls] , identifier[country_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_delete_country_by_id_with_http_info] ( identifier[country_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_delete_country_by_id_with_http_info] ( identifier[country_id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_country_by_id(cls, country_id, **kwargs):
"""Delete Country
Delete an instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()
    :param bool async: whether to make the HTTP request asynchronously
:param str country_id: ID of country to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_country_by_id_with_http_info(country_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._delete_country_by_id_with_http_info(country_id, **kwargs)
return data |
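A sketch of both calling conventions from the docstring; Country is a hypothetical owner for the classmethod. Because async became a reserved word in Python 3.7, the keyword argument has to be supplied via dict expansion.

# Synchronous: blocks until the DELETE completes and returns None.
Country.delete_country_by_id('US')

# Asynchronous: returns a request thread whose get() yields the result.
thread = Country.delete_country_by_id('US', **{'async': True})
result = thread.get()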
def parse(readDataInstance, nDebugEntries):
"""
Returns a new L{ImageDebugDirectories} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageDebugDirectories} object.
@type nDebugEntries: int
@param nDebugEntries: Number of L{ImageDebugDirectory} objects in the C{readDataInstance} object.
@rtype: L{ImageDebugDirectories}
@return: A new L{ImageDebugDirectories} object.
@raise DataLengthException: If not enough data to read in the C{readDataInstance} object.
"""
dbgEntries = ImageDebugDirectories()
dataLength = len(readDataInstance)
toRead = nDebugEntries * consts.SIZEOF_IMAGE_DEBUG_ENTRY32
if dataLength >= toRead:
for i in range(nDebugEntries):
dbgEntry = ImageDebugDirectory.parse(readDataInstance)
dbgEntries.append(dbgEntry)
else:
raise excep.DataLengthException("Not enough bytes to read.")
return dbgEntries | def function[parse, parameter[readDataInstance, nDebugEntries]]:
constant[
Returns a new L{ImageDebugDirectories} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageDebugDirectories} object.
@type nDebugEntries: int
@param nDebugEntries: Number of L{ImageDebugDirectory} objects in the C{readDataInstance} object.
@rtype: L{ImageDebugDirectories}
@return: A new L{ImageDebugDirectories} object.
@raise DataLengthException: If not enough data to read in the C{readDataInstance} object.
]
variable[dbgEntries] assign[=] call[name[ImageDebugDirectories], parameter[]]
variable[dataLength] assign[=] call[name[len], parameter[name[readDataInstance]]]
variable[toRead] assign[=] binary_operation[name[nDebugEntries] * name[consts].SIZEOF_IMAGE_DEBUG_ENTRY32]
if compare[name[dataLength] greater_or_equal[>=] name[toRead]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[nDebugEntries]]]] begin[:]
variable[dbgEntry] assign[=] call[name[ImageDebugDirectory].parse, parameter[name[readDataInstance]]]
call[name[dbgEntries].append, parameter[name[dbgEntry]]]
return[name[dbgEntries]] | keyword[def] identifier[parse] ( identifier[readDataInstance] , identifier[nDebugEntries] ):
literal[string]
identifier[dbgEntries] = identifier[ImageDebugDirectories] ()
identifier[dataLength] = identifier[len] ( identifier[readDataInstance] )
identifier[toRead] = identifier[nDebugEntries] * identifier[consts] . identifier[SIZEOF_IMAGE_DEBUG_ENTRY32]
keyword[if] identifier[dataLength] >= identifier[toRead] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nDebugEntries] ):
identifier[dbgEntry] = identifier[ImageDebugDirectory] . identifier[parse] ( identifier[readDataInstance] )
identifier[dbgEntries] . identifier[append] ( identifier[dbgEntry] )
keyword[else] :
keyword[raise] identifier[excep] . identifier[DataLengthException] ( literal[string] )
keyword[return] identifier[dbgEntries] | def parse(readDataInstance, nDebugEntries):
"""
Returns a new L{ImageDebugDirectories} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{ImageDebugDirectories} object.
@type nDebugEntries: int
@param nDebugEntries: Number of L{ImageDebugDirectory} objects in the C{readDataInstance} object.
@rtype: L{ImageDebugDirectories}
@return: A new L{ImageDebugDirectories} object.
@raise DataLengthException: If not enough data to read in the C{readDataInstance} object.
"""
dbgEntries = ImageDebugDirectories()
dataLength = len(readDataInstance)
toRead = nDebugEntries * consts.SIZEOF_IMAGE_DEBUG_ENTRY32
if dataLength >= toRead:
for i in range(nDebugEntries):
dbgEntry = ImageDebugDirectory.parse(readDataInstance)
dbgEntries.append(dbgEntry) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
raise excep.DataLengthException('Not enough bytes to read.')
return dbgEntries |
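A hedged sketch of calling parse(); the ReadData constructor, its attachment to ImageDebugDirectories as a static method, and the source of raw_debug_bytes are all assumptions. The buffer must hold at least nDebugEntries * consts.SIZEOF_IMAGE_DEBUG_ENTRY32 bytes, otherwise DataLengthException is raised.

# raw_debug_bytes would be sliced out of a PE file's debug data directory (not shown).
rd = ReadData(raw_debug_bytes)
dbg_dirs = ImageDebugDirectories.parse(rd, nDebugEntries=2)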
def merge_dict(dict1, dict2):
# type: (dict, dict) -> dict
"""Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary
"""
if not isinstance(dict1, dict) or not isinstance(dict2, dict):
raise ValueError('dict1 or dict2 is not a dictionary')
result = copy.deepcopy(dict1)
for k, v in dict2.items():
if k in result and isinstance(result[k], dict):
result[k] = merge_dict(result[k], v)
else:
result[k] = copy.deepcopy(v)
return result | def function[merge_dict, parameter[dict1, dict2]]:
constant[Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary
]
if <ast.BoolOp object at 0x7da20c991780> begin[:]
<ast.Raise object at 0x7da20c991c30>
variable[result] assign[=] call[name[copy].deepcopy, parameter[name[dict1]]]
for taget[tuple[[<ast.Name object at 0x7da20c990c70>, <ast.Name object at 0x7da20c990b50>]]] in starred[call[name[dict2].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20c9909a0> begin[:]
call[name[result]][name[k]] assign[=] call[name[merge_dict], parameter[call[name[result]][name[k]], name[v]]]
return[name[result]] | keyword[def] identifier[merge_dict] ( identifier[dict1] , identifier[dict2] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[dict1] , identifier[dict] ) keyword[or] keyword[not] identifier[isinstance] ( identifier[dict2] , identifier[dict] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[result] = identifier[copy] . identifier[deepcopy] ( identifier[dict1] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dict2] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[result] keyword[and] identifier[isinstance] ( identifier[result] [ identifier[k] ], identifier[dict] ):
identifier[result] [ identifier[k] ]= identifier[merge_dict] ( identifier[result] [ identifier[k] ], identifier[v] )
keyword[else] :
identifier[result] [ identifier[k] ]= identifier[copy] . identifier[deepcopy] ( identifier[v] )
keyword[return] identifier[result] | def merge_dict(dict1, dict2):
# type: (dict, dict) -> dict
'Recursively merge dictionaries: dict2 on to dict1. This differs\n from dict.update() in that values that are dicts are recursively merged.\n Note that only dict value types are merged, not lists, etc.\n\n :param dict dict1: dictionary to merge to\n :param dict dict2: dictionary to merge with\n :rtype: dict\n :return: merged dictionary\n '
if not isinstance(dict1, dict) or not isinstance(dict2, dict):
raise ValueError('dict1 or dict2 is not a dictionary') # depends on [control=['if'], data=[]]
result = copy.deepcopy(dict1)
for (k, v) in dict2.items():
if k in result and isinstance(result[k], dict):
result[k] = merge_dict(result[k], v) # depends on [control=['if'], data=[]]
else:
result[k] = copy.deepcopy(v) # depends on [control=['for'], data=[]]
return result |
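A worked example of the recursive merge above: dict values merge key-by-key, while non-dict values, lists included, are replaced outright. merge_dict itself relies on a module-level import copy not shown in this row, and neither input is mutated thanks to the deep copies.

base = {'a': 1, 'nested': {'x': 1, 'y': 2}, 'tags': ['old']}
override = {'nested': {'y': 20, 'z': 30}, 'tags': ['new']}

print(merge_dict(base, override))
# {'a': 1, 'nested': {'x': 1, 'y': 20, 'z': 30}, 'tags': ['new']}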
def _reset(self):
"""
Reset the state of mail object.
"""
log.debug("Reset all variables")
self._attachments = []
self._text_plain = []
self._text_html = []
self._defects = []
self._defects_categories = set()
self._has_defects = False | def function[_reset, parameter[self]]:
constant[
Reset the state of mail object.
]
call[name[log].debug, parameter[constant[Reset all variables]]]
name[self]._attachments assign[=] list[[]]
name[self]._text_plain assign[=] list[[]]
name[self]._text_html assign[=] list[[]]
name[self]._defects assign[=] list[[]]
name[self]._defects_categories assign[=] call[name[set], parameter[]]
name[self]._has_defects assign[=] constant[False] | keyword[def] identifier[_reset] ( identifier[self] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_attachments] =[]
identifier[self] . identifier[_text_plain] =[]
identifier[self] . identifier[_text_html] =[]
identifier[self] . identifier[_defects] =[]
identifier[self] . identifier[_defects_categories] = identifier[set] ()
identifier[self] . identifier[_has_defects] = keyword[False] | def _reset(self):
"""
Reset the state of mail object.
"""
log.debug('Reset all variables')
self._attachments = []
self._text_plain = []
self._text_html = []
self._defects = []
self._defects_categories = set()
self._has_defects = False |
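A hedged sketch of the reset-before-reuse pattern _reset() enables; the parse method below is an assumed sibling member of the same mail object, not part of this row.

import email

def parse_from_string(self, raw_message):  # assumed sibling method on the mail object
    self._reset()  # drop attachments, bodies, and defects left by any earlier parse
    message = email.message_from_string(raw_message)
    # ... walk `message` and repopulate the lists and flags cleared above ...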
def get(self, **kwargs):
"""
:param texteRecherche:
:param numAmend:
:param idArticle:
:param idAuteur:
:param idDossierLegislatif:
:param idExamen:
:param idExamens:
:param periodeParlementaire:
:param dateDebut:
:param dateFin:
:param rows:
:param start:
:param sort:
"""
params = self.default_params.copy()
params.update(kwargs)
start = time.time()
response = requests.get(self.base_url, params=params)
end = time.time()
LOGGER.debug(
'fetched amendements with search params: %s in %0.2f s',
params,
end - start
)
return parse_amendements_summary(response.url, response.json()) | def function[get, parameter[self]]:
constant[
:param texteRecherche:
:param numAmend:
:param idArticle:
:param idAuteur:
:param idDossierLegislatif:
:param idExamen:
:param idExamens:
:param periodeParlementaire:
:param dateDebut:
:param dateFin:
:param rows:
:param start:
:param sort:
]
variable[params] assign[=] call[name[self].default_params.copy, parameter[]]
call[name[params].update, parameter[name[kwargs]]]
variable[start] assign[=] call[name[time].time, parameter[]]
variable[response] assign[=] call[name[requests].get, parameter[name[self].base_url]]
variable[end] assign[=] call[name[time].time, parameter[]]
call[name[LOGGER].debug, parameter[constant[fetched amendements with search params: %s in %0.2f s], name[params], binary_operation[name[end] - name[start]]]]
return[call[name[parse_amendements_summary], parameter[name[response].url, call[name[response].json, parameter[]]]]] | keyword[def] identifier[get] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[params] = identifier[self] . identifier[default_params] . identifier[copy] ()
identifier[params] . identifier[update] ( identifier[kwargs] )
identifier[start] = identifier[time] . identifier[time] ()
identifier[response] = identifier[requests] . identifier[get] ( identifier[self] . identifier[base_url] , identifier[params] = identifier[params] )
identifier[end] = identifier[time] . identifier[time] ()
identifier[LOGGER] . identifier[debug] (
literal[string] ,
identifier[params] ,
identifier[end] - identifier[start]
)
keyword[return] identifier[parse_amendements_summary] ( identifier[response] . identifier[url] , identifier[response] . identifier[json] ()) | def get(self, **kwargs):
"""
:param texteRecherche:
:param numAmend:
:param idArticle:
:param idAuteur:
:param idDossierLegislatif:
:param idExamen:
:param idExamens:
:param periodeParlementaire:
:param dateDebut:
:param dateFin:
:param rows:
:param start:
:param sort:
"""
params = self.default_params.copy()
params.update(kwargs)
start = time.time()
response = requests.get(self.base_url, params=params)
end = time.time()
LOGGER.debug('fetched amendements with search params: %s in %0.2f s', params, end - start)
return parse_amendements_summary(response.url, response.json()) |
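A hedged sketch of calling the search method above; the service class name and the parameter values are assumptions, and the return value is whatever parse_amendements_summary builds from the response JSON.

service = AmendementSearchService()  # hypothetical owner of get()
summary = service.get(texteRecherche='environnement', rows=50, start=1)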
def fromMessage(cls, message):
"""Generate a ServerError instance, extracting the error text
and the error code from the message."""
error_text = message.getArg(
OPENID_NS, 'error', '<no error message supplied>')
error_code = message.getArg(OPENID_NS, 'error_code')
return cls(error_text, error_code, message) | def function[fromMessage, parameter[cls, message]]:
constant[Generate a ServerError instance, extracting the error text
and the error code from the message.]
variable[error_text] assign[=] call[name[message].getArg, parameter[name[OPENID_NS], constant[error], constant[<no error message supplied>]]]
variable[error_code] assign[=] call[name[message].getArg, parameter[name[OPENID_NS], constant[error_code]]]
return[call[name[cls], parameter[name[error_text], name[error_code], name[message]]]] | keyword[def] identifier[fromMessage] ( identifier[cls] , identifier[message] ):
literal[string]
identifier[error_text] = identifier[message] . identifier[getArg] (
identifier[OPENID_NS] , literal[string] , literal[string] )
identifier[error_code] = identifier[message] . identifier[getArg] ( identifier[OPENID_NS] , literal[string] )
keyword[return] identifier[cls] ( identifier[error_text] , identifier[error_code] , identifier[message] ) | def fromMessage(cls, message):
"""Generate a ServerError instance, extracting the error text
and the error code from the message."""
error_text = message.getArg(OPENID_NS, 'error', '<no error message supplied>')
error_code = message.getArg(OPENID_NS, 'error_code')
return cls(error_text, error_code, message) |
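A small construction sketch, assuming only the getArg(ns, key, default) lookup of the openid Message API; FakeMessage is a stand-in, not the real class.

class FakeMessage:
    # Minimal stand-in exposing the getArg lookup used by fromMessage.
    def __init__(self, args):
        self._args = args

    def getArg(self, ns, key, default=None):
        return self._args.get(key, default)

msg = FakeMessage({"error": "bad nonce", "error_code": "invalid_request"})
err = ServerError.fromMessage(msg)  # ServerError defines the classmethod above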
def is_bootstrapped(metadata):
"""Return True if cihai is correctly bootstrapped."""
fields = UNIHAN_FIELDS + DEFAULT_COLUMNS
if TABLE_NAME in metadata.tables.keys():
table = metadata.tables[TABLE_NAME]
if set(fields) == set(c.name for c in table.columns):
return True
else:
return False
else:
return False | def function[is_bootstrapped, parameter[metadata]]:
constant[Return True if cihai is correctly bootstrapped.]
variable[fields] assign[=] binary_operation[name[UNIHAN_FIELDS] + name[DEFAULT_COLUMNS]]
if compare[name[TABLE_NAME] in call[name[metadata].tables.keys, parameter[]]] begin[:]
variable[table] assign[=] call[name[metadata].tables][name[TABLE_NAME]]
if compare[call[name[set], parameter[name[fields]]] equal[==] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b19092a0>]]] begin[:]
return[constant[True]] | keyword[def] identifier[is_bootstrapped] ( identifier[metadata] ):
literal[string]
identifier[fields] = identifier[UNIHAN_FIELDS] + identifier[DEFAULT_COLUMNS]
keyword[if] identifier[TABLE_NAME] keyword[in] identifier[metadata] . identifier[tables] . identifier[keys] ():
identifier[table] = identifier[metadata] . identifier[tables] [ identifier[TABLE_NAME] ]
keyword[if] identifier[set] ( identifier[fields] )== identifier[set] ( identifier[c] . identifier[name] keyword[for] identifier[c] keyword[in] identifier[table] . identifier[columns] ):
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[False] | def is_bootstrapped(metadata):
"""Return True if cihai is correctly bootstrapped."""
fields = UNIHAN_FIELDS + DEFAULT_COLUMNS
if TABLE_NAME in metadata.tables.keys():
table = metadata.tables[TABLE_NAME]
if set(fields) == set((c.name for c in table.columns)):
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=['TABLE_NAME']]
else:
return False |
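A hedged usage sketch with SQLAlchemy reflection; the database path is a placeholder, and UNIHAN_FIELDS, DEFAULT_COLUMNS and TABLE_NAME are assumed from this module.

from sqlalchemy import create_engine, MetaData

engine = create_engine("sqlite:///cihai.db")  # placeholder database path
metadata = MetaData()
metadata.reflect(bind=engine)  # load existing table definitions
if not is_bootstrapped(metadata):
    print("Unihan table missing or columns differ; bootstrap required.")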
def do_set(self, line):
"""set [parameter [value]] set (without parameters): Display the value of all
session variables.
set <session variable>: Display the value of a single session variable. set
<session variable> <value>: Set the value of a session variable.
"""
session_parameter, value = self._split_args(line, 0, 2)
if value is None:
self._command_processor.get_session().print_variable(session_parameter)
else:
self._command_processor.get_session().set_with_conversion(
session_parameter, value
)
self._print_info_if_verbose(
'Set session variable {} to "{}"'.format(session_parameter, value)
) | def function[do_set, parameter[self, line]]:
constant[set [parameter [value]]
set (without parameters): Display the value of all session variables.
set <session variable>: Display the value of a single session variable.
set <session variable> <value>: Set the value of a session variable.
]
<ast.Tuple object at 0x7da20c992350> assign[=] call[name[self]._split_args, parameter[name[line], constant[0], constant[2]]]
if compare[name[value] is constant[None]] begin[:]
call[call[name[self]._command_processor.get_session, parameter[]].print_variable, parameter[name[session_parameter]]] | keyword[def] identifier[do_set] ( identifier[self] , identifier[line] ):
literal[string]
identifier[session_parameter] , identifier[value] = identifier[self] . identifier[_split_args] ( identifier[line] , literal[int] , literal[int] )
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[self] . identifier[_command_processor] . identifier[get_session] (). identifier[print_variable] ( identifier[session_parameter] )
keyword[else] :
identifier[self] . identifier[_command_processor] . identifier[get_session] (). identifier[set_with_conversion] (
identifier[session_parameter] , identifier[value]
)
identifier[self] . identifier[_print_info_if_verbose] (
literal[string] . identifier[format] ( identifier[session_parameter] , identifier[value] )
) | def do_set(self, line):
"""set [parameter [value]] set (without parameters): Display the value of all
session variables.
set <session variable>: Display the value of a single session variable. set
<session variable> <value>: Set the value of a session variable.
"""
(session_parameter, value) = self._split_args(line, 0, 2)
if value is None:
self._command_processor.get_session().print_variable(session_parameter) # depends on [control=['if'], data=[]]
else:
self._command_processor.get_session().set_with_conversion(session_parameter, value)
self._print_info_if_verbose('Set session variable {} to "{}"'.format(session_parameter, value)) |
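Usage sketch inside an interactive cmd-style shell; `shell` is an assumed instance of the command-processor class that defines do_set, and the variable name is illustrative.

shell.do_set("")              # no args: print every session variable
shell.do_set("verbose")       # one arg: print a single variable
shell.do_set("verbose true")  # two args: set the variable (type-converted)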
def getApplicationsTransitionStateNameFromEnum(self, state):
"""Returns a string for an application transition state"""
fn = self.function_table.getApplicationsTransitionStateNameFromEnum
result = fn(state)
return result | def function[getApplicationsTransitionStateNameFromEnum, parameter[self, state]]:
constant[Returns a string for an application transition state]
variable[fn] assign[=] name[self].function_table.getApplicationsTransitionStateNameFromEnum
variable[result] assign[=] call[name[fn], parameter[name[state]]]
return[name[result]] | keyword[def] identifier[getApplicationsTransitionStateNameFromEnum] ( identifier[self] , identifier[state] ):
literal[string]
identifier[fn] = identifier[self] . identifier[function_table] . identifier[getApplicationsTransitionStateNameFromEnum]
identifier[result] = identifier[fn] ( identifier[state] )
keyword[return] identifier[result] | def getApplicationsTransitionStateNameFromEnum(self, state):
"""Returns a string for an application transition state"""
fn = self.function_table.getApplicationsTransitionStateNameFromEnum
result = fn(state)
return result |
def get_account(self, headers=None, prefix=None, delimiter=None,
marker=None, end_marker=None, limit=None, query=None,
cdn=False, decode_json=True):
"""
GETs the account and returns the results. This is done to list
the containers for the account. Some useful headers are also
returned:
=========================== =================================
x-account-bytes-used Object storage used for the
account, in bytes.
x-account-container-count The number of containers in the
account.
x-account-object-count The number of objects in the
account.
=========================== =================================
Also, any user headers beginning with x-account-meta- are
returned.
These values can be delayed depending on the Swift cluster.
:param headers: Additional headers to send with the request.
:param prefix: The prefix container names must match to be
listed.
:param delimiter: The delimiter for the listing. Delimiters
indicate how far to progress through container names
before "rolling them up". For instance, a delimiter='.'
query on an account with the containers::
one.one
one.two
two
three.one
would return the JSON value of::
[{'subdir': 'one.'},
{'count': 0, 'bytes': 0, 'name': 'two'},
{'subdir': 'three.'}]
Using this with prefix can allow you to traverse a pseudo
hierarchy.
:param marker: Only container names after this marker will be
returned. Swift returns a limited number of containers
per request (often 10,000). To get the next batch of
names, you issue another query with the marker set to the
last name you received. You can continue to issue
requests until you receive no more names.
:param end_marker: Only container names before this marker will be
returned.
:param limit: Limits the size of the list returned per
request. The default and maximum depend on the Swift
cluster (usually 10,000).
:param query: Set to a dict of query values to send on the
query string of the request.
:param cdn: If set True, the CDN management interface will be
used.
:param decode_json: If set False, the usual decoding of the
JSON response will be skipped and the raw contents will
be returned instead.
:returns: A tuple of (status, reason, headers, contents).
:status: is an int for the HTTP status code.
:reason: is the str for the HTTP status (ex: "Ok").
:headers: is a dict with all lowercase keys of the HTTP
headers; if a header has multiple values, it will be a
list.
:contents: is the decoded JSON response or the raw str
for the HTTP body.
"""
query = dict(query or {})
query['format'] = 'json'
if prefix:
query['prefix'] = prefix
if delimiter:
query['delimiter'] = delimiter
if marker:
query['marker'] = marker
if end_marker:
query['end_marker'] = end_marker
if limit:
query['limit'] = limit
return self.request(
'GET', '', '', headers, decode_json=decode_json, query=query,
cdn=cdn) | def function[get_account, parameter[self, headers, prefix, delimiter, marker, end_marker, limit, query, cdn, decode_json]]:
constant[
GETs the account and returns the results. This is done to list
the containers for the account. Some useful headers are also
returned:
=========================== =================================
x-account-bytes-used Object storage used for the
account, in bytes.
x-account-container-count The number of containers in the
account.
x-account-object-count The number of objects in the
account.
=========================== =================================
Also, any user headers beginning with x-account-meta- are
returned.
These values can be delayed depending on the Swift cluster.
:param headers: Additional headers to send with the request.
:param prefix: The prefix container names must match to be
listed.
:param delimiter: The delimiter for the listing. Delimiters
indicate how far to progress through container names
before "rolling them up". For instance, a delimiter='.'
query on an account with the containers::
one.one
one.two
two
three.one
would return the JSON value of::
[{'subdir': 'one.'},
{'count': 0, 'bytes': 0, 'name': 'two'},
{'subdir': 'three.'}]
Using this with prefix can allow you to traverse a pseudo
hierarchy.
:param marker: Only container names after this marker will be
returned. Swift returns a limited number of containers
per request (often 10,000). To get the next batch of
names, you issue another query with the marker set to the
last name you received. You can continue to issue
requests until you receive no more names.
:param end_marker: Only container names before this marker will be
returned.
:param limit: Limits the size of the list returned per
request. The default and maximum depend on the Swift
cluster (usually 10,000).
:param query: Set to a dict of query values to send on the
query string of the request.
:param cdn: If set True, the CDN management interface will be
used.
:param decode_json: If set False, the usual decoding of the
JSON response will be skipped and the raw contents will
be returned instead.
:returns: A tuple of (status, reason, headers, contents).
:status: is an int for the HTTP status code.
:reason: is the str for the HTTP status (ex: "Ok").
:headers: is a dict with all lowercase keys of the HTTP
headers; if a header has multiple values, it will be a
list.
:contents: is the decoded JSON response or the raw str
for the HTTP body.
]
variable[query] assign[=] call[name[dict], parameter[<ast.BoolOp object at 0x7da20c796860>]]
call[name[query]][constant[format]] assign[=] constant[json]
if name[prefix] begin[:]
call[name[query]][constant[prefix]] assign[=] name[prefix]
if name[delimiter] begin[:]
call[name[query]][constant[delimiter]] assign[=] name[delimiter]
if name[marker] begin[:]
call[name[query]][constant[marker]] assign[=] name[marker]
if name[end_marker] begin[:]
call[name[query]][constant[end_marker]] assign[=] name[end_marker]
if name[limit] begin[:]
call[name[query]][constant[limit]] assign[=] name[limit]
return[call[name[self].request, parameter[constant[GET], constant[], constant[], name[headers]]]] | keyword[def] identifier[get_account] ( identifier[self] , identifier[headers] = keyword[None] , identifier[prefix] = keyword[None] , identifier[delimiter] = keyword[None] ,
identifier[marker] = keyword[None] , identifier[end_marker] = keyword[None] , identifier[limit] = keyword[None] , identifier[query] = keyword[None] ,
identifier[cdn] = keyword[False] , identifier[decode_json] = keyword[True] ):
literal[string]
identifier[query] = identifier[dict] ( identifier[query] keyword[or] {})
identifier[query] [ literal[string] ]= literal[string]
keyword[if] identifier[prefix] :
identifier[query] [ literal[string] ]= identifier[prefix]
keyword[if] identifier[delimiter] :
identifier[query] [ literal[string] ]= identifier[delimiter]
keyword[if] identifier[marker] :
identifier[query] [ literal[string] ]= identifier[marker]
keyword[if] identifier[end_marker] :
identifier[query] [ literal[string] ]= identifier[end_marker]
keyword[if] identifier[limit] :
identifier[query] [ literal[string] ]= identifier[limit]
keyword[return] identifier[self] . identifier[request] (
literal[string] , literal[string] , literal[string] , identifier[headers] , identifier[decode_json] = identifier[decode_json] , identifier[query] = identifier[query] ,
identifier[cdn] = identifier[cdn] ) | def get_account(self, headers=None, prefix=None, delimiter=None, marker=None, end_marker=None, limit=None, query=None, cdn=False, decode_json=True):
"""
GETs the account and returns the results. This is done to list
the containers for the account. Some useful headers are also
returned:
=========================== =================================
x-account-bytes-used Object storage used for the
account, in bytes.
x-account-container-count The number of containers in the
account.
x-account-object-count The number of objects in the
account.
=========================== =================================
Also, any user headers beginning with x-account-meta- are
returned.
These values can be delayed depending on the Swift cluster.
:param headers: Additional headers to send with the request.
:param prefix: The prefix container names must match to be
listed.
:param delimiter: The delimiter for the listing. Delimiters
indicate how far to progress through container names
before "rolling them up". For instance, a delimiter='.'
query on an account with the containers::
one.one
one.two
two
three.one
would return the JSON value of::
[{'subdir': 'one.'},
{'count': 0, 'bytes': 0, 'name': 'two'},
{'subdir': 'three.'}]
Using this with prefix can allow you to traverse a pseudo
hierarchy.
:param marker: Only container names after this marker will be
returned. Swift returns a limited number of containers
per request (often 10,000). To get the next batch of
names, you issue another query with the marker set to the
last name you received. You can continue to issue
requests until you receive no more names.
:param end_marker: Only container names before this marker will be
returned.
:param limit: Limits the size of the list returned per
request. The default and maximum depend on the Swift
cluster (usually 10,000).
:param query: Set to a dict of query values to send on the
query string of the request.
:param cdn: If set True, the CDN management interface will be
used.
:param decode_json: If set False, the usual decoding of the
JSON response will be skipped and the raw contents will
be returned instead.
:returns: A tuple of (status, reason, headers, contents).
:status: is an int for the HTTP status code.
:reason: is the str for the HTTP status (ex: "Ok").
:headers: is a dict with all lowercase keys of the HTTP
headers; if a header has multiple values, it will be a
list.
:contents: is the decoded JSON response or the raw str
for the HTTP body.
"""
query = dict(query or {})
query['format'] = 'json'
if prefix:
query['prefix'] = prefix # depends on [control=['if'], data=[]]
if delimiter:
query['delimiter'] = delimiter # depends on [control=['if'], data=[]]
if marker:
query['marker'] = marker # depends on [control=['if'], data=[]]
if end_marker:
query['end_marker'] = end_marker # depends on [control=['if'], data=[]]
if limit:
query['limit'] = limit # depends on [control=['if'], data=[]]
return self.request('GET', '', '', headers, decode_json=decode_json, query=query, cdn=cdn) |
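A paging sketch for the marker protocol described in the docstring; `client` is an assumed instance of the class that defines get_account.

marker = None
while True:
    status, reason, headers, listing = client.get_account(
        marker=marker, limit=1000)
    if not listing:
        break  # an empty listing means the account names are exhausted
    for item in listing:
        print(item.get("name") or item.get("subdir"))
    last = listing[-1]
    marker = last.get("name") or last.get("subdir")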
def xml_marshal_complete_multipart_upload(uploaded_parts):
"""
Marshals the complete multipart upload request based on *uploaded_parts*.
:param uploaded_parts: List of all uploaded parts, ordered by part number.
:return: Marshalled XML data.
"""
root = s3_xml.Element('CompleteMultipartUpload', {'xmlns': _S3_NAMESPACE})
for uploaded_part in uploaded_parts:
part_number = uploaded_part.part_number
part = s3_xml.SubElement(root, 'Part')
part_num = s3_xml.SubElement(part, 'PartNumber')
part_num.text = str(part_number)
etag = s3_xml.SubElement(part, 'ETag')
etag.text = '"' + uploaded_part.etag + '"'
data = io.BytesIO()
s3_xml.ElementTree(root).write(data, encoding=None,
xml_declaration=False)
return data.getvalue() | def function[xml_marshal_complete_multipart_upload, parameter[uploaded_parts]]:
constant[
Marshals the complete multipart upload request based on *uploaded_parts*.
:param uploaded_parts: List of all uploaded parts, ordered by part number.
:return: Marshalled XML data.
]
variable[root] assign[=] call[name[s3_xml].Element, parameter[constant[CompleteMultipartUpload], dictionary[[<ast.Constant object at 0x7da1b1e6acb0>], [<ast.Name object at 0x7da1b1e690f0>]]]]
for taget[name[uploaded_part]] in starred[name[uploaded_parts]] begin[:]
variable[part_number] assign[=] name[uploaded_part].part_number
variable[part] assign[=] call[name[s3_xml].SubElement, parameter[name[root], constant[Part]]]
variable[part_num] assign[=] call[name[s3_xml].SubElement, parameter[name[part], constant[PartNumber]]]
name[part_num].text assign[=] call[name[str], parameter[name[part_number]]]
variable[etag] assign[=] call[name[s3_xml].SubElement, parameter[name[part], constant[ETag]]]
name[etag].text assign[=] binary_operation[binary_operation[constant["] + name[uploaded_part].etag] + constant["]]
variable[data] assign[=] call[name[io].BytesIO, parameter[]]
call[call[name[s3_xml].ElementTree, parameter[name[root]]].write, parameter[name[data]]]
return[call[name[data].getvalue, parameter[]]] | keyword[def] identifier[xml_marshal_complete_multipart_upload] ( identifier[uploaded_parts] ):
literal[string]
identifier[root] = identifier[s3_xml] . identifier[Element] ( literal[string] ,{ literal[string] : identifier[_S3_NAMESPACE] })
keyword[for] identifier[uploaded_part] keyword[in] identifier[uploaded_parts] :
identifier[part_number] = identifier[uploaded_part] . identifier[part_number]
identifier[part] = identifier[s3_xml] . identifier[SubElement] ( identifier[root] , literal[string] )
identifier[part_num] = identifier[s3_xml] . identifier[SubElement] ( identifier[part] , literal[string] )
identifier[part_num] . identifier[text] = identifier[str] ( identifier[part_number] )
identifier[etag] = identifier[s3_xml] . identifier[SubElement] ( identifier[part] , literal[string] )
identifier[etag] . identifier[text] = literal[string] + identifier[uploaded_part] . identifier[etag] + literal[string]
identifier[data] = identifier[io] . identifier[BytesIO] ()
identifier[s3_xml] . identifier[ElementTree] ( identifier[root] ). identifier[write] ( identifier[data] , identifier[encoding] = keyword[None] ,
identifier[xml_declaration] = keyword[False] )
keyword[return] identifier[data] . identifier[getvalue] () | def xml_marshal_complete_multipart_upload(uploaded_parts):
"""
Marshals the complete multipart upload request based on *uploaded_parts*.
:param uploaded_parts: List of all uploaded parts, ordered by part number.
:return: Marshalled XML data.
"""
root = s3_xml.Element('CompleteMultipartUpload', {'xmlns': _S3_NAMESPACE})
for uploaded_part in uploaded_parts:
part_number = uploaded_part.part_number
part = s3_xml.SubElement(root, 'Part')
part_num = s3_xml.SubElement(part, 'PartNumber')
part_num.text = str(part_number)
etag = s3_xml.SubElement(part, 'ETag')
etag.text = '"' + uploaded_part.etag + '"'
data = io.BytesIO()
s3_xml.ElementTree(root).write(data, encoding=None, xml_declaration=False) # depends on [control=['for'], data=['uploaded_part']]
return data.getvalue() |
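A worked example of the marshalled output; PartMetadata is a stand-in for whatever object carries part_number and etag in the real client.

from collections import namedtuple

PartMetadata = namedtuple("PartMetadata", ["part_number", "etag"])
parts = [PartMetadata(1, "abc123"), PartMetadata(2, "def456")]
xml = xml_marshal_complete_multipart_upload(parts)
print(xml.decode())
# <CompleteMultipartUpload xmlns="..."><Part><PartNumber>1</PartNumber>
# <ETag>"abc123"</ETag></Part><Part>...</Part></CompleteMultipartUpload>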
def drop_account(self, account_cookie):
"""删除一个account
Arguments:
account_cookie {[type]} -- [description]
Raises:
RuntimeError -- [description]
"""
if account_cookie in self.account_list:
self.account_list.remove(account_cookie)
# list.remove returns None, so look the account up by its cookie instead
self.cash.append(
self.cash[-1] + self.get_account_by_cookie(account_cookie).init_cash)
return True
else:
raise RuntimeError(
'account {} is not in the portfolio'.format(account_cookie)
) | def function[drop_account, parameter[self, account_cookie]]:
constant[Delete an account
Arguments:
account_cookie {[type]} -- [description]
Raises:
RuntimeError -- [description]
]
if compare[name[account_cookie] in name[self].account_list] begin[:]
call[name[self].account_list.remove, parameter[name[account_cookie]]]
call[name[self].cash.append, parameter[binary_operation[call[name[self].cash][<ast.UnaryOp object at 0x7da1b1f77bb0>] + call[name[self].get_account_by_cookie, parameter[name[account_cookie]]].init_cash]]]
return[constant[True]] | keyword[def] identifier[drop_account] ( identifier[self] , identifier[account_cookie] ):
literal[string]
keyword[if] identifier[account_cookie] keyword[in] identifier[self] . identifier[account_list] :
identifier[self] . identifier[account_list] . identifier[remove] ( identifier[account_cookie] )
identifier[self] . identifier[cash] . identifier[append] (
identifier[self] . identifier[cash] [- literal[int] ]+ identifier[self] . identifier[get_account_by_cookie] ( identifier[account_cookie] ). identifier[init_cash] )
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[RuntimeError] (
literal[string] . identifier[format] ( identifier[account_cookie] )
) | def drop_account(self, account_cookie):
"""删除一个account
Arguments:
account_cookie {[type]} -- [description]
Raises:
RuntimeError -- [description]
"""
if account_cookie in self.account_list:
self.account_list.remove(account_cookie)
self.cash.append(self.cash[-1] + self.get_account_by_cookie(account_cookie).init_cash)
return True # depends on [control=['if'], data=['account_cookie']]
else:
raise RuntimeError('account {} is not in the portfolio'.format(account_cookie)) |
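Usage sketch; `portfolio` is an assumed instance of the class above and the cookie is illustrative.

if "acc_001" in portfolio.account_list:
    portfolio.drop_account("acc_001")  # appends the freed init_cash to cash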
def connect_edges(graph):
"""
Given a Graph element containing abstract edges compute edge
segments directly connecting the source and target nodes. This
operation just uses internal HoloViews operations and will be a
lot slower than the pandas equivalent.
"""
paths = []
for start, end in graph.array(graph.kdims):
start_ds = graph.nodes[:, :, start]
end_ds = graph.nodes[:, :, end]
if not len(start_ds) or not len(end_ds):
raise ValueError('Could not find node positions for all edges')
start = start_ds.array(start_ds.kdims[:2])
end = end_ds.array(end_ds.kdims[:2])
paths.append(np.array([start[0], end[0]]))
return paths | def function[connect_edges, parameter[graph]]:
constant[
Given a Graph element containing abstract edges compute edge
segments directly connecting the source and target nodes. This
operation just uses internal HoloViews operations and will be a
lot slower than the pandas equivalent.
]
variable[paths] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18dc058a0>, <ast.Name object at 0x7da18dc045e0>]]] in starred[call[name[graph].array, parameter[name[graph].kdims]]] begin[:]
variable[start_ds] assign[=] call[name[graph].nodes][tuple[[<ast.Slice object at 0x7da18dc069b0>, <ast.Slice object at 0x7da18dc05e70>, <ast.Name object at 0x7da18dc07d60>]]]
variable[end_ds] assign[=] call[name[graph].nodes][tuple[[<ast.Slice object at 0x7da18dc047c0>, <ast.Slice object at 0x7da18dc05ea0>, <ast.Name object at 0x7da18dc04d00>]]]
if <ast.BoolOp object at 0x7da18dc07550> begin[:]
<ast.Raise object at 0x7da18dc07160>
variable[start] assign[=] call[name[start_ds].array, parameter[call[name[start_ds].kdims][<ast.Slice object at 0x7da18dc04160>]]]
variable[end] assign[=] call[name[end_ds].array, parameter[call[name[end_ds].kdims][<ast.Slice object at 0x7da18dc06080>]]]
call[name[paths].append, parameter[call[name[np].array, parameter[list[[<ast.Subscript object at 0x7da18dc079a0>, <ast.Subscript object at 0x7da18dc04910>]]]]]]
return[name[paths]] | keyword[def] identifier[connect_edges] ( identifier[graph] ):
literal[string]
identifier[paths] =[]
keyword[for] identifier[start] , identifier[end] keyword[in] identifier[graph] . identifier[array] ( identifier[graph] . identifier[kdims] ):
identifier[start_ds] = identifier[graph] . identifier[nodes] [:,:, identifier[start] ]
identifier[end_ds] = identifier[graph] . identifier[nodes] [:,:, identifier[end] ]
keyword[if] keyword[not] identifier[len] ( identifier[start_ds] ) keyword[or] keyword[not] identifier[len] ( identifier[end_ds] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[start] = identifier[start_ds] . identifier[array] ( identifier[start_ds] . identifier[kdims] [: literal[int] ])
identifier[end] = identifier[end_ds] . identifier[array] ( identifier[end_ds] . identifier[kdims] [: literal[int] ])
identifier[paths] . identifier[append] ( identifier[np] . identifier[array] ([ identifier[start] [ literal[int] ], identifier[end] [ literal[int] ]]))
keyword[return] identifier[paths] | def connect_edges(graph):
"""
Given a Graph element containing abstract edges compute edge
segments directly connecting the source and target nodes. This
operation just uses internal HoloViews operations and will be a
lot slower than the pandas equivalent.
"""
paths = []
for (start, end) in graph.array(graph.kdims):
start_ds = graph.nodes[:, :, start]
end_ds = graph.nodes[:, :, end]
if not len(start_ds) or not len(end_ds):
raise ValueError('Could not find node positions for all edges') # depends on [control=['if'], data=[]]
start = start_ds.array(start_ds.kdims[:2])
end = end_ds.array(end_ds.kdims[:2])
paths.append(np.array([start[0], end[0]])) # depends on [control=['for'], data=[]]
return paths |
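A minimal sketch, assuming holoviews is installed: a three-node graph whose two edges become coordinate pairs.

import holoviews as hv

nodes = hv.Nodes([(0.0, 0.0, 0), (1.0, 0.0, 1), (0.5, 1.0, 2)])  # (x, y, index)
graph = hv.Graph(([(0, 1), (1, 2)], nodes))
paths = connect_edges(graph)  # two 2x2 arrays: [[x_start, y_start], [x_end, y_end]]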
def _setup_variant_regions(data, out_dir):
"""Ensure we have variant regions for calling, using transcript if not present.
Respects noalt_calling by removing additional contigs to improve
speed.
"""
vr_file = dd.get_variant_regions(data)
if not vr_file:
vr_file = regions.get_sv_bed(data, "transcripts", out_dir=out_dir)
contigs = set([c.name for c in ref.file_contigs(dd.get_ref_file(data))])
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bedprep")),
"%s-rnaseq_clean.bed" % utils.splitext_plus(os.path.basename(vr_file))[0])
if not utils.file_uptodate(out_file, vr_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with shared.bedtools_tmpdir(data):
for r in pybedtools.BedTool(vr_file):
if r.chrom in contigs:
if chromhacks.is_nonalt(r.chrom):
out_handle.write(str(r))
data = dd.set_variant_regions(data, out_file)
return data | def function[_setup_variant_regions, parameter[data, out_dir]]:
constant[Ensure we have variant regions for calling, using transcript if not present.
Respects noalt_calling by removing additional contigs to improve
speed.
]
variable[vr_file] assign[=] call[name[dd].get_variant_regions, parameter[name[data]]]
if <ast.UnaryOp object at 0x7da1b18ab310> begin[:]
variable[vr_file] assign[=] call[name[regions].get_sv_bed, parameter[name[data], constant[transcripts]]]
variable[contigs] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b18a89a0>]]
variable[out_file] assign[=] call[name[os].path.join, parameter[call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[name[dd].get_work_dir, parameter[name[data]]], constant[bedprep]]]]], binary_operation[constant[%s-rnaseq_clean.bed] <ast.Mod object at 0x7da2590d6920> call[call[name[utils].splitext_plus, parameter[call[name[os].path.basename, parameter[name[vr_file]]]]]][constant[0]]]]]
if <ast.UnaryOp object at 0x7da1b18aae90> begin[:]
with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:]
with call[name[open], parameter[name[tx_out_file], constant[w]]] begin[:]
with call[name[shared].bedtools_tmpdir, parameter[name[data]]] begin[:]
for taget[name[r]] in starred[call[name[pybedtools].BedTool, parameter[name[vr_file]]]] begin[:]
if compare[name[r].chrom in name[contigs]] begin[:]
if call[name[chromhacks].is_nonalt, parameter[name[r].chrom]] begin[:]
call[name[out_handle].write, parameter[call[name[str], parameter[name[r]]]]]
variable[data] assign[=] call[name[dd].set_variant_regions, parameter[name[data], name[out_file]]]
return[name[data]] | keyword[def] identifier[_setup_variant_regions] ( identifier[data] , identifier[out_dir] ):
literal[string]
identifier[vr_file] = identifier[dd] . identifier[get_variant_regions] ( identifier[data] )
keyword[if] keyword[not] identifier[vr_file] :
identifier[vr_file] = identifier[regions] . identifier[get_sv_bed] ( identifier[data] , literal[string] , identifier[out_dir] = identifier[out_dir] )
identifier[contigs] = identifier[set] ([ identifier[c] . identifier[name] keyword[for] identifier[c] keyword[in] identifier[ref] . identifier[file_contigs] ( identifier[dd] . identifier[get_ref_file] ( identifier[data] ))])
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dd] . identifier[get_work_dir] ( identifier[data] ), literal[string] )),
literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[vr_file] ))[ literal[int] ])
keyword[if] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_file] , identifier[vr_file] ):
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] :
keyword[with] identifier[open] ( identifier[tx_out_file] , literal[string] ) keyword[as] identifier[out_handle] :
keyword[with] identifier[shared] . identifier[bedtools_tmpdir] ( identifier[data] ):
keyword[for] identifier[r] keyword[in] identifier[pybedtools] . identifier[BedTool] ( identifier[vr_file] ):
keyword[if] identifier[r] . identifier[chrom] keyword[in] identifier[contigs] :
keyword[if] identifier[chromhacks] . identifier[is_nonalt] ( identifier[r] . identifier[chrom] ):
identifier[out_handle] . identifier[write] ( identifier[str] ( identifier[r] ))
identifier[data] = identifier[dd] . identifier[set_variant_regions] ( identifier[data] , identifier[out_file] )
keyword[return] identifier[data] | def _setup_variant_regions(data, out_dir):
"""Ensure we have variant regions for calling, using transcript if not present.
Respects noalt_calling by removing additional contigs to improve
speed.
"""
vr_file = dd.get_variant_regions(data)
if not vr_file:
vr_file = regions.get_sv_bed(data, 'transcripts', out_dir=out_dir) # depends on [control=['if'], data=[]]
contigs = set([c.name for c in ref.file_contigs(dd.get_ref_file(data))])
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), 'bedprep')), '%s-rnaseq_clean.bed' % utils.splitext_plus(os.path.basename(vr_file))[0])
if not utils.file_uptodate(out_file, vr_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_handle:
with shared.bedtools_tmpdir(data):
for r in pybedtools.BedTool(vr_file):
if r.chrom in contigs:
if chromhacks.is_nonalt(r.chrom):
out_handle.write(str(r)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['out_handle']] # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
data = dd.set_variant_regions(data, out_file)
return data |
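A standalone sketch of the contig-filtering idea (keep only records on primary, non-ALT contigs); all names here are illustrative, not the bcbio API.

primary_contigs = {"chr1", "chr2", "chrX"}  # stand-in for ref.file_contigs output
records = [("chr1", 100, 200), ("chr1_KI270706v1_random", 0, 50)]
kept = [r for r in records if r[0] in primary_contigs]
print(kept)  # -> [('chr1', 100, 200)]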
def import_sql_table(connection_url, table, username, password, columns=None, optimize=True, fetch_mode=None):
"""
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(table, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(columns, [str], None)
assert_is_type(optimize, bool)
assert_is_type(fetch_mode, str, None)
p = {"connection_url": connection_url, "table": table, "username": username, "password": password,
"fetch_mode": fetch_mode}
if columns:
p["columns"] = ", ".join(columns)
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key) | def function[import_sql_table, parameter[connection_url, table, username, password, columns, optimize, fetch_mode]]:
constant[
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
]
call[name[assert_is_type], parameter[name[connection_url], name[str]]]
call[name[assert_is_type], parameter[name[table], name[str]]]
call[name[assert_is_type], parameter[name[username], name[str]]]
call[name[assert_is_type], parameter[name[password], name[str]]]
call[name[assert_is_type], parameter[name[columns], list[[<ast.Name object at 0x7da1b0356980>]], constant[None]]]
call[name[assert_is_type], parameter[name[optimize], name[bool]]]
call[name[assert_is_type], parameter[name[fetch_mode], name[str], constant[None]]]
variable[p] assign[=] dictionary[[<ast.Constant object at 0x7da1b0355420>, <ast.Constant object at 0x7da1b03571f0>, <ast.Constant object at 0x7da1b0355030>, <ast.Constant object at 0x7da1b0356890>, <ast.Constant object at 0x7da1b0354b50>], [<ast.Name object at 0x7da1b0354220>, <ast.Name object at 0x7da1b0357250>, <ast.Name object at 0x7da1b0357310>, <ast.Name object at 0x7da1b0354580>, <ast.Name object at 0x7da1b0356050>]]
if name[columns] begin[:]
call[name[p]][constant[columns]] assign[=] call[constant[, ].join, parameter[name[columns]]]
variable[j] assign[=] call[call[name[H2OJob], parameter[call[name[api], parameter[constant[POST /99/ImportSQLTable]]], constant[Import SQL Table]]].poll, parameter[]]
return[call[name[get_frame], parameter[name[j].dest_key]]] | keyword[def] identifier[import_sql_table] ( identifier[connection_url] , identifier[table] , identifier[username] , identifier[password] , identifier[columns] = keyword[None] , identifier[optimize] = keyword[True] , identifier[fetch_mode] = keyword[None] ):
literal[string]
identifier[assert_is_type] ( identifier[connection_url] , identifier[str] )
identifier[assert_is_type] ( identifier[table] , identifier[str] )
identifier[assert_is_type] ( identifier[username] , identifier[str] )
identifier[assert_is_type] ( identifier[password] , identifier[str] )
identifier[assert_is_type] ( identifier[columns] ,[ identifier[str] ], keyword[None] )
identifier[assert_is_type] ( identifier[optimize] , identifier[bool] )
identifier[assert_is_type] ( identifier[fetch_mode] , identifier[str] , keyword[None] )
identifier[p] ={ literal[string] : identifier[connection_url] , literal[string] : identifier[table] , literal[string] : identifier[username] , literal[string] : identifier[password] ,
literal[string] : identifier[fetch_mode] }
keyword[if] identifier[columns] :
identifier[p] [ literal[string] ]= literal[string] . identifier[join] ( identifier[columns] )
identifier[j] = identifier[H2OJob] ( identifier[api] ( literal[string] , identifier[data] = identifier[p] ), literal[string] ). identifier[poll] ()
keyword[return] identifier[get_frame] ( identifier[j] . identifier[dest_key] ) | def import_sql_table(connection_url, table, username, password, columns=None, optimize=True, fetch_mode=None):
"""
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(table, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(columns, [str], None)
assert_is_type(optimize, bool)
assert_is_type(fetch_mode, str, None)
p = {'connection_url': connection_url, 'table': table, 'username': username, 'password': password, 'fetch_mode': fetch_mode}
if columns:
p['columns'] = ', '.join(columns) # depends on [control=['if'], data=[]]
j = H2OJob(api('POST /99/ImportSQLTable', data=p), 'Import SQL Table').poll()
return get_frame(j.dest_key) |
def posterior_step(logposts, dim):
"""Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0.
"""
if logposts.ndim > 1:
raise ValueError("logposts must be a 1D array")
criteria = dim/2.
dp = numpy.diff(logposts)
indices = numpy.where(dp >= criteria)[0]
if indices.size > 0:
idx = indices[-1] + 1
else:
idx = 0
return idx | def function[posterior_step, parameter[logposts, dim]]:
constant[Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0.
]
if compare[name[logposts].ndim greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da18f811990>
variable[criteria] assign[=] binary_operation[name[dim] / constant[2.0]]
variable[dp] assign[=] call[name[numpy].diff, parameter[name[logposts]]]
variable[indices] assign[=] call[call[name[numpy].where, parameter[compare[name[dp] greater_or_equal[>=] name[criteria]]]]][constant[0]]
if compare[name[indices].size greater[>] constant[0]] begin[:]
variable[idx] assign[=] binary_operation[call[name[indices]][<ast.UnaryOp object at 0x7da2054a4460>] + constant[1]]
return[name[idx]] | keyword[def] identifier[posterior_step] ( identifier[logposts] , identifier[dim] ):
literal[string]
keyword[if] identifier[logposts] . identifier[ndim] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[criteria] = identifier[dim] / literal[int]
identifier[dp] = identifier[numpy] . identifier[diff] ( identifier[logposts] )
identifier[indices] = identifier[numpy] . identifier[where] ( identifier[dp] >= identifier[criteria] )[ literal[int] ]
keyword[if] identifier[indices] . identifier[size] > literal[int] :
identifier[idx] = identifier[indices] [- literal[int] ]+ literal[int]
keyword[else] :
identifier[idx] = literal[int]
keyword[return] identifier[idx] | def posterior_step(logposts, dim):
"""Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0.
"""
if logposts.ndim > 1:
raise ValueError('logposts must be a 1D array') # depends on [control=['if'], data=[]]
criteria = dim / 2.0
dp = numpy.diff(logposts)
indices = numpy.where(dp >= criteria)[0]
if indices.size > 0:
idx = indices[-1] + 1 # depends on [control=['if'], data=[]]
else:
idx = 0
return idx |
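A worked example: with dim=4 the threshold is 2.0, and the last jump of at least 2.0 occurs between indices 3 and 4, so the function returns 4.

import numpy

logposts = numpy.array([-10.0, -9.5, -9.4, -7.0, -4.5, -4.4])
print(posterior_step(logposts, dim=4))  # -> 4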
def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs):
"""
Clone variables unused by the attack on all GPUs. Specifically, the
ground-truth label, y, has to be preserved until the training step.
:param inputs: A list of dictionaries as the inputs to each step.
:param outputs: A list of dictionaries as the outputs of each step.
:param g0_inputs: Initial variables to be cloned.
:return: Updated inputs and outputs.
"""
assert len(inputs) == len(outputs), (
'Inputs and outputs should have the same number of elements.')
inputs[0].update(g0_inputs)
outputs[0].update(g0_inputs)
# Copy g0_inputs forward
for i in range(1, len(inputs)):
# Create the graph for i'th step of attack
device_name = inputs[i]['x'].device
with tf.device(device_name):
with tf.variable_scope('step%d' % i):
for k, v in g0_inputs.iteritems():
if k not in inputs[i]:
v_copy = clone_variable(k, v)
inputs[i][k] = v_copy
outputs[i][k] = v_copy
return inputs, outputs | def function[clone_g0_inputs_on_ngpus, parameter[self, inputs, outputs, g0_inputs]]:
constant[
Clone variables unused by the attack on all GPUs. Specifically, the
ground-truth label, y, has to be preserved until the training step.
:param inputs: A list of dictionaries as the inputs to each step.
:param outputs: A list of dictionaries as the outputs of each step.
:param g0_inputs: Initial variables to be cloned.
:return: Updated inputs and outputs.
]
assert[compare[call[name[len], parameter[name[inputs]]] equal[==] call[name[len], parameter[name[outputs]]]]]
call[call[name[inputs]][constant[0]].update, parameter[name[g0_inputs]]]
call[call[name[outputs]][constant[0]].update, parameter[name[g0_inputs]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[inputs]]]]]] begin[:]
variable[device_name] assign[=] call[call[name[inputs]][name[i]]][constant[x]].device
with call[name[tf].device, parameter[name[device_name]]] begin[:]
with call[name[tf].variable_scope, parameter[binary_operation[constant[step%d] <ast.Mod object at 0x7da2590d6920> name[i]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2044c03d0>, <ast.Name object at 0x7da2044c2050>]]] in starred[call[name[g0_inputs].iteritems, parameter[]]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> call[name[inputs]][name[i]]] begin[:]
variable[v_copy] assign[=] call[name[clone_variable], parameter[name[k], name[v]]]
call[call[name[inputs]][name[i]]][name[k]] assign[=] name[v_copy]
call[call[name[outputs]][name[i]]][name[k]] assign[=] name[v_copy]
return[tuple[[<ast.Name object at 0x7da2044c3280>, <ast.Name object at 0x7da2044c1240>]]] | keyword[def] identifier[clone_g0_inputs_on_ngpus] ( identifier[self] , identifier[inputs] , identifier[outputs] , identifier[g0_inputs] ):
literal[string]
keyword[assert] identifier[len] ( identifier[inputs] )== identifier[len] ( identifier[outputs] ),(
literal[string] )
identifier[inputs] [ literal[int] ]. identifier[update] ( identifier[g0_inputs] )
identifier[outputs] [ literal[int] ]. identifier[update] ( identifier[g0_inputs] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[inputs] )):
identifier[device_name] = identifier[inputs] [ identifier[i] ][ literal[string] ]. identifier[device]
keyword[with] identifier[tf] . identifier[device] ( identifier[device_name] ):
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] % identifier[i] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[g0_inputs] . identifier[iteritems] ():
keyword[if] identifier[k] keyword[not] keyword[in] identifier[inputs] [ identifier[i] ]:
identifier[v_copy] = identifier[clone_variable] ( identifier[k] , identifier[v] )
identifier[inputs] [ identifier[i] ][ identifier[k] ]= identifier[v_copy]
identifier[outputs] [ identifier[i] ][ identifier[k] ]= identifier[v_copy]
keyword[return] identifier[inputs] , identifier[outputs] | def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs):
"""
Clone variables unused by the attack on all GPUs. Specifically, the
ground-truth label, y, has to be preserved until the training step.
:param inputs: A list of dictionaries as the inputs to each step.
:param outputs: A list of dictionaries as the outputs of each step.
:param g0_inputs: Initial variables to be cloned.
:return: Updated inputs and outputs.
"""
assert len(inputs) == len(outputs), 'Inputs and outputs should have the same number of elements.'
inputs[0].update(g0_inputs)
outputs[0].update(g0_inputs)
# Copy g0_inputs forward
for i in range(1, len(inputs)):
# Create the graph for i'th step of attack
device_name = inputs[i]['x'].device
with tf.device(device_name):
with tf.variable_scope('step%d' % i):
for (k, v) in g0_inputs.iteritems():
if k not in inputs[i]:
v_copy = clone_variable(k, v)
inputs[i][k] = v_copy
outputs[i][k] = v_copy # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['i']]
return (inputs, outputs) |
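A hypothetical sketch of the clone_variable helper assumed above, using the TF1-style graph API; the real implementation may differ.

import tensorflow as tf  # TF1-style API, matching tf.variable_scope above

def clone_variable(name, x):
    # Fresh variable with x's shape/dtype in the current ('step%d') scope,
    # so each GPU gets its own copy of the ground-truth inputs.
    return tf.get_variable(name, shape=x.shape, dtype=x.dtype,
                           initializer=tf.zeros_initializer())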
def resize(img, size, interpolation=Image.BILINEAR):
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number, maintaining
the aspect ratio, i.e., if height > width, then the image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation) | def function[resize, parameter[img, size, interpolation]]:
constant[Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number, maintaining
the aspect ratio, i.e., if height > width, then the image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
]
if <ast.UnaryOp object at 0x7da1b052a0b0> begin[:]
<ast.Raise object at 0x7da1b052add0>
if <ast.UnaryOp object at 0x7da1b052b7c0> begin[:]
<ast.Raise object at 0x7da1b05284c0>
if call[name[isinstance], parameter[name[size], name[int]]] begin[:]
<ast.Tuple object at 0x7da1b052b970> assign[=] name[img].size
if <ast.BoolOp object at 0x7da1b05293f0> begin[:]
return[name[img]]
if compare[name[w] less[<] name[h]] begin[:]
variable[ow] assign[=] name[size]
variable[oh] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[size] * name[h]] / name[w]]]]
return[call[name[img].resize, parameter[tuple[[<ast.Name object at 0x7da1b03f8880>, <ast.Name object at 0x7da1b03f8280>]], name[interpolation]]]] | keyword[def] identifier[resize] ( identifier[img] , identifier[size] , identifier[interpolation] = identifier[Image] . identifier[BILINEAR] ):
literal[string]
keyword[if] keyword[not] identifier[_is_pil_image] ( identifier[img] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[img] )))
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[size] , identifier[int] ) keyword[or] ( identifier[isinstance] ( identifier[size] , identifier[Iterable] ) keyword[and] identifier[len] ( identifier[size] )== literal[int] )):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[size] ))
keyword[if] identifier[isinstance] ( identifier[size] , identifier[int] ):
identifier[w] , identifier[h] = identifier[img] . identifier[size]
keyword[if] ( identifier[w] <= identifier[h] keyword[and] identifier[w] == identifier[size] ) keyword[or] ( identifier[h] <= identifier[w] keyword[and] identifier[h] == identifier[size] ):
keyword[return] identifier[img]
keyword[if] identifier[w] < identifier[h] :
identifier[ow] = identifier[size]
identifier[oh] = identifier[int] ( identifier[size] * identifier[h] / identifier[w] )
keyword[return] identifier[img] . identifier[resize] (( identifier[ow] , identifier[oh] ), identifier[interpolation] )
keyword[else] :
identifier[oh] = identifier[size]
identifier[ow] = identifier[int] ( identifier[size] * identifier[w] / identifier[h] )
keyword[return] identifier[img] . identifier[resize] (( identifier[ow] , identifier[oh] ), identifier[interpolation] )
keyword[else] :
keyword[return] identifier[img] . identifier[resize] ( identifier[size] [::- literal[int] ], identifier[interpolation] ) | def resize(img, size, interpolation=Image.BILINEAR):
"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number, maintaining
the aspect ratio, i.e., if height > width, then the image will be rescaled to
:math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img))) # depends on [control=['if'], data=[]]
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size)) # depends on [control=['if'], data=[]]
if isinstance(size, int):
(w, h) = img.size
if w <= h and w == size or (h <= w and h == size):
return img # depends on [control=['if'], data=[]]
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation) # depends on [control=['if'], data=['w', 'h']]
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation) # depends on [control=['if'], data=[]]
else:
return img.resize(size[::-1], interpolation) |
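A worked example of the int/sequence semantics: with an int, the smaller edge is matched while the aspect ratio is kept.

from PIL import Image

img = Image.new("RGB", (400, 200))
print(resize(img, 100).size)       # -> (200, 100): smaller edge matched to 100
print(resize(img, (50, 80)).size)  # -> (80, 50): (h, w) maps to PIL's (w, h)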
def csv_row_to_transaction(index, row, source_encoding="latin1",
date_format="%d-%m-%Y", thousand_sep=".", decimal_sep=","):
"""
Parses a row of strings to a ``Transaction`` object.
Args:
index: The index of this row in the original CSV file. Used for
sorting ``Transaction``s by their order of appearance.
row: The row containing strings for [transfer_date, posted_date,
message, money_amount, money_total].
source_encoding: The encoding that will be used to decode strings
to UTF-8.
date_format: The format of dates in this row.
thousand_sep: The thousand separator in money amounts.
decimal_sep: The decimal separator in money amounts.
Returns:
A ``Transaction`` object.
"""
xfer, posted, message, amount, total = row
xfer = Parse.date(xfer)
posted = Parse.date(posted)
message = Parse.to_utf8(message, source_encoding)
amount = Parse.money(amount)
total = Parse.money(total)
return Transaction(index, xfer, posted, message, amount, total) | def function[csv_row_to_transaction, parameter[index, row, source_encoding, date_format, thousand_sep, decimal_sep]]:
constant[
Parses a row of strings to a ``Transaction`` object.
Args:
index: The index of this row in the original CSV file. Used for
sorting ``Transaction``s by their order of appearance.
row: The row containing strings for [transfer_date, posted_date,
message, money_amount, money_total].
source_encoding: The encoding that will be used to decode strings
to UTF-8.
date_format: The format of dates in this row.
thousand_sep: The thousand separator in money amounts.
decimal_sep: The decimal separator in money amounts.
Returns:
A ``Transaction`` object.
]
<ast.Tuple object at 0x7da1b16d5750> assign[=] name[row]
variable[xfer] assign[=] call[name[Parse].date, parameter[name[xfer]]]
variable[posted] assign[=] call[name[Parse].date, parameter[name[posted]]]
variable[message] assign[=] call[name[Parse].to_utf8, parameter[name[message], name[source_encoding]]]
variable[amount] assign[=] call[name[Parse].money, parameter[name[amount]]]
variable[total] assign[=] call[name[Parse].money, parameter[name[total]]]
return[call[name[Transaction], parameter[name[index], name[xfer], name[posted], name[message], name[amount], name[total]]]] | keyword[def] identifier[csv_row_to_transaction] ( identifier[index] , identifier[row] , identifier[source_encoding] = literal[string] ,
identifier[date_format] = literal[string] , identifier[thousand_sep] = literal[string] , identifier[decimal_sep] = literal[string] ):
literal[string]
identifier[xfer] , identifier[posted] , identifier[message] , identifier[amount] , identifier[total] = identifier[row]
identifier[xfer] = identifier[Parse] . identifier[date] ( identifier[xfer] )
identifier[posted] = identifier[Parse] . identifier[date] ( identifier[posted] )
identifier[message] = identifier[Parse] . identifier[to_utf8] ( identifier[message] , identifier[source_encoding] )
identifier[amount] = identifier[Parse] . identifier[money] ( identifier[amount] )
identifier[total] = identifier[Parse] . identifier[money] ( identifier[total] )
keyword[return] identifier[Transaction] ( identifier[index] , identifier[xfer] , identifier[posted] , identifier[message] , identifier[amount] , identifier[total] ) | def csv_row_to_transaction(index, row, source_encoding='latin1', date_format='%d-%m-%Y', thousand_sep='.', decimal_sep=','):
"""
Parses a row of strings to a ``Transaction`` object.
Args:
index: The index of this row in the original CSV file. Used for
sorting ``Transaction``s by their order of appearance.
row: The row containing strings for [transfer_date, posted_date,
message, money_amount, money_total].
source_encoding: The encoding that will be used to decode strings
to UTF-8.
date_format: The format of dates in this row.
thousand_sep: The thousand separator in money amounts.
decimal_sep: The decimal separator in money amounts.
Returns:
A ``Transaction`` object.
"""
(xfer, posted, message, amount, total) = row
xfer = Parse.date(xfer)
posted = Parse.date(posted)
message = Parse.to_utf8(message, source_encoding)
amount = Parse.money(amount)
total = Parse.money(total)
return Transaction(index, xfer, posted, message, amount, total) |
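
The function above leans on external Parse and Transaction helpers. A rough, self-contained sketch of the same parsing steps, with stand-in helpers rather than the library's own:

import datetime

def parse_money(text, thousand_sep=".", decimal_sep=","):
    # "1.234,56" -> 1234.56
    return float(text.replace(thousand_sep, "").replace(decimal_sep, "."))

def parse_date(text, date_format="%d-%m-%Y"):
    return datetime.datetime.strptime(text, date_format).date()

row = ["01-02-2019", "02-02-2019", "Grocery store", "-123,45", "9.876,55"]
xfer, posted, message, amount, total = row
print(parse_date(xfer), parse_money(amount), parse_money(total))
# 2019-02-01 -123.45 9876.55
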
def create_build_graph(self, target_roots, build_root=None):
"""Construct and return a `BuildGraph` given a set of input specs.
:param TargetRoots target_roots: The targets root of the request.
:param string build_root: The build root.
:returns: A tuple of (BuildGraph, AddressMapper).
"""
logger.debug('target_roots are: %r', target_roots)
graph = LegacyBuildGraph.create(self.scheduler_session, self.build_file_aliases)
logger.debug('build_graph is: %s', graph)
# Ensure the entire generator is unrolled.
for _ in graph.inject_roots_closure(target_roots):
pass
address_mapper = LegacyAddressMapper(self.scheduler_session, build_root or get_buildroot())
logger.debug('address_mapper is: %s', address_mapper)
return graph, address_mapper | def function[create_build_graph, parameter[self, target_roots, build_root]]:
constant[Construct and return a `BuildGraph` given a set of input specs.
:param TargetRoots target_roots: The targets root of the request.
:param string build_root: The build root.
:returns: A tuple of (BuildGraph, AddressMapper).
]
call[name[logger].debug, parameter[constant[target_roots are: %r], name[target_roots]]]
variable[graph] assign[=] call[name[LegacyBuildGraph].create, parameter[name[self].scheduler_session, name[self].build_file_aliases]]
call[name[logger].debug, parameter[constant[build_graph is: %s], name[graph]]]
for taget[name[_]] in starred[call[name[graph].inject_roots_closure, parameter[name[target_roots]]]] begin[:]
pass
variable[address_mapper] assign[=] call[name[LegacyAddressMapper], parameter[name[self].scheduler_session, <ast.BoolOp object at 0x7da1b224baf0>]]
call[name[logger].debug, parameter[constant[address_mapper is: %s], name[address_mapper]]]
return[tuple[[<ast.Name object at 0x7da1b224a350>, <ast.Name object at 0x7da1b224a5f0>]]] | keyword[def] identifier[create_build_graph] ( identifier[self] , identifier[target_roots] , identifier[build_root] = keyword[None] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[target_roots] )
identifier[graph] = identifier[LegacyBuildGraph] . identifier[create] ( identifier[self] . identifier[scheduler_session] , identifier[self] . identifier[build_file_aliases] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[graph] )
keyword[for] identifier[_] keyword[in] identifier[graph] . identifier[inject_roots_closure] ( identifier[target_roots] ):
keyword[pass]
identifier[address_mapper] = identifier[LegacyAddressMapper] ( identifier[self] . identifier[scheduler_session] , identifier[build_root] keyword[or] identifier[get_buildroot] ())
identifier[logger] . identifier[debug] ( literal[string] , identifier[address_mapper] )
keyword[return] identifier[graph] , identifier[address_mapper] | def create_build_graph(self, target_roots, build_root=None):
"""Construct and return a `BuildGraph` given a set of input specs.
:param TargetRoots target_roots: The targets root of the request.
:param string build_root: The build root.
:returns: A tuple of (BuildGraph, AddressMapper).
"""
logger.debug('target_roots are: %r', target_roots)
graph = LegacyBuildGraph.create(self.scheduler_session, self.build_file_aliases)
logger.debug('build_graph is: %s', graph)
# Ensure the entire generator is unrolled.
for _ in graph.inject_roots_closure(target_roots):
pass # depends on [control=['for'], data=[]]
address_mapper = LegacyAddressMapper(self.scheduler_session, build_root or get_buildroot())
logger.debug('address_mapper is: %s', address_mapper)
return (graph, address_mapper) |
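
The bare "for _ in ...: pass" loop above exists only to drain a generator for its side effects. A standalone illustration of the idiom, with collections.deque as the usual alternative:

from collections import deque

def inject(items):
    for item in items:
        print("injected", item)   # the side effect we care about
        yield item

deque(inject([1, 2, 3]), maxlen=0)   # consumes everything, stores nothing
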
def on_connect(self, user):
""" Todo connect """
self.user = user
self.logger.info("connected as %s", user)
if not isinstance(self.con_connect, type(None)):
self.con_connect(user) | def function[on_connect, parameter[self, user]]:
constant[ Todo connect ]
name[self].user assign[=] name[user]
call[name[self].logger.info, parameter[constant[connected as %s], name[user]]]
if <ast.UnaryOp object at 0x7da1b0a9f0d0> begin[:]
call[name[self].con_connect, parameter[name[user]]] | keyword[def] identifier[on_connect] ( identifier[self] , identifier[user] ):
literal[string]
identifier[self] . identifier[user] = identifier[user]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[user] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[con_connect] , identifier[type] ( keyword[None] )):
identifier[self] . identifier[con_connect] ( identifier[user] ) | def on_connect(self, user):
""" Todo connect """
self.user = user
self.logger.info('connected as %s', user)
if not isinstance(self.con_connect, type(None)):
self.con_connect(user) # depends on [control=['if'], data=[]] |
def outline(self, face_ids=None, **kwargs):
"""
Given a list of face indexes find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
----------
face_ids : (n,) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline
"""
from .path.exchange.misc import faces_to_path
from .path.exchange.load import _create_path
path = _create_path(**faces_to_path(self,
face_ids,
**kwargs))
return path | def function[outline, parameter[self, face_ids]]:
constant[
Given a list of face indexes find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
----------
face_ids : (n,) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline
]
from relative_module[path.exchange.misc] import module[faces_to_path]
from relative_module[path.exchange.load] import module[_create_path]
variable[path] assign[=] call[name[_create_path], parameter[]]
return[name[path]] | keyword[def] identifier[outline] ( identifier[self] , identifier[face_ids] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[from] . identifier[path] . identifier[exchange] . identifier[misc] keyword[import] identifier[faces_to_path]
keyword[from] . identifier[path] . identifier[exchange] . identifier[load] keyword[import] identifier[_create_path]
identifier[path] = identifier[_create_path] (** identifier[faces_to_path] ( identifier[self] ,
identifier[face_ids] ,
** identifier[kwargs] ))
keyword[return] identifier[path] | def outline(self, face_ids=None, **kwargs):
"""
Given a list of face indexes find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
----------
face_ids : (n,) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline
"""
from .path.exchange.misc import faces_to_path
from .path.exchange.load import _create_path
path = _create_path(**faces_to_path(self, face_ids, **kwargs))
return path |
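
A hypothetical usage sketch, assuming the trimesh package is installed: a watertight box has an empty outline, while a subset of its faces has a boundary (exact entity counts may vary between trimesh versions):

import trimesh

box = trimesh.creation.box()
print(len(box.outline().entities))              # 0: watertight mesh, no outline
print(box.outline(face_ids=[0, 1]).length > 0)  # True: two faces have a boundary
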
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type] | def function[convert_type, parameter[self, type]]:
constant[Convert type to SQL
]
variable[mapping] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ac2b0>, <ast.Constant object at 0x7da1b26ad7b0>, <ast.Constant object at 0x7da1b26aceb0>, <ast.Constant object at 0x7da1b26af4f0>, <ast.Constant object at 0x7da1b26ae4a0>, <ast.Constant object at 0x7da1b26affa0>, <ast.Constant object at 0x7da1b26aea40>, <ast.Constant object at 0x7da1b26afc10>, <ast.Constant object at 0x7da1b26aeec0>, <ast.Constant object at 0x7da1b26afd30>, <ast.Constant object at 0x7da1b26aeb00>, <ast.Constant object at 0x7da1b26ad690>, <ast.Constant object at 0x7da1b26af1f0>, <ast.Constant object at 0x7da1b26af910>, <ast.Constant object at 0x7da1b26adbd0>], [<ast.Attribute object at 0x7da1b26accd0>, <ast.Constant object at 0x7da1b26ac820>, <ast.Attribute object at 0x7da1b26ad4e0>, <ast.Attribute object at 0x7da1b26ae980>, <ast.Attribute object at 0x7da1b26ad630>, <ast.Constant object at 0x7da1b26ad840>, <ast.Constant object at 0x7da1b26af1c0>, <ast.Constant object at 0x7da1b26ade10>, <ast.Attribute object at 0x7da1b26aee90>, <ast.Attribute object at 0x7da1b26acd90>, <ast.Constant object at 0x7da1b26ae9b0>, <ast.Attribute object at 0x7da1b26aff40>, <ast.Attribute object at 0x7da1b26ae530>, <ast.Attribute object at 0x7da1b26af400>, <ast.Constant object at 0x7da1b26ad090>]]
if compare[name[self].__dialect equal[==] constant[postgresql]] begin[:]
call[name[mapping].update, parameter[dictionary[[<ast.Constant object at 0x7da1b26ae500>, <ast.Constant object at 0x7da1b26af190>, <ast.Constant object at 0x7da204567100>, <ast.Constant object at 0x7da204566290>], [<ast.Name object at 0x7da204566530>, <ast.Name object at 0x7da204567a30>, <ast.Attribute object at 0x7da2045668f0>, <ast.Name object at 0x7da204566d10>]]]]
if compare[name[type] <ast.NotIn object at 0x7da2590d7190> name[mapping]] begin[:]
variable[message] assign[=] constant[Field type "%s" is not supported]
<ast.Raise object at 0x7da204567370>
return[call[name[mapping]][name[type]]] | keyword[def] identifier[convert_type] ( identifier[self] , identifier[type] ):
literal[string]
identifier[mapping] ={
literal[string] : identifier[sa] . identifier[Text] ,
literal[string] : keyword[None] ,
literal[string] : identifier[sa] . identifier[Boolean] ,
literal[string] : identifier[sa] . identifier[Date] ,
literal[string] : identifier[sa] . identifier[DateTime] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : identifier[sa] . identifier[Integer] ,
literal[string] : identifier[sa] . identifier[Float] ,
literal[string] : keyword[None] ,
literal[string] : identifier[sa] . identifier[Text] ,
literal[string] : identifier[sa] . identifier[Time] ,
literal[string] : identifier[sa] . identifier[Integer] ,
literal[string] : keyword[None] ,
}
keyword[if] identifier[self] . identifier[__dialect] == literal[string] :
identifier[mapping] . identifier[update] ({
literal[string] : identifier[JSONB] ,
literal[string] : identifier[JSONB] ,
literal[string] : identifier[sa] . identifier[Numeric] ,
literal[string] : identifier[JSONB] ,
})
keyword[if] identifier[type] keyword[not] keyword[in] identifier[mapping] :
identifier[message] = literal[string]
keyword[raise] identifier[tableschema] . identifier[exceptions] . identifier[StorageError] ( identifier[message] % identifier[type] )
keyword[return] identifier[mapping] [ identifier[type] ] | def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {'any': sa.Text, 'array': None, 'boolean': sa.Boolean, 'date': sa.Date, 'datetime': sa.DateTime, 'duration': None, 'geojson': None, 'geopoint': None, 'integer': sa.Integer, 'number': sa.Float, 'object': None, 'string': sa.Text, 'time': sa.Time, 'year': sa.Integer, 'yearmonth': None}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({'array': JSONB, 'geojson': JSONB, 'number': sa.Numeric, 'object': JSONB}) # depends on [control=['if'], data=[]]
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type) # depends on [control=['if'], data=['type']]
return mapping[type] |
def compute_mem_overhead(self):
"""Returns memory overhead."""
self.mem_overhead = (self._process.memory_info().rss -
builtins.initial_rss_size) | def function[compute_mem_overhead, parameter[self]]:
    constant[Compute the memory overhead and store it in self.mem_overhead.]
name[self].mem_overhead assign[=] binary_operation[call[name[self]._process.memory_info, parameter[]].rss - name[builtins].initial_rss_size] | keyword[def] identifier[compute_mem_overhead] ( identifier[self] ):
literal[string]
identifier[self] . identifier[mem_overhead] =( identifier[self] . identifier[_process] . identifier[memory_info] (). identifier[rss] -
identifier[builtins] . identifier[initial_rss_size] ) | def compute_mem_overhead(self):
"""Returns memory overhead."""
self.mem_overhead = self._process.memory_info().rss - builtins.initial_rss_size |
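
What the helper measures, sketched standalone under the assumption that psutil is available: the current resident set size minus a baseline recorded earlier:

import os
import psutil

proc = psutil.Process(os.getpid())
baseline = proc.memory_info().rss          # analogous to initial_rss_size
overhead = proc.memory_info().rss - baseline
print(overhead)                            # ~0 right after taking the baseline
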
def should_raptorize(self, req, resp):
""" Determine if this request should be raptorized. Boolean. """
if resp.status != "200 OK":
return False
content_type = resp.headers.get('Content-Type', 'text/plain').lower()
if not 'html' in content_type:
return False
if random.random() > self.random_chance:
return False
if self.only_on_april_1st:
now = datetime.datetime.now()
            if now.month != 4 or now.day != 1:
return False
return True | def function[should_raptorize, parameter[self, req, resp]]:
constant[ Determine if this request should be raptorized. Boolean. ]
if compare[name[resp].status not_equal[!=] constant[200 OK]] begin[:]
return[constant[False]]
variable[content_type] assign[=] call[call[name[resp].headers.get, parameter[constant[Content-Type], constant[text/plain]]].lower, parameter[]]
if <ast.UnaryOp object at 0x7da18f723bb0> begin[:]
return[constant[False]]
if compare[call[name[random].random, parameter[]] greater[>] name[self].random_chance] begin[:]
return[constant[False]]
if name[self].only_on_april_1st begin[:]
variable[now] assign[=] call[name[datetime].datetime.now, parameter[]]
if <ast.BoolOp object at 0x7da1b0bda9e0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[should_raptorize] ( identifier[self] , identifier[req] , identifier[resp] ):
literal[string]
keyword[if] identifier[resp] . identifier[status] != literal[string] :
keyword[return] keyword[False]
identifier[content_type] = identifier[resp] . identifier[headers] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] ()
keyword[if] keyword[not] literal[string] keyword[in] identifier[content_type] :
keyword[return] keyword[False]
keyword[if] identifier[random] . identifier[random] ()> identifier[self] . identifier[random_chance] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[only_on_april_1st] :
identifier[now] = identifier[datetime] . identifier[datetime] . identifier[now] ()
keyword[if] identifier[now] . identifier[month] != literal[int] keyword[or] identifier[now] . identifier[day] != literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def should_raptorize(self, req, resp):
""" Determine if this request should be raptorized. Boolean. """
if resp.status != '200 OK':
return False # depends on [control=['if'], data=[]]
content_type = resp.headers.get('Content-Type', 'text/plain').lower()
if not 'html' in content_type:
return False # depends on [control=['if'], data=[]]
if random.random() > self.random_chance:
return False # depends on [control=['if'], data=[]]
if self.only_on_april_1st:
now = datetime.datetime.now()
            if now.month != 4 or now.day != 1:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
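
A self-contained sketch of the same gating logic, with the April 1st test written out; the date check passes only when month == 4 and day == 1:

import datetime
import random

def gate(random_chance=0.5, only_on_april_1st=True):
    if random.random() > random_chance:
        return False
    if only_on_april_1st:
        now = datetime.datetime.now()
        if now.month != 4 or now.day != 1:
            return False
    return True

print(gate(random_chance=1.0, only_on_april_1st=False))   # always True
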
def map(requests, stream=True, pool=None, size=1, exception_handler=None):
"""Concurrently converts a list of Requests to Responses.
:param requests: a collection of Request objects.
:param stream: If False, the content will not be downloaded immediately.
:param size: Specifies the number of workers to run at a time. If 1, no parallel processing.
    :param exception_handler: Callback function, called when an exception occurs. Params: Request, Exception
"""
    own_pool = pool is None  # remember whether this call created the pool
    pool = pool if pool else Pool(size)
requests = list(requests)
requests = pool.map(send, requests)
ret = []
for request in requests:
if request.response is not None:
ret.append(request.response)
elif exception_handler and hasattr(request, 'exception'):
ret.append(exception_handler(request, request.exception))
else:
ret.append(None)
    if own_pool:
pool.close()
return ret | def function[map, parameter[requests, stream, pool, size, exception_handler]]:
constant[Concurrently converts a list of Requests to Responses.
:param requests: a collection of Request objects.
:param stream: If False, the content will not be downloaded immediately.
:param size: Specifies the number of workers to run at a time. If 1, no parallel processing.
    :param exception_handler: Callback function, called when an exception occurs. Params: Request, Exception
]
variable[own_pool] assign[=] compare[name[pool] is constant[None]]
variable[pool] assign[=] <ast.IfExp object at 0x7da1b03957e0>
variable[requests] assign[=] call[name[list], parameter[name[requests]]]
variable[requests] assign[=] call[name[pool].map, parameter[name[send], name[requests]]]
variable[ret] assign[=] list[[]]
for taget[name[request]] in starred[name[requests]] begin[:]
if compare[name[request].response is_not constant[None]] begin[:]
call[name[ret].append, parameter[name[request].response]]
if name[own_pool] begin[:]
call[name[pool].close, parameter[]]
return[name[ret]] | keyword[def] identifier[map] ( identifier[requests] , identifier[stream] = keyword[True] , identifier[pool] = keyword[None] , identifier[size] = literal[int] , identifier[exception_handler] = keyword[None] ):
literal[string]
identifier[own_pool] = identifier[pool] keyword[is] keyword[None]
identifier[pool] = identifier[pool] keyword[if] identifier[pool] keyword[else] identifier[Pool] ( identifier[size] )
identifier[requests] = identifier[list] ( identifier[requests] )
identifier[requests] = identifier[pool] . identifier[map] ( identifier[send] , identifier[requests] )
identifier[ret] =[]
keyword[for] identifier[request] keyword[in] identifier[requests] :
keyword[if] identifier[request] . identifier[response] keyword[is] keyword[not] keyword[None] :
identifier[ret] . identifier[append] ( identifier[request] . identifier[response] )
keyword[elif] identifier[exception_handler] keyword[and] identifier[hasattr] ( identifier[request] , literal[string] ):
identifier[ret] . identifier[append] ( identifier[exception_handler] ( identifier[request] , identifier[request] . identifier[exception] ))
keyword[else] :
identifier[ret] . identifier[append] ( keyword[None] )
keyword[if] identifier[own_pool] :
identifier[pool] . identifier[close] ()
keyword[return] identifier[ret] | def map(requests, stream=True, pool=None, size=1, exception_handler=None):
"""Concurrently converts a list of Requests to Responses.
:param requests: a collection of Request objects.
:param stream: If False, the content will not be downloaded immediately.
:param size: Specifies the number of workers to run at a time. If 1, no parallel processing.
    :param exception_handler: Callback function, called when an exception occurs. Params: Request, Exception
"""
    own_pool = pool is None  # remember whether this call created the pool
    pool = pool if pool else Pool(size)
requests = list(requests)
requests = pool.map(send, requests)
ret = []
for request in requests:
if request.response is not None:
ret.append(request.response) # depends on [control=['if'], data=[]]
elif exception_handler and hasattr(request, 'exception'):
ret.append(exception_handler(request, request.exception)) # depends on [control=['if'], data=[]]
else:
ret.append(None) # depends on [control=['for'], data=['request']]
    if own_pool:
pool.close() # depends on [control=['if'], data=[]]
return ret |
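
A standalone demonstration of the response/exception bookkeeping above; Request and send() here are minimal stand-ins, not the library's real async machinery:

class Request:
    def __init__(self, ok=True):
        self.ok, self.response = ok, None

def send(req):
    if req.ok:
        req.response = "200 OK"
    else:
        req.exception = RuntimeError("boom")
    return req

requests = [send(r) for r in (Request(), Request(ok=False))]
handler = lambda req, exc: "handled: %s" % exc
results = [r.response if r.response is not None
           else handler(r, r.exception) if hasattr(r, "exception")
           else None
           for r in requests]
print(results)   # ['200 OK', 'handled: boom']
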
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.close_window()
if not plugin.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.lspmanager.shutdown()
self.already_closed = True
return True | def function[closing, parameter[self, cancelable]]:
constant[Exit tasks]
if <ast.BoolOp object at 0x7da18bcc87f0> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da18bcc8eb0> begin[:]
variable[reply] assign[=] call[name[QMessageBox].critical, parameter[name[self], constant[Spyder], constant[Do you really want to exit?], name[QMessageBox].Yes, name[QMessageBox].No]]
if compare[name[reply] equal[==] name[QMessageBox].No] begin[:]
return[constant[False]]
variable[prefix] assign[=] binary_operation[constant[window] + constant[/]]
call[name[self].save_current_window_settings, parameter[name[prefix]]]
if <ast.BoolOp object at 0x7da18bcc9d20> begin[:]
call[name[self].open_files_server.close, parameter[]]
for taget[name[plugin]] in starred[binary_operation[name[self].widgetlist + name[self].thirdparty_plugins]] begin[:]
call[name[plugin].close_window, parameter[]]
if <ast.UnaryOp object at 0x7da18bcc8a00> begin[:]
return[constant[False]]
call[name[self].dialog_manager.close_all, parameter[]]
if name[self].toolbars_visible begin[:]
call[name[self].save_visible_toolbars, parameter[]]
call[name[self].lspmanager.shutdown, parameter[]]
name[self].already_closed assign[=] constant[True]
return[constant[True]] | keyword[def] identifier[closing] ( identifier[self] , identifier[cancelable] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[already_closed] keyword[or] identifier[self] . identifier[is_starting_up] :
keyword[return] keyword[True]
keyword[if] identifier[cancelable] keyword[and] identifier[CONF] . identifier[get] ( literal[string] , literal[string] ):
identifier[reply] = identifier[QMessageBox] . identifier[critical] ( identifier[self] , literal[string] ,
literal[string] ,
identifier[QMessageBox] . identifier[Yes] , identifier[QMessageBox] . identifier[No] )
keyword[if] identifier[reply] == identifier[QMessageBox] . identifier[No] :
keyword[return] keyword[False]
identifier[prefix] = literal[string] + literal[string]
identifier[self] . identifier[save_current_window_settings] ( identifier[prefix] )
keyword[if] identifier[CONF] . identifier[get] ( literal[string] , literal[string] ) keyword[and] identifier[self] . identifier[open_files_server] :
identifier[self] . identifier[open_files_server] . identifier[close] ()
keyword[for] identifier[plugin] keyword[in] ( identifier[self] . identifier[widgetlist] + identifier[self] . identifier[thirdparty_plugins] ):
identifier[plugin] . identifier[close_window] ()
keyword[if] keyword[not] identifier[plugin] . identifier[closing_plugin] ( identifier[cancelable] ):
keyword[return] keyword[False]
identifier[self] . identifier[dialog_manager] . identifier[close_all] ()
keyword[if] identifier[self] . identifier[toolbars_visible] :
identifier[self] . identifier[save_visible_toolbars] ()
identifier[self] . identifier[lspmanager] . identifier[shutdown] ()
identifier[self] . identifier[already_closed] = keyword[True]
keyword[return] keyword[True] | def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True # depends on [control=['if'], data=[]]
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder', 'Do you really want to exit?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close() # depends on [control=['if'], data=[]]
for plugin in self.widgetlist + self.thirdparty_plugins:
plugin.close_window()
if not plugin.closing_plugin(cancelable):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['plugin']]
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars() # depends on [control=['if'], data=[]]
self.lspmanager.shutdown()
self.already_closed = True
return True |
def make_tempfile (self, want='handle', resolution='try_unlink', suffix='', **kwargs):
"""Get a context manager that creates and cleans up a uniquely-named temporary
file with a name similar to this path.
This function returns a context manager that creates a secure
temporary file with a path similar to *self*. In particular, if
``str(self)`` is something like ``foo/bar``, the path of the temporary
file will be something like ``foo/bar.ame8_2``.
The object returned by the context manager depends on the *want* argument:
``"handle"``
An open file-like object is returned. This is the object returned by
:class:`tempfile.NamedTemporaryFile`. Its name on the filesystem is
accessible as a string as its `name` attribute, or (a customization here)
as a :class:`Path` instance as its `path` attribute.
``"path"``
The temporary file is created as in ``"handle"``, but is then immediately
closed. A :class:`Path` instance pointing to the path of the temporary file is
instead returned.
If an exception occurs inside the context manager block, the temporary file is
left lying around. Otherwise, what happens to it upon exit from the context
manager depends on the *resolution* argument:
``"try_unlink"``
Call :meth:`try_unlink` on the temporary file — no exception is raised if
the file did not exist.
``"unlink"``
Call :meth:`unlink` on the temporary file — an exception is raised if
the file did not exist.
``"keep"``
The temporary file is left lying around.
``"overwrite"``
The temporary file is :meth:`rename`-d to overwrite *self*.
For instance, when rewriting important files, it’s typical to write
the new data to a temporary file, and only rename the temporary file
to the final destination at the end — that way, if a problem happens
while writing the new data, the original file is left unmodified;
otherwise you’d be stuck with a partially-written version of the file.
This pattern can be accomplished with::
p = Path ('path/to/important/file')
with p.make_tempfile (resolution='overwrite', mode='wt') as h:
print ('important stuff goes here', file=h)
The *suffix* argument is appended to the temporary file name after the
random portion. It defaults to the empty string. If you want it to
operate as a typical filename suffix, include a leading ``"."``.
Other **kwargs** are passed to :class:`tempfile.NamedTemporaryFile`.
"""
if want not in ('handle', 'path'):
raise ValueError ('unrecognized make_tempfile() "want" mode %r' % (want,))
if resolution not in ('unlink', 'try_unlink', 'keep', 'overwrite'):
raise ValueError ('unrecognized make_tempfile() "resolution" mode %r' % (resolution,))
return Path._PathTempfileContextManager (self, want, resolution, suffix, kwargs) | def function[make_tempfile, parameter[self, want, resolution, suffix]]:
constant[Get a context manager that creates and cleans up a uniquely-named temporary
file with a name similar to this path.
This function returns a context manager that creates a secure
temporary file with a path similar to *self*. In particular, if
``str(self)`` is something like ``foo/bar``, the path of the temporary
file will be something like ``foo/bar.ame8_2``.
The object returned by the context manager depends on the *want* argument:
``"handle"``
An open file-like object is returned. This is the object returned by
:class:`tempfile.NamedTemporaryFile`. Its name on the filesystem is
accessible as a string as its `name` attribute, or (a customization here)
as a :class:`Path` instance as its `path` attribute.
``"path"``
The temporary file is created as in ``"handle"``, but is then immediately
closed. A :class:`Path` instance pointing to the path of the temporary file is
instead returned.
If an exception occurs inside the context manager block, the temporary file is
left lying around. Otherwise, what happens to it upon exit from the context
manager depends on the *resolution* argument:
``"try_unlink"``
Call :meth:`try_unlink` on the temporary file — no exception is raised if
the file did not exist.
``"unlink"``
Call :meth:`unlink` on the temporary file — an exception is raised if
the file did not exist.
``"keep"``
The temporary file is left lying around.
``"overwrite"``
The temporary file is :meth:`rename`-d to overwrite *self*.
For instance, when rewriting important files, it’s typical to write
the new data to a temporary file, and only rename the temporary file
to the final destination at the end — that way, if a problem happens
while writing the new data, the original file is left unmodified;
otherwise you’d be stuck with a partially-written version of the file.
This pattern can be accomplished with::
p = Path ('path/to/important/file')
with p.make_tempfile (resolution='overwrite', mode='wt') as h:
print ('important stuff goes here', file=h)
The *suffix* argument is appended to the temporary file name after the
random portion. It defaults to the empty string. If you want it to
operate as a typical filename suffix, include a leading ``"."``.
Other **kwargs** are passed to :class:`tempfile.NamedTemporaryFile`.
]
if compare[name[want] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da2054a7640>, <ast.Constant object at 0x7da2054a6590>]]] begin[:]
<ast.Raise object at 0x7da2054a4160>
if compare[name[resolution] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b27b6350>, <ast.Constant object at 0x7da1b27b47f0>, <ast.Constant object at 0x7da1b27b7190>, <ast.Constant object at 0x7da1b27b51b0>]]] begin[:]
<ast.Raise object at 0x7da1b27b4d00>
return[call[name[Path]._PathTempfileContextManager, parameter[name[self], name[want], name[resolution], name[suffix], name[kwargs]]]] | keyword[def] identifier[make_tempfile] ( identifier[self] , identifier[want] = literal[string] , identifier[resolution] = literal[string] , identifier[suffix] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[want] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[want] ,))
keyword[if] identifier[resolution] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[resolution] ,))
keyword[return] identifier[Path] . identifier[_PathTempfileContextManager] ( identifier[self] , identifier[want] , identifier[resolution] , identifier[suffix] , identifier[kwargs] ) | def make_tempfile(self, want='handle', resolution='try_unlink', suffix='', **kwargs):
"""Get a context manager that creates and cleans up a uniquely-named temporary
file with a name similar to this path.
This function returns a context manager that creates a secure
temporary file with a path similar to *self*. In particular, if
``str(self)`` is something like ``foo/bar``, the path of the temporary
file will be something like ``foo/bar.ame8_2``.
The object returned by the context manager depends on the *want* argument:
``"handle"``
An open file-like object is returned. This is the object returned by
:class:`tempfile.NamedTemporaryFile`. Its name on the filesystem is
accessible as a string as its `name` attribute, or (a customization here)
as a :class:`Path` instance as its `path` attribute.
``"path"``
The temporary file is created as in ``"handle"``, but is then immediately
closed. A :class:`Path` instance pointing to the path of the temporary file is
instead returned.
If an exception occurs inside the context manager block, the temporary file is
left lying around. Otherwise, what happens to it upon exit from the context
manager depends on the *resolution* argument:
``"try_unlink"``
Call :meth:`try_unlink` on the temporary file — no exception is raised if
the file did not exist.
``"unlink"``
Call :meth:`unlink` on the temporary file — an exception is raised if
the file did not exist.
``"keep"``
The temporary file is left lying around.
``"overwrite"``
The temporary file is :meth:`rename`-d to overwrite *self*.
For instance, when rewriting important files, it’s typical to write
the new data to a temporary file, and only rename the temporary file
to the final destination at the end — that way, if a problem happens
while writing the new data, the original file is left unmodified;
otherwise you’d be stuck with a partially-written version of the file.
This pattern can be accomplished with::
p = Path ('path/to/important/file')
with p.make_tempfile (resolution='overwrite', mode='wt') as h:
print ('important stuff goes here', file=h)
The *suffix* argument is appended to the temporary file name after the
random portion. It defaults to the empty string. If you want it to
operate as a typical filename suffix, include a leading ``"."``.
Other **kwargs** are passed to :class:`tempfile.NamedTemporaryFile`.
"""
if want not in ('handle', 'path'):
raise ValueError('unrecognized make_tempfile() "want" mode %r' % (want,)) # depends on [control=['if'], data=['want']]
if resolution not in ('unlink', 'try_unlink', 'keep', 'overwrite'):
raise ValueError('unrecognized make_tempfile() "resolution" mode %r' % (resolution,)) # depends on [control=['if'], data=['resolution']]
return Path._PathTempfileContextManager(self, want, resolution, suffix, kwargs) |
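
For comparison, the overwrite pattern the docstring describes, written directly against the standard library with no Path wrapper:

import os
import tempfile

target = "important.txt"
fd, tmpname = tempfile.mkstemp(dir=".", suffix=".new")
with os.fdopen(fd, "w") as handle:
    handle.write("important stuff goes here\n")
os.replace(tmpname, target)   # the "overwrite" resolution step, atomic on POSIX
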
def select(self, selector):
'''Transforms each element of a sequence into a new form.
Each element is transformed through a selector function to produce a
value for each value in the source sequence. The generated sequence is
lazily evaluated.
Args:
selector: A unary function mapping a value in the source sequence
                to the corresponding value in the generated sequence.
The argument of the selector function (which can have any name)
is,
Args:
element: The value of the element
Returns:
The selected value derived from the element value
Returns:
A generated sequence whose elements are the result of invoking the
selector function on each element of the source sequence.
'''
return self._create(self._pool.imap_unordered(selector, iter(self),
self._chunksize)) | def function[select, parameter[self, selector]]:
constant[Transforms each element of a sequence into a new form.
Each element is transformed through a selector function to produce a
value for each value in the source sequence. The generated sequence is
lazily evaluated.
Args:
selector: A unary function mapping a value in the source sequence
                to the corresponding value in the generated sequence.
The argument of the selector function (which can have any name)
is,
Args:
element: The value of the element
Returns:
The selected value derived from the element value
Returns:
A generated sequence whose elements are the result of invoking the
selector function on each element of the source sequence.
]
return[call[name[self]._create, parameter[call[name[self]._pool.imap_unordered, parameter[name[selector], call[name[iter], parameter[name[self]]], name[self]._chunksize]]]]] | keyword[def] identifier[select] ( identifier[self] , identifier[selector] ):
literal[string]
keyword[return] identifier[self] . identifier[_create] ( identifier[self] . identifier[_pool] . identifier[imap_unordered] ( identifier[selector] , identifier[iter] ( identifier[self] ),
identifier[self] . identifier[_chunksize] )) | def select(self, selector):
"""Transforms each element of a sequence into a new form.
Each element is transformed through a selector function to produce a
value for each value in the source sequence. The generated sequence is
lazily evaluated.
Args:
selector: A unary function mapping a value in the source sequence
                to the corresponding value in the generated sequence.
The argument of the selector function (which can have any name)
is,
Args:
element: The value of the element
Returns:
The selected value derived from the element value
Returns:
A generated sequence whose elements are the result of invoking the
selector function on each element of the source sequence.
"""
return self._create(self._pool.imap_unordered(selector, iter(self), self._chunksize)) |
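
A standalone illustration of the lazy parallel projection used above; imap_unordered yields results as workers finish, in arbitrary order:

from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with Pool(2) as pool:
        print(sorted(pool.imap_unordered(square, range(5), chunksize=2)))
        # [0, 1, 4, 9, 16]
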
def _open_xarray_dataset(self, val, chunks=CHUNK_SIZE):
"""Read the band in blocks."""
dask_arr = from_sds(val, chunks=chunks)
attrs = val.attributes()
return xr.DataArray(dask_arr, dims=('y', 'x'),
attrs=attrs) | def function[_open_xarray_dataset, parameter[self, val, chunks]]:
constant[Read the band in blocks.]
variable[dask_arr] assign[=] call[name[from_sds], parameter[name[val]]]
variable[attrs] assign[=] call[name[val].attributes, parameter[]]
return[call[name[xr].DataArray, parameter[name[dask_arr]]]] | keyword[def] identifier[_open_xarray_dataset] ( identifier[self] , identifier[val] , identifier[chunks] = identifier[CHUNK_SIZE] ):
literal[string]
identifier[dask_arr] = identifier[from_sds] ( identifier[val] , identifier[chunks] = identifier[chunks] )
identifier[attrs] = identifier[val] . identifier[attributes] ()
keyword[return] identifier[xr] . identifier[DataArray] ( identifier[dask_arr] , identifier[dims] =( literal[string] , literal[string] ),
identifier[attrs] = identifier[attrs] ) | def _open_xarray_dataset(self, val, chunks=CHUNK_SIZE):
"""Read the band in blocks."""
dask_arr = from_sds(val, chunks=chunks)
attrs = val.attributes()
return xr.DataArray(dask_arr, dims=('y', 'x'), attrs=attrs) |
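
An analogous wrapping of a chunked array into a labelled DataArray, assuming dask and xarray are installed; from_sds and the HDF4 value object are specific to the reader and omitted here:

import dask.array as da
import xarray as xr

arr = da.zeros((4, 6), chunks=(2, 3))            # lazy, block-wise array
data = xr.DataArray(arr, dims=('y', 'x'), attrs={'units': 'K'})
print(data.chunks)                               # ((2, 2), (3, 3))
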
def message_checksum(msg):
    '''calculate an 8-bit checksum of the key fields of a message, so we
can detect incompatible XML changes'''
from .mavcrc import x25crc
crc = x25crc()
crc.accumulate_str(msg.name + ' ')
# in order to allow for extensions the crc does not include
# any field extensions
crc_end = msg.base_fields()
for i in range(crc_end):
f = msg.ordered_fields[i]
crc.accumulate_str(f.type + ' ')
crc.accumulate_str(f.name + ' ')
if f.array_length:
crc.accumulate([f.array_length])
return (crc.crc&0xFF) ^ (crc.crc>>8) | def function[message_checksum, parameter[msg]]:
    constant[calculate an 8-bit checksum of the key fields of a message, so we
can detect incompatible XML changes]
from relative_module[mavcrc] import module[x25crc]
variable[crc] assign[=] call[name[x25crc], parameter[]]
call[name[crc].accumulate_str, parameter[binary_operation[name[msg].name + constant[ ]]]]
variable[crc_end] assign[=] call[name[msg].base_fields, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[name[crc_end]]]] begin[:]
variable[f] assign[=] call[name[msg].ordered_fields][name[i]]
call[name[crc].accumulate_str, parameter[binary_operation[name[f].type + constant[ ]]]]
call[name[crc].accumulate_str, parameter[binary_operation[name[f].name + constant[ ]]]]
if name[f].array_length begin[:]
call[name[crc].accumulate, parameter[list[[<ast.Attribute object at 0x7da20c9928c0>]]]]
return[binary_operation[binary_operation[name[crc].crc <ast.BitAnd object at 0x7da2590d6b60> constant[255]] <ast.BitXor object at 0x7da2590d6b00> binary_operation[name[crc].crc <ast.RShift object at 0x7da2590d6a40> constant[8]]]] | keyword[def] identifier[message_checksum] ( identifier[msg] ):
literal[string]
keyword[from] . identifier[mavcrc] keyword[import] identifier[x25crc]
identifier[crc] = identifier[x25crc] ()
identifier[crc] . identifier[accumulate_str] ( identifier[msg] . identifier[name] + literal[string] )
identifier[crc_end] = identifier[msg] . identifier[base_fields] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[crc_end] ):
identifier[f] = identifier[msg] . identifier[ordered_fields] [ identifier[i] ]
identifier[crc] . identifier[accumulate_str] ( identifier[f] . identifier[type] + literal[string] )
identifier[crc] . identifier[accumulate_str] ( identifier[f] . identifier[name] + literal[string] )
keyword[if] identifier[f] . identifier[array_length] :
identifier[crc] . identifier[accumulate] ([ identifier[f] . identifier[array_length] ])
keyword[return] ( identifier[crc] . identifier[crc] & literal[int] )^( identifier[crc] . identifier[crc] >> literal[int] ) | def message_checksum(msg):
"""calculate a 8-bit checksum of the key fields of a message, so we
can detect incompatible XML changes"""
from .mavcrc import x25crc
crc = x25crc()
crc.accumulate_str(msg.name + ' ')
# in order to allow for extensions the crc does not include
# any field extensions
crc_end = msg.base_fields()
for i in range(crc_end):
f = msg.ordered_fields[i]
crc.accumulate_str(f.type + ' ')
crc.accumulate_str(f.name + ' ')
if f.array_length:
crc.accumulate([f.array_length]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return crc.crc & 255 ^ crc.crc >> 8 |
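
The final line folds the 16-bit X.25 CRC into 8 bits by XOR-ing its two bytes; a quick standalone check of that fold with an arbitrary CRC value:

crc16 = 0xBEEF
crc8 = (crc16 & 0xFF) ^ (crc16 >> 8)   # 0xEF ^ 0xBE
print(hex(crc8))                       # 0x51
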
def new_tx(self, *args, **kwargs):
""" Let's obtain a new txbuffer
:returns int txid: id of the new txbuffer
"""
builder = self.transactionbuilder_class(
*args, blockchain_instance=self, **kwargs
)
self._txbuffers.append(builder)
return builder | def function[new_tx, parameter[self]]:
    constant[ Obtain a new transaction buffer
            :returns: the new transaction builder instance
]
variable[builder] assign[=] call[name[self].transactionbuilder_class, parameter[<ast.Starred object at 0x7da1b0109750>]]
call[name[self]._txbuffers.append, parameter[name[builder]]]
return[name[builder]] | keyword[def] identifier[new_tx] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[builder] = identifier[self] . identifier[transactionbuilder_class] (
* identifier[args] , identifier[blockchain_instance] = identifier[self] ,** identifier[kwargs]
)
identifier[self] . identifier[_txbuffers] . identifier[append] ( identifier[builder] )
keyword[return] identifier[builder] | def new_tx(self, *args, **kwargs):
""" Let's obtain a new txbuffer
:returns int txid: id of the new txbuffer
"""
builder = self.transactionbuilder_class(*args, blockchain_instance=self, **kwargs)
self._txbuffers.append(builder)
return builder |
def get_all_scores(self, motifs, dbmotifs, match, metric, combine,
pval=False, parallel=True, trim=None, ncpus=None):
"""Pairwise comparison of a set of motifs compared to reference motifs.
Parameters
----------
motifs : list
List of Motif instances.
dbmotifs : list
List of Motif instances.
match : str
Match can be "partial", "subtotal" or "total". Not all metrics use
this.
metric : str
Distance metric.
combine : str
Combine positional scores using "mean" or "sum". Not all metrics
use this.
pval : bool , optional
            Calculate p-value of the match.
parallel : bool , optional
Use multiprocessing for parallel execution. True by default.
trim : float or None
            If a float value is specified, motifs are trimmed using this IC
cutoff before comparison.
ncpus : int or None
Specifies the number of cores to use for parallel execution.
Returns
-------
scores : dict
Dictionary with scores.
"""
# trim motifs first, if specified
if trim:
for m in motifs:
m.trim(trim)
for m in dbmotifs:
m.trim(trim)
# hash of result scores
scores = {}
if parallel:
# Divide the job into big chunks, to keep parallel overhead to minimum
# Number of chunks = number of processors available
if ncpus is None:
ncpus = int(MotifConfig().get_default_params()["ncpus"])
pool = Pool(processes=ncpus, maxtasksperchild=1000)
batch_len = len(dbmotifs) // ncpus
if batch_len <= 0:
batch_len = 1
jobs = []
for i in range(0, len(dbmotifs), batch_len):
# submit jobs to the job server
p = pool.apply_async(_get_all_scores,
args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval))
jobs.append(p)
pool.close()
for job in jobs:
# Get the job result
result = job.get()
# and update the result score
for m1,v in result.items():
for m2, s in v.items():
if m1 not in scores:
scores[m1] = {}
scores[m1][m2] = s
pool.join()
else:
# Do the whole thing at once if we don't want parallel
scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval)
return scores | def function[get_all_scores, parameter[self, motifs, dbmotifs, match, metric, combine, pval, parallel, trim, ncpus]]:
constant[Pairwise comparison of a set of motifs compared to reference motifs.
Parameters
----------
motifs : list
List of Motif instances.
dbmotifs : list
List of Motif instances.
match : str
Match can be "partial", "subtotal" or "total". Not all metrics use
this.
metric : str
Distance metric.
combine : str
Combine positional scores using "mean" or "sum". Not all metrics
use this.
pval : bool , optional
            Calculate p-value of the match.
parallel : bool , optional
Use multiprocessing for parallel execution. True by default.
trim : float or None
            If a float value is specified, motifs are trimmed using this IC
cutoff before comparison.
ncpus : int or None
Specifies the number of cores to use for parallel execution.
Returns
-------
scores : dict
Dictionary with scores.
]
if name[trim] begin[:]
for taget[name[m]] in starred[name[motifs]] begin[:]
call[name[m].trim, parameter[name[trim]]]
for taget[name[m]] in starred[name[dbmotifs]] begin[:]
call[name[m].trim, parameter[name[trim]]]
variable[scores] assign[=] dictionary[[], []]
if name[parallel] begin[:]
if compare[name[ncpus] is constant[None]] begin[:]
variable[ncpus] assign[=] call[name[int], parameter[call[call[call[name[MotifConfig], parameter[]].get_default_params, parameter[]]][constant[ncpus]]]]
variable[pool] assign[=] call[name[Pool], parameter[]]
variable[batch_len] assign[=] binary_operation[call[name[len], parameter[name[dbmotifs]]] <ast.FloorDiv object at 0x7da2590d6bc0> name[ncpus]]
if compare[name[batch_len] less_or_equal[<=] constant[0]] begin[:]
variable[batch_len] assign[=] constant[1]
variable[jobs] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[dbmotifs]]], name[batch_len]]]] begin[:]
variable[p] assign[=] call[name[pool].apply_async, parameter[name[_get_all_scores]]]
call[name[jobs].append, parameter[name[p]]]
call[name[pool].close, parameter[]]
for taget[name[job]] in starred[name[jobs]] begin[:]
variable[result] assign[=] call[name[job].get, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0fec4c0>, <ast.Name object at 0x7da1b0fecdc0>]]] in starred[call[name[result].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0feffa0>, <ast.Name object at 0x7da1b0feecb0>]]] in starred[call[name[v].items, parameter[]]] begin[:]
if compare[name[m1] <ast.NotIn object at 0x7da2590d7190> name[scores]] begin[:]
call[name[scores]][name[m1]] assign[=] dictionary[[], []]
call[call[name[scores]][name[m1]]][name[m2]] assign[=] name[s]
call[name[pool].join, parameter[]]
return[name[scores]] | keyword[def] identifier[get_all_scores] ( identifier[self] , identifier[motifs] , identifier[dbmotifs] , identifier[match] , identifier[metric] , identifier[combine] ,
identifier[pval] = keyword[False] , identifier[parallel] = keyword[True] , identifier[trim] = keyword[None] , identifier[ncpus] = keyword[None] ):
literal[string]
keyword[if] identifier[trim] :
keyword[for] identifier[m] keyword[in] identifier[motifs] :
identifier[m] . identifier[trim] ( identifier[trim] )
keyword[for] identifier[m] keyword[in] identifier[dbmotifs] :
identifier[m] . identifier[trim] ( identifier[trim] )
identifier[scores] ={}
keyword[if] identifier[parallel] :
keyword[if] identifier[ncpus] keyword[is] keyword[None] :
identifier[ncpus] = identifier[int] ( identifier[MotifConfig] (). identifier[get_default_params] ()[ literal[string] ])
identifier[pool] = identifier[Pool] ( identifier[processes] = identifier[ncpus] , identifier[maxtasksperchild] = literal[int] )
identifier[batch_len] = identifier[len] ( identifier[dbmotifs] )// identifier[ncpus]
keyword[if] identifier[batch_len] <= literal[int] :
identifier[batch_len] = literal[int]
identifier[jobs] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[dbmotifs] ), identifier[batch_len] ):
identifier[p] = identifier[pool] . identifier[apply_async] ( identifier[_get_all_scores] ,
identifier[args] =( identifier[self] , identifier[motifs] , identifier[dbmotifs] [ identifier[i] : identifier[i] + identifier[batch_len] ], identifier[match] , identifier[metric] , identifier[combine] , identifier[pval] ))
identifier[jobs] . identifier[append] ( identifier[p] )
identifier[pool] . identifier[close] ()
keyword[for] identifier[job] keyword[in] identifier[jobs] :
identifier[result] = identifier[job] . identifier[get] ()
keyword[for] identifier[m1] , identifier[v] keyword[in] identifier[result] . identifier[items] ():
keyword[for] identifier[m2] , identifier[s] keyword[in] identifier[v] . identifier[items] ():
keyword[if] identifier[m1] keyword[not] keyword[in] identifier[scores] :
identifier[scores] [ identifier[m1] ]={}
identifier[scores] [ identifier[m1] ][ identifier[m2] ]= identifier[s]
identifier[pool] . identifier[join] ()
keyword[else] :
identifier[scores] = identifier[_get_all_scores] ( identifier[self] , identifier[motifs] , identifier[dbmotifs] , identifier[match] , identifier[metric] , identifier[combine] , identifier[pval] )
keyword[return] identifier[scores] | def get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval=False, parallel=True, trim=None, ncpus=None):
"""Pairwise comparison of a set of motifs compared to reference motifs.
Parameters
----------
motifs : list
List of Motif instances.
dbmotifs : list
List of Motif instances.
match : str
Match can be "partial", "subtotal" or "total". Not all metrics use
this.
metric : str
Distance metric.
combine : str
Combine positional scores using "mean" or "sum". Not all metrics
use this.
pval : bool , optional
            Calculate p-value of the match.
parallel : bool , optional
Use multiprocessing for parallel execution. True by default.
trim : float or None
            If a float value is specified, motifs are trimmed using this IC
cutoff before comparison.
ncpus : int or None
Specifies the number of cores to use for parallel execution.
Returns
-------
scores : dict
Dictionary with scores.
"""
# trim motifs first, if specified
if trim:
for m in motifs:
m.trim(trim) # depends on [control=['for'], data=['m']]
for m in dbmotifs:
m.trim(trim) # depends on [control=['for'], data=['m']] # depends on [control=['if'], data=[]]
# hash of result scores
scores = {}
if parallel:
# Divide the job into big chunks, to keep parallel overhead to minimum
# Number of chunks = number of processors available
if ncpus is None:
ncpus = int(MotifConfig().get_default_params()['ncpus']) # depends on [control=['if'], data=['ncpus']]
pool = Pool(processes=ncpus, maxtasksperchild=1000)
batch_len = len(dbmotifs) // ncpus
if batch_len <= 0:
batch_len = 1 # depends on [control=['if'], data=['batch_len']]
jobs = []
for i in range(0, len(dbmotifs), batch_len):
# submit jobs to the job server
p = pool.apply_async(_get_all_scores, args=(self, motifs, dbmotifs[i:i + batch_len], match, metric, combine, pval))
jobs.append(p) # depends on [control=['for'], data=['i']]
pool.close()
for job in jobs:
# Get the job result
result = job.get()
# and update the result score
for (m1, v) in result.items():
for (m2, s) in v.items():
if m1 not in scores:
scores[m1] = {} # depends on [control=['if'], data=['m1', 'scores']]
scores[m1][m2] = s # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['job']]
pool.join() # depends on [control=['if'], data=[]]
else:
# Do the whole thing at once if we don't want parallel
scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval)
return scores |
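
The chunking scheme above in isolation: split the work list into roughly ncpus batches, farm each batch out with apply_async, then merge the partial dicts:

from multiprocessing import Pool

def work(batch):
    return {item: item * 2 for item in batch}

if __name__ == '__main__':
    items = list(range(10))
    ncpus = 3
    batch_len = max(len(items) // ncpus, 1)
    with Pool(ncpus) as pool:
        jobs = [pool.apply_async(work, (items[i:i + batch_len],))
                for i in range(0, len(items), batch_len)]
        merged = {}
        for job in jobs:
            merged.update(job.get())
    print(merged[9])   # 18
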
def fetch(channels, start, end, type=None, dtype=None, allow_tape=None,
connection=None, host=None, port=None, pad=None, scaled=True,
verbose=False, series_class=TimeSeries):
# host and port keywords are used by the decorator only
# pylint: disable=unused-argument
"""Fetch a dict of data series from NDS2
This method sits underneath `TimeSeries.fetch` and related methods,
and isn't really designed to be called directly.
"""
# set ALLOW_DATA_ON_TAPE
if allow_tape is not None:
set_parameter(connection, 'ALLOW_DATA_ON_TAPE', str(allow_tape),
verbose=verbose)
type = _parse_nds_enum_dict_param(channels, 'type', type)
dtype = _parse_nds_enum_dict_param(channels, 'dtype', dtype)
# verify channels exist
print_verbose("Checking channels list against NDS2 database...", end=' ',
verbose=verbose)
utype = reduce(operator.or_, type.values()) # logical OR of types
udtype = reduce(operator.or_, dtype.values())
epoch = (start, end) if connection.get_protocol() > 1 else None
ndschannels = io_nds2.find_channels(channels, connection=connection,
epoch=epoch, type=utype, dtype=udtype,
unique=True)
names = [Channel.from_nds2(c).ndsname for c in ndschannels]
print_verbose('done', verbose=verbose)
# handle minute trend timing
if (any(c.endswith('m-trend') for c in names) and
(start % 60 or end % 60)):
warnings.warn("Requested at least one minute trend, but "
"start and stop GPS times are not multiples of "
"60. Times will be expanded outwards to compensate")
start, end = io_nds2.minute_trend_times(start, end)
# get data availability
span = SegmentList([Segment(start, end)])
if pad is None:
qsegs = span
gap = 'raise'
elif connection.get_protocol() == 1:
qsegs = span
gap = 'pad'
else:
print_verbose("Querying for data availability...", end=' ',
verbose=verbose)
pad = float(pad)
gap = 'pad'
qsegs = _get_data_segments(ndschannels, start, end, connection) & span
print_verbose('done\nFound {0} viable segments of data with {1:.2f}% '
'coverage'.format(len(qsegs),
abs(qsegs) / abs(span) * 100),
verbose=verbose)
if span - qsegs:
warnings.warn("Gaps were found in data available from {0}, "
"but will be padded with {1}".format(
connection.get_host(), pad))
# query for each segment
out = series_class.DictClass()
desc = verbose if isinstance(verbose, str) else 'Downloading data'
with progress_bar(total=float(abs(qsegs)), desc=desc,
unit='s', disable=not bool(verbose)) as bar:
for seg in qsegs:
total = 0.
for buffers in connection.iterate(int(seg[0]), int(seg[1]), names):
for buffer_, chan in zip(buffers, channels):
series = series_class.from_nds2_buffer(
buffer_,
scaled=scaled,
copy=chan not in out, # only copy if first buffer
)
out.append({chan: series}, pad=pad, gap=gap)
new = buffer_.length / buffer_.channel.sample_rate
total += new
bar.update(new)
# sometimes NDS2 returns no data at all
if not total and gap != 'pad':
raise RuntimeError("no data received from {0} for {1}".format(
connection.get_host(), seg))
# finalise timeseries to make sure each channel has the correct limits
# only if user asked to pad gaps
if pad is not None:
for chan, ndschan in zip(channels, ndschannels):
try:
ts = out[chan]
except KeyError:
out[chan] = _create_series(ndschan, pad, start, end,
series_class=series_class)
else:
out[chan] = _pad_series(ts, pad, start, end)
return out | def function[fetch, parameter[channels, start, end, type, dtype, allow_tape, connection, host, port, pad, scaled, verbose, series_class]]:
constant[Fetch a dict of data series from NDS2
This method sits underneath `TimeSeries.fetch` and related methods,
and isn't really designed to be called directly.
]
if compare[name[allow_tape] is_not constant[None]] begin[:]
call[name[set_parameter], parameter[name[connection], constant[ALLOW_DATA_ON_TAPE], call[name[str], parameter[name[allow_tape]]]]]
variable[type] assign[=] call[name[_parse_nds_enum_dict_param], parameter[name[channels], constant[type], name[type]]]
variable[dtype] assign[=] call[name[_parse_nds_enum_dict_param], parameter[name[channels], constant[dtype], name[dtype]]]
call[name[print_verbose], parameter[constant[Checking channels list against NDS2 database...]]]
variable[utype] assign[=] call[name[reduce], parameter[name[operator].or_, call[name[type].values, parameter[]]]]
variable[udtype] assign[=] call[name[reduce], parameter[name[operator].or_, call[name[dtype].values, parameter[]]]]
variable[epoch] assign[=] <ast.IfExp object at 0x7da18bc703a0>
variable[ndschannels] assign[=] call[name[io_nds2].find_channels, parameter[name[channels]]]
variable[names] assign[=] <ast.ListComp object at 0x7da18bc71bd0>
call[name[print_verbose], parameter[constant[done]]]
if <ast.BoolOp object at 0x7da18bc73070> begin[:]
call[name[warnings].warn, parameter[constant[Requested at least one minute trend, but start and stop GPS times are not multiples of 60. Times will be expanded outwards to compensate]]]
<ast.Tuple object at 0x7da18bc73400> assign[=] call[name[io_nds2].minute_trend_times, parameter[name[start], name[end]]]
variable[span] assign[=] call[name[SegmentList], parameter[list[[<ast.Call object at 0x7da18bc73490>]]]]
if compare[name[pad] is constant[None]] begin[:]
variable[qsegs] assign[=] name[span]
variable[gap] assign[=] constant[raise]
variable[out] assign[=] call[name[series_class].DictClass, parameter[]]
variable[desc] assign[=] <ast.IfExp object at 0x7da18bc70730>
with call[name[progress_bar], parameter[]] begin[:]
for taget[name[seg]] in starred[name[qsegs]] begin[:]
variable[total] assign[=] constant[0.0]
for taget[name[buffers]] in starred[call[name[connection].iterate, parameter[call[name[int], parameter[call[name[seg]][constant[0]]]], call[name[int], parameter[call[name[seg]][constant[1]]]], name[names]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18bc72740>, <ast.Name object at 0x7da18bc725f0>]]] in starred[call[name[zip], parameter[name[buffers], name[channels]]]] begin[:]
variable[series] assign[=] call[name[series_class].from_nds2_buffer, parameter[name[buffer_]]]
call[name[out].append, parameter[dictionary[[<ast.Name object at 0x7da20e9b3160>], [<ast.Name object at 0x7da20e9b0e80>]]]]
variable[new] assign[=] binary_operation[name[buffer_].length / name[buffer_].channel.sample_rate]
<ast.AugAssign object at 0x7da20e9b1de0>
call[name[bar].update, parameter[name[new]]]
if <ast.BoolOp object at 0x7da20e9b1930> begin[:]
<ast.Raise object at 0x7da20e9b19c0>
if compare[name[pad] is_not constant[None]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20e9b3eb0>, <ast.Name object at 0x7da20e9b35b0>]]] in starred[call[name[zip], parameter[name[channels], name[ndschannels]]]] begin[:]
<ast.Try object at 0x7da20e9b1330>
return[name[out]] | keyword[def] identifier[fetch] ( identifier[channels] , identifier[start] , identifier[end] , identifier[type] = keyword[None] , identifier[dtype] = keyword[None] , identifier[allow_tape] = keyword[None] ,
identifier[connection] = keyword[None] , identifier[host] = keyword[None] , identifier[port] = keyword[None] , identifier[pad] = keyword[None] , identifier[scaled] = keyword[True] ,
identifier[verbose] = keyword[False] , identifier[series_class] = identifier[TimeSeries] ):
literal[string]
keyword[if] identifier[allow_tape] keyword[is] keyword[not] keyword[None] :
identifier[set_parameter] ( identifier[connection] , literal[string] , identifier[str] ( identifier[allow_tape] ),
identifier[verbose] = identifier[verbose] )
identifier[type] = identifier[_parse_nds_enum_dict_param] ( identifier[channels] , literal[string] , identifier[type] )
identifier[dtype] = identifier[_parse_nds_enum_dict_param] ( identifier[channels] , literal[string] , identifier[dtype] )
identifier[print_verbose] ( literal[string] , identifier[end] = literal[string] ,
identifier[verbose] = identifier[verbose] )
identifier[utype] = identifier[reduce] ( identifier[operator] . identifier[or_] , identifier[type] . identifier[values] ())
identifier[udtype] = identifier[reduce] ( identifier[operator] . identifier[or_] , identifier[dtype] . identifier[values] ())
identifier[epoch] =( identifier[start] , identifier[end] ) keyword[if] identifier[connection] . identifier[get_protocol] ()> literal[int] keyword[else] keyword[None]
identifier[ndschannels] = identifier[io_nds2] . identifier[find_channels] ( identifier[channels] , identifier[connection] = identifier[connection] ,
identifier[epoch] = identifier[epoch] , identifier[type] = identifier[utype] , identifier[dtype] = identifier[udtype] ,
identifier[unique] = keyword[True] )
identifier[names] =[ identifier[Channel] . identifier[from_nds2] ( identifier[c] ). identifier[ndsname] keyword[for] identifier[c] keyword[in] identifier[ndschannels] ]
identifier[print_verbose] ( literal[string] , identifier[verbose] = identifier[verbose] )
keyword[if] ( identifier[any] ( identifier[c] . identifier[endswith] ( literal[string] ) keyword[for] identifier[c] keyword[in] identifier[names] ) keyword[and]
( identifier[start] % literal[int] keyword[or] identifier[end] % literal[int] )):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string] )
identifier[start] , identifier[end] = identifier[io_nds2] . identifier[minute_trend_times] ( identifier[start] , identifier[end] )
identifier[span] = identifier[SegmentList] ([ identifier[Segment] ( identifier[start] , identifier[end] )])
keyword[if] identifier[pad] keyword[is] keyword[None] :
identifier[qsegs] = identifier[span]
identifier[gap] = literal[string]
keyword[elif] identifier[connection] . identifier[get_protocol] ()== literal[int] :
identifier[qsegs] = identifier[span]
identifier[gap] = literal[string]
keyword[else] :
identifier[print_verbose] ( literal[string] , identifier[end] = literal[string] ,
identifier[verbose] = identifier[verbose] )
identifier[pad] = identifier[float] ( identifier[pad] )
identifier[gap] = literal[string]
identifier[qsegs] = identifier[_get_data_segments] ( identifier[ndschannels] , identifier[start] , identifier[end] , identifier[connection] )& identifier[span]
identifier[print_verbose] ( literal[string]
literal[string] . identifier[format] ( identifier[len] ( identifier[qsegs] ),
identifier[abs] ( identifier[qsegs] )/ identifier[abs] ( identifier[span] )* literal[int] ),
identifier[verbose] = identifier[verbose] )
keyword[if] identifier[span] - identifier[qsegs] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] . identifier[format] (
identifier[connection] . identifier[get_host] (), identifier[pad] ))
identifier[out] = identifier[series_class] . identifier[DictClass] ()
identifier[desc] = identifier[verbose] keyword[if] identifier[isinstance] ( identifier[verbose] , identifier[str] ) keyword[else] literal[string]
keyword[with] identifier[progress_bar] ( identifier[total] = identifier[float] ( identifier[abs] ( identifier[qsegs] )), identifier[desc] = identifier[desc] ,
identifier[unit] = literal[string] , identifier[disable] = keyword[not] identifier[bool] ( identifier[verbose] )) keyword[as] identifier[bar] :
keyword[for] identifier[seg] keyword[in] identifier[qsegs] :
identifier[total] = literal[int]
keyword[for] identifier[buffers] keyword[in] identifier[connection] . identifier[iterate] ( identifier[int] ( identifier[seg] [ literal[int] ]), identifier[int] ( identifier[seg] [ literal[int] ]), identifier[names] ):
keyword[for] identifier[buffer_] , identifier[chan] keyword[in] identifier[zip] ( identifier[buffers] , identifier[channels] ):
identifier[series] = identifier[series_class] . identifier[from_nds2_buffer] (
identifier[buffer_] ,
identifier[scaled] = identifier[scaled] ,
identifier[copy] = identifier[chan] keyword[not] keyword[in] identifier[out] ,
)
identifier[out] . identifier[append] ({ identifier[chan] : identifier[series] }, identifier[pad] = identifier[pad] , identifier[gap] = identifier[gap] )
identifier[new] = identifier[buffer_] . identifier[length] / identifier[buffer_] . identifier[channel] . identifier[sample_rate]
identifier[total] += identifier[new]
identifier[bar] . identifier[update] ( identifier[new] )
keyword[if] keyword[not] identifier[total] keyword[and] identifier[gap] != literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] (
identifier[connection] . identifier[get_host] (), identifier[seg] ))
keyword[if] identifier[pad] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[chan] , identifier[ndschan] keyword[in] identifier[zip] ( identifier[channels] , identifier[ndschannels] ):
keyword[try] :
identifier[ts] = identifier[out] [ identifier[chan] ]
keyword[except] identifier[KeyError] :
identifier[out] [ identifier[chan] ]= identifier[_create_series] ( identifier[ndschan] , identifier[pad] , identifier[start] , identifier[end] ,
identifier[series_class] = identifier[series_class] )
keyword[else] :
identifier[out] [ identifier[chan] ]= identifier[_pad_series] ( identifier[ts] , identifier[pad] , identifier[start] , identifier[end] )
keyword[return] identifier[out] | def fetch(channels, start, end, type=None, dtype=None, allow_tape=None, connection=None, host=None, port=None, pad=None, scaled=True, verbose=False, series_class=TimeSeries):
# host and port keywords are used by the decorator only
# pylint: disable=unused-argument
"Fetch a dict of data series from NDS2\n\n This method sits underneath `TimeSeries.fetch` and related methods,\n and isn't really designed to be called directly.\n "
# set ALLOW_DATA_ON_TAPE
if allow_tape is not None:
set_parameter(connection, 'ALLOW_DATA_ON_TAPE', str(allow_tape), verbose=verbose) # depends on [control=['if'], data=['allow_tape']]
type = _parse_nds_enum_dict_param(channels, 'type', type)
dtype = _parse_nds_enum_dict_param(channels, 'dtype', dtype)
# verify channels exist
print_verbose('Checking channels list against NDS2 database...', end=' ', verbose=verbose)
utype = reduce(operator.or_, type.values()) # logical OR of types
udtype = reduce(operator.or_, dtype.values())
epoch = (start, end) if connection.get_protocol() > 1 else None
ndschannels = io_nds2.find_channels(channels, connection=connection, epoch=epoch, type=utype, dtype=udtype, unique=True)
names = [Channel.from_nds2(c).ndsname for c in ndschannels]
print_verbose('done', verbose=verbose)
# handle minute trend timing
if any((c.endswith('m-trend') for c in names)) and (start % 60 or end % 60):
warnings.warn('Requested at least one minute trend, but start and stop GPS times are not multiples of 60. Times will be expanded outwards to compensate')
(start, end) = io_nds2.minute_trend_times(start, end) # depends on [control=['if'], data=[]]
# get data availability
span = SegmentList([Segment(start, end)])
if pad is None:
qsegs = span
gap = 'raise' # depends on [control=['if'], data=[]]
elif connection.get_protocol() == 1:
qsegs = span
gap = 'pad' # depends on [control=['if'], data=[]]
else:
print_verbose('Querying for data availability...', end=' ', verbose=verbose)
pad = float(pad)
gap = 'pad'
qsegs = _get_data_segments(ndschannels, start, end, connection) & span
print_verbose('done\nFound {0} viable segments of data with {1:.2f}% coverage'.format(len(qsegs), abs(qsegs) / abs(span) * 100), verbose=verbose)
if span - qsegs:
warnings.warn('Gaps were found in data available from {0}, but will be padded with {1}'.format(connection.get_host(), pad)) # depends on [control=['if'], data=[]]
# query for each segment
out = series_class.DictClass()
desc = verbose if isinstance(verbose, str) else 'Downloading data'
with progress_bar(total=float(abs(qsegs)), desc=desc, unit='s', disable=not bool(verbose)) as bar:
for seg in qsegs:
total = 0.0
for buffers in connection.iterate(int(seg[0]), int(seg[1]), names):
for (buffer_, chan) in zip(buffers, channels): # only copy if first buffer
series = series_class.from_nds2_buffer(buffer_, scaled=scaled, copy=chan not in out)
out.append({chan: series}, pad=pad, gap=gap) # depends on [control=['for'], data=[]]
new = buffer_.length / buffer_.channel.sample_rate
total += new
bar.update(new) # depends on [control=['for'], data=['buffers']]
# sometimes NDS2 returns no data at all
if not total and gap != 'pad':
raise RuntimeError('no data received from {0} for {1}'.format(connection.get_host(), seg)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['seg']] # depends on [control=['with'], data=['bar']]
# finalise timeseries to make sure each channel has the correct limits
# only if user asked to pad gaps
if pad is not None:
for (chan, ndschan) in zip(channels, ndschannels):
try:
ts = out[chan] # depends on [control=['try'], data=[]]
except KeyError:
out[chan] = _create_series(ndschan, pad, start, end, series_class=series_class) # depends on [control=['except'], data=[]]
else:
out[chan] = _pad_series(ts, pad, start, end) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['pad']]
return out |
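A hedged usage sketch for fetch(): the host, port, channel name, and GPS times below are illustrative, and the nds2 client bindings are assumed to be installed (normally the connection is supplied by a decorator rather than built by hand):

import nds2  # NDS2 client python bindings, assumed available

conn = nds2.connection('nds.ligo.caltech.edu', 31200)  # illustrative host/port
data = fetch(['H1:GDS-CALIB_STRAIN'], 1126259446, 1126259478,
             connection=conn, pad=0.0, verbose=True)
for name, series in data.items():
    print(name, len(series))  # values are series_class (TimeSeries) instances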
def CreateDefaultPartition(client, ad_group_id):
"""Creates a default partition.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group.
"""
ad_group_criterion_service = client.GetService('AdGroupCriterionService',
version='v201809')
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
# Make sure that caseValue and parentCriterionId are left unspecified.
# This makes this partition as generic as possible to use as a
# fallback when others don't match.
'criterion': {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
},
'biddingStrategyConfiguration': {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'microAmount': 500000
}
}]
}
}
}]
ad_group_criterion = ad_group_criterion_service.mutate(operations)['value'][0]
print ('Ad group criterion with ID "%d" in ad group with ID "%d" was added.'
% (ad_group_criterion['criterion']['id'],
ad_group_criterion['adGroupId'])) | def function[CreateDefaultPartition, parameter[client, ad_group_id]]:
constant[Creates a default partition.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group.
]
variable[ad_group_criterion_service] assign[=] call[name[client].GetService, parameter[constant[AdGroupCriterionService]]]
variable[operations] assign[=] list[[<ast.Dict object at 0x7da1b1b0cf40>]]
variable[ad_group_criterion] assign[=] call[call[call[name[ad_group_criterion_service].mutate, parameter[name[operations]]]][constant[value]]][constant[0]]
call[name[print], parameter[binary_operation[constant[Ad group criterion with ID "%d" in ad group with ID "%d" was added.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1b0d0c0>, <ast.Subscript object at 0x7da1b1b0ebc0>]]]]] | keyword[def] identifier[CreateDefaultPartition] ( identifier[client] , identifier[ad_group_id] ):
literal[string]
identifier[ad_group_criterion_service] = identifier[client] . identifier[GetService] ( literal[string] ,
identifier[version] = literal[string] )
identifier[operations] =[{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[ad_group_id] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string]
},
literal[string] :{
literal[string] :[{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[int]
}
}]
}
}
}]
identifier[ad_group_criterion] = identifier[ad_group_criterion_service] . identifier[mutate] ( identifier[operations] )[ literal[string] ][ literal[int] ]
identifier[print] ( literal[string]
%( identifier[ad_group_criterion] [ literal[string] ][ literal[string] ],
identifier[ad_group_criterion] [ literal[string] ])) | def CreateDefaultPartition(client, ad_group_id):
"""Creates a default partition.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group.
"""
ad_group_criterion_service = client.GetService('AdGroupCriterionService', version='v201809')
# Make sure that caseValue and parentCriterionId are left unspecified.
# This makes this partition as generic as possible to use as a
# fallback when others don't match.
operations = [{'operator': 'ADD', 'operand': {'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': ad_group_id, 'criterion': {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}, 'biddingStrategyConfiguration': {'bids': [{'xsi_type': 'CpcBid', 'bid': {'microAmount': 500000}}]}}}]
ad_group_criterion = ad_group_criterion_service.mutate(operations)['value'][0]
print('Ad group criterion with ID "%d" in ad group with ID "%d" was added.' % (ad_group_criterion['criterion']['id'], ad_group_criterion['adGroupId'])) |
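A hedged usage sketch for CreateDefaultPartition; the ad group ID is a placeholder, and the client is loaded the way the googleads examples usually do it, from a googleads.yaml file:

from googleads import adwords

adwords_client = adwords.AdWordsClient.LoadFromStorage()  # reads googleads.yaml
CreateDefaultPartition(adwords_client, 123456789)  # hypothetical ad group ID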
def save(self, filename=None):
""" Saves the runtime configuration to disk.
Parameters
----------
filename: str or None, default=None
writeable path to configuration filename.
If None, use default location and filename.
"""
if not filename:
filename = self.DEFAULT_CONFIG_FILE_NAME
else:
filename = str(filename)
        # try to extract the path from filename and use it as cfg_dir
head, tail = os.path.split(filename)
if head:
self._cfg_dir = head
        # we search for .cfg files in cfg_dir, so make sure it carries the proper extension.
base, ext = os.path.splitext(tail)
if ext != ".cfg":
filename += ".cfg"
        # if we have no writable cfg dir, try to create it first. Return if it failed.
        if not self.cfg_dir or not os.path.isdir(self.cfg_dir) or not os.access(self.cfg_dir, os.W_OK):
try:
self.cfg_dir = self.DEFAULT_CONFIG_DIR
except ConfigDirectoryException as cde:
print(Config._format_msg('Could not create configuration directory "{dir}"! config.save() failed.'
' Please set a writeable location with config.cfg_dir = val. Error was {exc}'
.format(dir=self.cfg_dir, exc=cde)))
return
filename = os.path.join(self.cfg_dir, filename)
try:
with open(filename, 'w') as fh:
self._conf_values.write(fh)
except IOError as ioe:
print(Config._format_msg("Save failed with error %s" % ioe)) | def function[save, parameter[self, filename]]:
constant[ Saves the runtime configuration to disk.
Parameters
----------
filename: str or None, default=None
writeable path to configuration filename.
If None, use default location and filename.
]
if <ast.UnaryOp object at 0x7da18f720310> begin[:]
variable[filename] assign[=] name[self].DEFAULT_CONFIG_FILE_NAME
if <ast.BoolOp object at 0x7da18f7232e0> begin[:]
<ast.Try object at 0x7da18f723250>
variable[filename] assign[=] call[name[os].path.join, parameter[name[self].cfg_dir, name[filename]]]
<ast.Try object at 0x7da18f7239d0> | keyword[def] identifier[save] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[filename] :
identifier[filename] = identifier[self] . identifier[DEFAULT_CONFIG_FILE_NAME]
keyword[else] :
identifier[filename] = identifier[str] ( identifier[filename] )
identifier[head] , identifier[tail] = identifier[os] . identifier[path] . identifier[split] ( identifier[filename] )
keyword[if] identifier[head] :
identifier[self] . identifier[_cfg_dir] = identifier[head]
identifier[base] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[tail] )
keyword[if] identifier[ext] != literal[string] :
identifier[filename] += literal[string]
keyword[if] keyword[not] identifier[self] . identifier[cfg_dir] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[cfg_dir] ) keyword[or] keyword[not] identifier[os] . identifier[stat] ( identifier[self] . identifier[cfg_dir] )!= identifier[os] . identifier[W_OK] :
keyword[try] :
identifier[self] . identifier[cfg_dir] = identifier[self] . identifier[DEFAULT_CONFIG_DIR]
keyword[except] identifier[ConfigDirectoryException] keyword[as] identifier[cde] :
identifier[print] ( identifier[Config] . identifier[_format_msg] ( literal[string]
literal[string]
. identifier[format] ( identifier[dir] = identifier[self] . identifier[cfg_dir] , identifier[exc] = identifier[cde] )))
keyword[return]
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[cfg_dir] , identifier[filename] )
keyword[try] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fh] :
identifier[self] . identifier[_conf_values] . identifier[write] ( identifier[fh] )
keyword[except] identifier[IOError] keyword[as] identifier[ioe] :
identifier[print] ( identifier[Config] . identifier[_format_msg] ( literal[string] % identifier[ioe] )) | def save(self, filename=None):
""" Saves the runtime configuration to disk.
Parameters
----------
filename: str or None, default=None
writeable path to configuration filename.
If None, use default location and filename.
"""
if not filename:
filename = self.DEFAULT_CONFIG_FILE_NAME # depends on [control=['if'], data=[]]
else:
filename = str(filename)
# try to extract the path from filename and use is as cfg_dir
(head, tail) = os.path.split(filename)
if head:
self._cfg_dir = head # depends on [control=['if'], data=[]]
# we are search for .cfg files in cfg_dir so make sure it contains the proper extension.
(base, ext) = os.path.splitext(tail)
if ext != '.cfg':
filename += '.cfg' # depends on [control=['if'], data=[]]
# if we have no cfg dir, try to create it first. Return if it failed.
if not self.cfg_dir or not os.path.isdir(self.cfg_dir) or (not os.stat(self.cfg_dir) != os.W_OK):
try:
self.cfg_dir = self.DEFAULT_CONFIG_DIR # depends on [control=['try'], data=[]]
except ConfigDirectoryException as cde:
print(Config._format_msg('Could not create configuration directory "{dir}"! config.save() failed. Please set a writeable location with config.cfg_dir = val. Error was {exc}'.format(dir=self.cfg_dir, exc=cde)))
return # depends on [control=['except'], data=['cde']] # depends on [control=['if'], data=[]]
filename = os.path.join(self.cfg_dir, filename)
try:
with open(filename, 'w') as fh:
self._conf_values.write(fh) # depends on [control=['with'], data=['fh']] # depends on [control=['try'], data=[]]
except IOError as ioe:
print(Config._format_msg('Save failed with error %s' % ioe)) # depends on [control=['except'], data=['ioe']] |
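A sketch of how save() is meant to be called; Config stands for the class that owns this method, and the directory and filename are illustrative assumptions:

config = Config()                    # hypothetical construction of the config object
config.cfg_dir = '/tmp/my_app_conf'  # the cfg_dir setter is assumed to create/validate it
config.save('my_settings')           # extension is normalised, written as my_settings.cfg
config.save()                        # falls back to Config.DEFAULT_CONFIG_FILE_NAME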
def get_tokenizer(fhp # type: Optional[field_formats.FieldHashingProperties]
):
# type: (...) -> Callable[[Text, Optional[Text]], Iterable[Text]]
""" Get tokeniser function from the hash settings.
This function takes a FieldHashingProperties object. It returns a
function that takes a string and tokenises based on those properties.
"""
def dummy(word, ignore=None):
# type: (Text, Optional[Text]) -> Iterable[Text]
"""
Null tokenizer returns empty Iterable.
FieldSpec Ignore has hashing_properties = None
and get_tokenizer has to return something for this case,
even though it's never called. An alternative would be to
        use an Optional[Callable].
:param word: not used
:param ignore: not used
:return: empty Iterable
"""
return ('' for i in range(0))
if not fhp:
return dummy
n = fhp.ngram
if n < 0:
raise ValueError('`n` in `n`-gram must be non-negative.')
positional = fhp.positional
def tok(word, ignore=None):
# type: (Text, Optional[Text]) -> Iterable[Text]
""" Produce `n`-grams of `word`.
:param word: The string to tokenize.
:param ignore: The substring whose occurrences we remove from
`word` before tokenization.
:return: Tuple of n-gram strings.
"""
if ignore is not None:
word = word.replace(ignore, '')
if n > 1:
word = ' {} '.format(word)
if positional:
# These are 1-indexed.
return ('{} {}'.format(i + 1, word[i:i + n])
for i in range(len(word) - n + 1))
else:
return (word[i:i + n] for i in range(len(word) - n + 1))
return tok | def function[get_tokenizer, parameter[fhp]]:
constant[ Get tokeniser function from the hash settings.
This function takes a FieldHashingProperties object. It returns a
function that takes a string and tokenises based on those properties.
]
def function[dummy, parameter[word, ignore]]:
constant[
Null tokenizer returns empty Iterable.
FieldSpec Ignore has hashing_properties = None
and get_tokenizer has to return something for this case,
even though it's never called. An alternative would be to
use an Optional[Callable]].
:param word: not used
:param ignore: not used
:return: empty Iterable
]
return[<ast.GeneratorExp object at 0x7da20c6e43a0>]
if <ast.UnaryOp object at 0x7da20c6e4be0> begin[:]
return[name[dummy]]
variable[n] assign[=] name[fhp].ngram
if compare[name[n] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da20c6e4fd0>
variable[positional] assign[=] name[fhp].positional
def function[tok, parameter[word, ignore]]:
constant[ Produce `n`-grams of `word`.
:param word: The string to tokenize.
:param ignore: The substring whose occurrences we remove from
`word` before tokenization.
:return: Tuple of n-gram strings.
]
if compare[name[ignore] is_not constant[None]] begin[:]
variable[word] assign[=] call[name[word].replace, parameter[name[ignore], constant[]]]
if compare[name[n] greater[>] constant[1]] begin[:]
variable[word] assign[=] call[constant[ {} ].format, parameter[name[word]]]
if name[positional] begin[:]
return[<ast.GeneratorExp object at 0x7da2041dbd30>]
return[name[tok]] | keyword[def] identifier[get_tokenizer] ( identifier[fhp]
):
literal[string]
keyword[def] identifier[dummy] ( identifier[word] , identifier[ignore] = keyword[None] ):
literal[string]
keyword[return] ( literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ))
keyword[if] keyword[not] identifier[fhp] :
keyword[return] identifier[dummy]
identifier[n] = identifier[fhp] . identifier[ngram]
keyword[if] identifier[n] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[positional] = identifier[fhp] . identifier[positional]
keyword[def] identifier[tok] ( identifier[word] , identifier[ignore] = keyword[None] ):
literal[string]
keyword[if] identifier[ignore] keyword[is] keyword[not] keyword[None] :
identifier[word] = identifier[word] . identifier[replace] ( identifier[ignore] , literal[string] )
keyword[if] identifier[n] > literal[int] :
identifier[word] = literal[string] . identifier[format] ( identifier[word] )
keyword[if] identifier[positional] :
keyword[return] ( literal[string] . identifier[format] ( identifier[i] + literal[int] , identifier[word] [ identifier[i] : identifier[i] + identifier[n] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[word] )- identifier[n] + literal[int] ))
keyword[else] :
keyword[return] ( identifier[word] [ identifier[i] : identifier[i] + identifier[n] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[word] )- identifier[n] + literal[int] ))
keyword[return] identifier[tok] | def get_tokenizer(fhp): # type: Optional[field_formats.FieldHashingProperties]
# type: (...) -> Callable[[Text, Optional[Text]], Iterable[Text]]
' Get tokeniser function from the hash settings.\n\n This function takes a FieldHashingProperties object. It returns a\n function that takes a string and tokenises based on those properties.\n '
def dummy(word, ignore=None):
# type: (Text, Optional[Text]) -> Iterable[Text]
"\n Null tokenizer returns empty Iterable.\n FieldSpec Ignore has hashing_properties = None\n and get_tokenizer has to return something for this case,\n even though it's never called. An alternative would be to\n use an Optional[Callable]].\n :param word: not used\n :param ignore: not used\n :return: empty Iterable\n "
return ('' for i in range(0))
if not fhp:
return dummy # depends on [control=['if'], data=[]]
n = fhp.ngram
if n < 0:
raise ValueError('`n` in `n`-gram must be non-negative.') # depends on [control=['if'], data=[]]
positional = fhp.positional
def tok(word, ignore=None):
# type: (Text, Optional[Text]) -> Iterable[Text]
' Produce `n`-grams of `word`.\n\n :param word: The string to tokenize.\n :param ignore: The substring whose occurrences we remove from\n `word` before tokenization.\n :return: Tuple of n-gram strings.\n '
if ignore is not None:
word = word.replace(ignore, '') # depends on [control=['if'], data=['ignore']]
if n > 1:
word = ' {} '.format(word) # depends on [control=['if'], data=[]]
if positional:
# These are 1-indexed.
return ('{} {}'.format(i + 1, word[i:i + n]) for i in range(len(word) - n + 1)) # depends on [control=['if'], data=[]]
else:
return (word[i:i + n] for i in range(len(word) - n + 1))
return tok |
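The n-gram behaviour is easiest to see in isolation. A minimal self-contained sketch that mirrors tok() for n=2, without the clkhash-style FieldHashingProperties object:

def bigrams(word, positional=True):
    n = 2
    word = ' {} '.format(word)  # pad so edge characters also form n-grams
    if positional:
        # 1-indexed position prefixes, as in tok() above
        return ['{} {}'.format(i + 1, word[i:i + n])
                for i in range(len(word) - n + 1)]
    return [word[i:i + n] for i in range(len(word) - n + 1)]

print(bigrams('abc'))                    # ['1  a', '2 ab', '3 bc', '4 c ']
print(bigrams('abc', positional=False))  # [' a', 'ab', 'bc', 'c ']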
def install(name=None, sources=None, saltenv='base', **kwargs):
'''
Install the passed package. Can install packages from the following
sources:
    * Locally (package already exists on the minion)
* HTTP/HTTPS server
* FTP server
* Salt master
Returns a dict containing the new package names and versions:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Examples:
.. code-block:: bash
# Installing a data stream pkg that already exists on the minion
salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'
# Installing a data stream pkg that exists on the salt master
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'
CLI Example:
.. code-block:: bash
# Installing a data stream pkg that exists on a HTTP server
salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'
    If working with Solaris zones and you want to install a package only in the
    global zone, you can pass 'current_zone_only=True' to salt to have the
    package installed only in the global zone. (Behind the scenes this is
    passing '-G' to the pkgadd command.) The Solaris default when installing a
    package in the global zone is to install it in all zones. This overrides
    that and installs the package only in the global zone.
CLI Example:
.. code-block:: bash
# Installing a data stream package only in the global zone:
salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True
By default salt automatically provides an adminfile, to automate package
installation, with these options set::
email=
instance=quit
partial=nocheck
runlevel=nocheck
idepend=nocheck
rdepend=nocheck
space=nocheck
setuid=nocheck
conflict=nocheck
action=nocheck
basedir=default
    You can override any of these options in two ways. First, you can pass any
    of the options as a kwarg to the module/state to override its default
    value. Second, you can pass the 'admin_source' option to provide your own
    adminfile to the minions.
Note: You can find all of the possible options to provide to the adminfile
by reading the admin man page:
.. code-block:: bash
man -s 4 admin
CLI Example:
.. code-block:: bash
# Overriding the 'instance' adminfile option when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"
SLS Example:
.. code-block:: yaml
# Overriding the 'instance' adminfile option when used in a state
SMClgcc346:
pkg.installed:
- sources:
- SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
- instance: overwrite
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
CLI Example:
.. code-block:: bash
# Providing your own adminfile when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'
# Providing your own adminfile when using states
<pkg name>:
pkg.installed:
- sources:
- <pkg name>: salt://pkgs/<pkg filename>
- admin_source: salt://pkgs/<adminfile filename>
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
'''
if salt.utils.data.is_true(kwargs.get('refresh')):
log.warning('\'refresh\' argument not implemented for solarispkg '
'module')
# pkgs is not supported, but must be passed here for API compatibility
pkgs = kwargs.pop('pkgs', None)
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if not pkg_params:
return {}
if not sources:
log.error('"sources" param required for solaris pkg_add installs')
return {}
try:
if 'admin_source' in kwargs:
adminfile = __salt__['cp.cache_file'](kwargs['admin_source'], saltenv)
else:
adminfile = _write_adminfile(kwargs)
old = list_pkgs()
cmd_prefix = ['/usr/sbin/pkgadd', '-n', '-a', adminfile]
# Only makes sense in a global zone but works fine in non-globals.
        if salt.utils.data.is_true(kwargs.get('current_zone_only')):
            cmd_prefix += ['-G']
errors = []
for pkg in pkg_params:
cmd = cmd_prefix + ['-d', pkg, 'all']
            # Install the package(s)
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
if out['retcode'] != 0 and out['stderr']:
errors.append(out['stderr'])
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': errors, 'changes': ret}
)
finally:
# Remove the temp adminfile
if 'admin_source' not in kwargs:
try:
os.remove(adminfile)
except (NameError, OSError):
pass
return ret | def function[install, parameter[name, sources, saltenv]]:
constant[
Install the passed package. Can install packages from the following
sources:
* Locally (package already exists on the minion
* HTTP/HTTPS server
* FTP server
* Salt master
Returns a dict containing the new package names and versions:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Examples:
.. code-block:: bash
# Installing a data stream pkg that already exists on the minion
salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'
# Installing a data stream pkg that exists on the salt master
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'
CLI Example:
.. code-block:: bash
# Installing a data stream pkg that exists on a HTTP server
salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'
If working with solaris zones and you want to install a package only in the
global zone you can pass 'current_zone_only=True' to salt to have the
package only installed in the global zone. (Behind the scenes this is
passing '-G' to the pkgadd command.) Solaris default when installing a
package in the global zone is to install it in all zones. This overrides
that and installs the package only in the global.
CLI Example:
.. code-block:: bash
# Installing a data stream package only in the global zone:
salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True
By default salt automatically provides an adminfile, to automate package
installation, with these options set::
email=
instance=quit
partial=nocheck
runlevel=nocheck
idepend=nocheck
rdepend=nocheck
space=nocheck
setuid=nocheck
conflict=nocheck
action=nocheck
basedir=default
You can override any of these options in two ways. First you can optionally
pass any of the options as a kwarg to the module/state to override the
default value or you can optionally pass the 'admin_source' option
providing your own adminfile to the minions.
Note: You can find all of the possible options to provide to the adminfile
by reading the admin man page:
.. code-block:: bash
man -s 4 admin
CLI Example:
.. code-block:: bash
# Overriding the 'instance' adminfile option when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"
SLS Example:
.. code-block:: yaml
# Overriding the 'instance' adminfile option when used in a state
SMClgcc346:
pkg.installed:
- sources:
- SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
- instance: overwrite
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
CLI Example:
.. code-block:: bash
# Providing your own adminfile when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'
# Providing your own adminfile when using states
<pkg name>:
pkg.installed:
- sources:
- <pkg name>: salt://pkgs/<pkg filename>
- admin_source: salt://pkgs/<adminfile filename>
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
]
if call[name[salt].utils.data.is_true, parameter[call[name[kwargs].get, parameter[constant[refresh]]]]] begin[:]
call[name[log].warning, parameter[constant['refresh' argument not implemented for solarispkg module]]]
variable[pkgs] assign[=] call[name[kwargs].pop, parameter[constant[pkgs], constant[None]]]
<ast.Try object at 0x7da18fe933a0>
if <ast.UnaryOp object at 0x7da18fe93940> begin[:]
return[dictionary[[], []]]
if <ast.UnaryOp object at 0x7da18fe90ee0> begin[:]
call[name[log].error, parameter[constant["sources" param required for solaris pkg_add installs]]]
return[dictionary[[], []]]
<ast.Try object at 0x7da18fe93790>
return[name[ret]] | keyword[def] identifier[install] ( identifier[name] = keyword[None] , identifier[sources] = keyword[None] , identifier[saltenv] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[salt] . identifier[utils] . identifier[data] . identifier[is_true] ( identifier[kwargs] . identifier[get] ( literal[string] )):
identifier[log] . identifier[warning] ( literal[string]
literal[string] )
identifier[pkgs] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[try] :
identifier[pkg_params] , identifier[pkg_type] = identifier[__salt__] [ literal[string] ](
identifier[name] , identifier[pkgs] , identifier[sources] ,** identifier[kwargs]
)
keyword[except] identifier[MinionError] keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( identifier[exc] )
keyword[if] keyword[not] identifier[pkg_params] :
keyword[return] {}
keyword[if] keyword[not] identifier[sources] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] {}
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[adminfile] = identifier[__salt__] [ literal[string] ]( identifier[kwargs] [ literal[string] ], identifier[saltenv] )
keyword[else] :
identifier[adminfile] = identifier[_write_adminfile] ( identifier[kwargs] )
identifier[old] = identifier[list_pkgs] ()
identifier[cmd_prefix] =[ literal[string] , literal[string] , literal[string] , identifier[adminfile] ]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] )== literal[string] :
identifier[cmd_prefix] += literal[string]
identifier[errors] =[]
keyword[for] identifier[pkg] keyword[in] identifier[pkg_params] :
identifier[cmd] = identifier[cmd_prefix] +[ literal[string] , identifier[pkg] , literal[string] ]
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] ,
identifier[output_loglevel] = literal[string] ,
identifier[python_shell] = keyword[False] )
keyword[if] identifier[out] [ literal[string] ]!= literal[int] keyword[and] identifier[out] [ literal[string] ]:
identifier[errors] . identifier[append] ( identifier[out] [ literal[string] ])
identifier[__context__] . identifier[pop] ( literal[string] , keyword[None] )
identifier[new] = identifier[list_pkgs] ()
identifier[ret] = identifier[salt] . identifier[utils] . identifier[data] . identifier[compare_dicts] ( identifier[old] , identifier[new] )
keyword[if] identifier[errors] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] ,
identifier[info] ={ literal[string] : identifier[errors] , literal[string] : identifier[ret] }
)
keyword[finally] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[adminfile] )
keyword[except] ( identifier[NameError] , identifier[OSError] ):
keyword[pass]
keyword[return] identifier[ret] | def install(name=None, sources=None, saltenv='base', **kwargs):
"""
Install the passed package. Can install packages from the following
sources:
* Locally (package already exists on the minion
* HTTP/HTTPS server
* FTP server
* Salt master
Returns a dict containing the new package names and versions:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Examples:
.. code-block:: bash
# Installing a data stream pkg that already exists on the minion
salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'
# Installing a data stream pkg that exists on the salt master
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'
CLI Example:
.. code-block:: bash
# Installing a data stream pkg that exists on a HTTP server
salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'
If working with solaris zones and you want to install a package only in the
global zone you can pass 'current_zone_only=True' to salt to have the
package only installed in the global zone. (Behind the scenes this is
passing '-G' to the pkgadd command.) Solaris default when installing a
package in the global zone is to install it in all zones. This overrides
that and installs the package only in the global.
CLI Example:
.. code-block:: bash
# Installing a data stream package only in the global zone:
salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True
By default salt automatically provides an adminfile, to automate package
installation, with these options set::
email=
instance=quit
partial=nocheck
runlevel=nocheck
idepend=nocheck
rdepend=nocheck
space=nocheck
setuid=nocheck
conflict=nocheck
action=nocheck
basedir=default
You can override any of these options in two ways. First you can optionally
pass any of the options as a kwarg to the module/state to override the
default value or you can optionally pass the 'admin_source' option
providing your own adminfile to the minions.
Note: You can find all of the possible options to provide to the adminfile
by reading the admin man page:
.. code-block:: bash
man -s 4 admin
CLI Example:
.. code-block:: bash
# Overriding the 'instance' adminfile option when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"
SLS Example:
.. code-block:: yaml
# Overriding the 'instance' adminfile option when used in a state
SMClgcc346:
pkg.installed:
- sources:
- SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
- instance: overwrite
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
CLI Example:
.. code-block:: bash
# Providing your own adminfile when calling the module directly
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'
# Providing your own adminfile when using states
<pkg name>:
pkg.installed:
- sources:
- <pkg name>: salt://pkgs/<pkg filename>
- admin_source: salt://pkgs/<adminfile filename>
.. note::
The ID declaration is ignored, as the package name is read from the
``sources`` parameter.
"""
if salt.utils.data.is_true(kwargs.get('refresh')):
log.warning("'refresh' argument not implemented for solarispkg module") # depends on [control=['if'], data=[]]
# pkgs is not supported, but must be passed here for API compatibility
pkgs = kwargs.pop('pkgs', None)
try:
(pkg_params, pkg_type) = __salt__['pkg_resource.parse_targets'](name, pkgs, sources, **kwargs) # depends on [control=['try'], data=[]]
except MinionError as exc:
raise CommandExecutionError(exc) # depends on [control=['except'], data=['exc']]
if not pkg_params:
return {} # depends on [control=['if'], data=[]]
if not sources:
log.error('"sources" param required for solaris pkg_add installs')
return {} # depends on [control=['if'], data=[]]
try:
if 'admin_source' in kwargs:
adminfile = __salt__['cp.cache_file'](kwargs['admin_source'], saltenv) # depends on [control=['if'], data=['kwargs']]
else:
adminfile = _write_adminfile(kwargs)
old = list_pkgs()
cmd_prefix = ['/usr/sbin/pkgadd', '-n', '-a', adminfile]
# Only makes sense in a global zone but works fine in non-globals.
if kwargs.get('current_zone_only') == 'True':
cmd_prefix += '-G ' # depends on [control=['if'], data=[]]
errors = []
for pkg in pkg_params:
cmd = cmd_prefix + ['-d', pkg, 'all']
# Install the package{s}
out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
if out['retcode'] != 0 and out['stderr']:
errors.append(out['stderr']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pkg']]
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError('Problem encountered installing package(s)', info={'errors': errors, 'changes': ret}) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
# Remove the temp adminfile
if 'admin_source' not in kwargs:
try:
os.remove(adminfile) # depends on [control=['try'], data=[]]
except (NameError, OSError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return ret |
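A note on the '-G' handling above: augmented assignment of a string onto a list extends the list character by character, which would corrupt the pkgadd argument vector. A quick demonstration of the pitfall and the fix:

cmd = ['/usr/sbin/pkgadd', '-n']
cmd += '-G '             # wrong: extends with individual characters
assert cmd == ['/usr/sbin/pkgadd', '-n', '-', 'G', ' ']

cmd = ['/usr/sbin/pkgadd', '-n']
cmd += ['-G']            # right: appends a single argument
assert cmd == ['/usr/sbin/pkgadd', '-n', '-G']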
def login(
cls, username=None, password=None, requests_session=None,
rate_limit=None
):
"""Get a session that has authenticated with okcupid.com.
If no username and password is supplied, the ones stored in
:class:`okcupyd.settings` will be used.
:param username: The username to log in with.
:type username: str
:param password: The password to log in with.
:type password: str
:param rate_limit: Average time in seconds to wait between requests to OKC.
:type rate_limit: float
"""
requests_session = requests_session or requests.Session()
session = cls(requests_session, rate_limit)
# settings.USERNAME and settings.PASSWORD should not be made
# the defaults to their respective arguments because doing so
# would prevent this function from picking up any changes made
# to those values after import time.
username = username or settings.USERNAME
password = password or settings.PASSWORD
session.do_login(username, password)
return session | def function[login, parameter[cls, username, password, requests_session, rate_limit]]:
constant[Get a session that has authenticated with okcupid.com.
If no username and password is supplied, the ones stored in
:class:`okcupyd.settings` will be used.
:param username: The username to log in with.
:type username: str
:param password: The password to log in with.
:type password: str
:param rate_limit: Average time in seconds to wait between requests to OKC.
:type rate_limit: float
]
variable[requests_session] assign[=] <ast.BoolOp object at 0x7da1b2828100>
variable[session] assign[=] call[name[cls], parameter[name[requests_session], name[rate_limit]]]
variable[username] assign[=] <ast.BoolOp object at 0x7da1b28294b0>
variable[password] assign[=] <ast.BoolOp object at 0x7da1b282aa70>
call[name[session].do_login, parameter[name[username], name[password]]]
return[name[session]] | keyword[def] identifier[login] (
identifier[cls] , identifier[username] = keyword[None] , identifier[password] = keyword[None] , identifier[requests_session] = keyword[None] ,
identifier[rate_limit] = keyword[None]
):
literal[string]
identifier[requests_session] = identifier[requests_session] keyword[or] identifier[requests] . identifier[Session] ()
identifier[session] = identifier[cls] ( identifier[requests_session] , identifier[rate_limit] )
identifier[username] = identifier[username] keyword[or] identifier[settings] . identifier[USERNAME]
identifier[password] = identifier[password] keyword[or] identifier[settings] . identifier[PASSWORD]
identifier[session] . identifier[do_login] ( identifier[username] , identifier[password] )
keyword[return] identifier[session] | def login(cls, username=None, password=None, requests_session=None, rate_limit=None):
"""Get a session that has authenticated with okcupid.com.
If no username and password is supplied, the ones stored in
:class:`okcupyd.settings` will be used.
:param username: The username to log in with.
:type username: str
:param password: The password to log in with.
:type password: str
:param rate_limit: Average time in seconds to wait between requests to OKC.
:type rate_limit: float
"""
requests_session = requests_session or requests.Session()
session = cls(requests_session, rate_limit)
# settings.USERNAME and settings.PASSWORD should not be made
# the defaults to their respective arguments because doing so
# would prevent this function from picking up any changes made
# to those values after import time.
username = username or settings.USERNAME
password = password or settings.PASSWORD
session.do_login(username, password)
return session |
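A hedged usage sketch; Session is assumed to be the class this method is defined on, and the credentials are placeholders:

import requests

session = Session.login('example_user', 'example_password',
                        requests_session=requests.Session(),
                        rate_limit=1.0)  # ~1 second average between requests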
def start(self):
        '''Start the timer and store the start time; this can only be executed
        once per instance.
        It returns the timer instance so it can be chained when instantiating
        the timer instance like this:
        ``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self | def function[start, parameter[self]]:
constant[Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``]
assert[compare[name[self]._start is constant[None]]]
name[self]._last assign[=] call[name[time].time, parameter[]]
return[name[self]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[assert] identifier[self] . identifier[_start] keyword[is] keyword[None] ,(
literal[string] )
identifier[self] . identifier[_last] = identifier[self] . identifier[_start] = identifier[time] . identifier[time] ()
keyword[return] identifier[self] | def start(self):
"""Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``"""
assert self._start is None, 'Unable to start, the timer is already running'
self._last = self._start = time.time()
return self |
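The chained-start pattern from the docstring, as a short sketch; reading the private _start attribute afterwards is purely for illustration:

import time

timer = Timer('application_name').start()  # chaining works: start() returns self
time.sleep(0.1)
print(time.time() - timer._start)          # rough elapsed time since start()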
def show_qos_queue(self, queue, **_params):
"""Fetches information of a certain queue."""
return self.get(self.qos_queue_path % (queue),
params=_params) | def function[show_qos_queue, parameter[self, queue]]:
constant[Fetches information of a certain queue.]
return[call[name[self].get, parameter[binary_operation[name[self].qos_queue_path <ast.Mod object at 0x7da2590d6920> name[queue]]]]] | keyword[def] identifier[show_qos_queue] ( identifier[self] , identifier[queue] ,** identifier[_params] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( identifier[self] . identifier[qos_queue_path] %( identifier[queue] ),
identifier[params] = identifier[_params] ) | def show_qos_queue(self, queue, **_params):
"""Fetches information of a certain queue."""
return self.get(self.qos_queue_path % queue, params=_params) |
def alt_names(names: str) -> Callable[..., Any]:
"""Add alternative names to you custom commands.
`names` is a single string with a space separated list of aliases for the
decorated command.
"""
names_split = names.split()
def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
func.alt_names = names_split # type: ignore
return func
return decorator | def function[alt_names, parameter[names]]:
constant[Add alternative names to you custom commands.
`names` is a single string with a space separated list of aliases for the
decorated command.
]
variable[names_split] assign[=] call[name[names].split, parameter[]]
def function[decorator, parameter[func]]:
name[func].alt_names assign[=] name[names_split]
return[name[func]]
return[name[decorator]] | keyword[def] identifier[alt_names] ( identifier[names] : identifier[str] )-> identifier[Callable] [..., identifier[Any] ]:
literal[string]
identifier[names_split] = identifier[names] . identifier[split] ()
keyword[def] identifier[decorator] ( identifier[func] : identifier[Callable] [..., identifier[Any] ])-> identifier[Callable] [..., identifier[Any] ]:
identifier[func] . identifier[alt_names] = identifier[names_split]
keyword[return] identifier[func]
keyword[return] identifier[decorator] | def alt_names(names: str) -> Callable[..., Any]:
"""Add alternative names to you custom commands.
`names` is a single string with a space separated list of aliases for the
decorated command.
"""
names_split = names.split()
def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
func.alt_names = names_split # type: ignore
return func
return decorator |
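Since alt_names is fully defined above, a usage sketch runs as-is; the command function below is illustrative:

@alt_names('st stat')
def status(args):
    """An illustrative command."""
    return 'ok'

print(status.alt_names)  # ['st', 'stat']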
def _update(self):
"""Update the display of button after querying data from interface"""
self.clear()
self._set_boutons_communs()
if self.interface:
self.addSeparator()
l_actions = self.interface.get_actions_toolbar()
self._set_boutons_interface(l_actions) | def function[_update, parameter[self]]:
constant[Update the display of button after querying data from interface]
call[name[self].clear, parameter[]]
call[name[self]._set_boutons_communs, parameter[]]
if name[self].interface begin[:]
call[name[self].addSeparator, parameter[]]
variable[l_actions] assign[=] call[name[self].interface.get_actions_toolbar, parameter[]]
call[name[self]._set_boutons_interface, parameter[name[l_actions]]] | keyword[def] identifier[_update] ( identifier[self] ):
literal[string]
identifier[self] . identifier[clear] ()
identifier[self] . identifier[_set_boutons_communs] ()
keyword[if] identifier[self] . identifier[interface] :
identifier[self] . identifier[addSeparator] ()
identifier[l_actions] = identifier[self] . identifier[interface] . identifier[get_actions_toolbar] ()
identifier[self] . identifier[_set_boutons_interface] ( identifier[l_actions] ) | def _update(self):
"""Update the display of button after querying data from interface"""
self.clear()
self._set_boutons_communs()
if self.interface:
self.addSeparator()
l_actions = self.interface.get_actions_toolbar()
self._set_boutons_interface(l_actions) # depends on [control=['if'], data=[]] |
def plot_by_correct(self, y, is_correct):
""" Plots the images which correspond to the selected class (y) and to the specific case (prediction is correct - is_true=True, prediction is wrong - is_true=False)
Arguments:
y (int): the selected class
is_correct (boolean): a boolean flag (True, False) which specify the what to look for. Ex: True - most correct samples, False - most incorrect samples
"""
return self.plot_val_with_title(self.most_by_correct(y, is_correct), y) | def function[plot_by_correct, parameter[self, y, is_correct]]:
constant[ Plots the images which correspond to the selected class (y) and to the specific case (prediction is correct - is_true=True, prediction is wrong - is_true=False)
Arguments:
y (int): the selected class
is_correct (boolean): a boolean flag (True, False) which specify the what to look for. Ex: True - most correct samples, False - most incorrect samples
]
return[call[name[self].plot_val_with_title, parameter[call[name[self].most_by_correct, parameter[name[y], name[is_correct]]], name[y]]]] | keyword[def] identifier[plot_by_correct] ( identifier[self] , identifier[y] , identifier[is_correct] ):
literal[string]
keyword[return] identifier[self] . identifier[plot_val_with_title] ( identifier[self] . identifier[most_by_correct] ( identifier[y] , identifier[is_correct] ), identifier[y] ) | def plot_by_correct(self, y, is_correct):
""" Plots the images which correspond to the selected class (y) and to the specific case (prediction is correct - is_true=True, prediction is wrong - is_true=False)
Arguments:
y (int): the selected class
is_correct (boolean): a boolean flag (True, False) which specify the what to look for. Ex: True - most correct samples, False - most incorrect samples
"""
return self.plot_val_with_title(self.most_by_correct(y, is_correct), y) |
def link(target, link_to):
"""
Create a link to a target file or a folder.
For simplicity sake, both target and link_to must be absolute path and must
include the filename of the file or folder.
Also do not include any trailing slash.
e.g. link('/path/to/file', '/path/to/link')
But not: link('/path/to/file', 'path/to/')
or link('/path/to/folder/', '/path/to/link')
Args:
target (str): file or folder the link will point to
link_to (str): Link to create
"""
assert isinstance(target, str)
assert os.path.exists(target)
assert isinstance(link_to, str)
    # Create the path to the link if it does not exist
abs_path = os.path.dirname(os.path.abspath(link_to))
if not os.path.isdir(abs_path):
os.makedirs(abs_path)
    # Make sure the file or folder recursively has the correct mode
chmod(target)
# Create the link to target
os.symlink(target, link_to) | def function[link, parameter[target, link_to]]:
constant[
Create a link to a target file or a folder.
    For simplicity's sake, both target and link_to must be absolute paths and must
include the filename of the file or folder.
Also do not include any trailing slash.
e.g. link('/path/to/file', '/path/to/link')
But not: link('/path/to/file', 'path/to/')
or link('/path/to/folder/', '/path/to/link')
Args:
target (str): file or folder the link will point to
link_to (str): Link to create
]
assert[call[name[isinstance], parameter[name[target], name[str]]]]
assert[call[name[os].path.exists, parameter[name[target]]]]
assert[call[name[isinstance], parameter[name[link_to], name[str]]]]
variable[abs_path] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[link_to]]]]]
if <ast.UnaryOp object at 0x7da1b1b3cc70> begin[:]
call[name[os].makedirs, parameter[name[abs_path]]]
call[name[chmod], parameter[name[target]]]
call[name[os].symlink, parameter[name[target], name[link_to]]] | keyword[def] identifier[link] ( identifier[target] , identifier[link_to] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[target] , identifier[str] )
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[target] )
keyword[assert] identifier[isinstance] ( identifier[link_to] , identifier[str] )
identifier[abs_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[link_to] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[abs_path] ):
identifier[os] . identifier[makedirs] ( identifier[abs_path] )
identifier[chmod] ( identifier[target] )
identifier[os] . identifier[symlink] ( identifier[target] , identifier[link_to] ) | def link(target, link_to):
"""
Create a link to a target file or a folder.
    For simplicity's sake, both target and link_to must be absolute paths and must
include the filename of the file or folder.
Also do not include any trailing slash.
e.g. link('/path/to/file', '/path/to/link')
But not: link('/path/to/file', 'path/to/')
or link('/path/to/folder/', '/path/to/link')
Args:
target (str): file or folder the link will point to
link_to (str): Link to create
"""
assert isinstance(target, str)
assert os.path.exists(target)
assert isinstance(link_to, str)
    # Create the path to the link if it does not exist
abs_path = os.path.dirname(os.path.abspath(link_to))
if not os.path.isdir(abs_path):
os.makedirs(abs_path) # depends on [control=['if'], data=[]]
    # Make sure the file or folder recursively has the correct mode
chmod(target)
# Create the link to target
os.symlink(target, link_to) |
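
A runnable sketch of link() in use. It stubs the module-level chmod() helper that link() expects in scope (the real one presumably applies modes recursively), and the paths are invented for the example.

import os
import tempfile

def chmod(path):
    # Stand-in for the module's recursive chmod helper called by link().
    os.chmod(path, 0o755)

base = tempfile.mkdtemp()
target = os.path.join(base, "data.txt")
with open(target, "w") as fh:
    fh.write("payload")

# Intermediate directories for the link are created on demand.
link(target, os.path.join(base, "nested", "data-link"))
print(os.path.islink(os.path.join(base, "nested", "data-link")))  # True
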
def register_as_type(self, locator, object_factory):
"""
Registers a component using its type (a constructor function).
:param locator: a locator to identify component to be created.
:param object_factory: a component type.
"""
if locator == None:
raise Exception("Locator cannot be null")
if object_factory == None:
raise Exception("Factory cannot be null")
def factory(locator):
return object_factory()
self._registrations.append(Registration(locator, factory)) | def function[register_as_type, parameter[self, locator, object_factory]]:
constant[
Registers a component using its type (a constructor function).
:param locator: a locator to identify component to be created.
:param object_factory: a component type.
]
if compare[name[locator] equal[==] constant[None]] begin[:]
<ast.Raise object at 0x7da18eb568f0>
if compare[name[object_factory] equal[==] constant[None]] begin[:]
<ast.Raise object at 0x7da18eb57d60>
def function[factory, parameter[locator]]:
return[call[name[object_factory], parameter[]]]
call[name[self]._registrations.append, parameter[call[name[Registration], parameter[name[locator], name[factory]]]]] | keyword[def] identifier[register_as_type] ( identifier[self] , identifier[locator] , identifier[object_factory] ):
literal[string]
keyword[if] identifier[locator] == keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[object_factory] == keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[def] identifier[factory] ( identifier[locator] ):
keyword[return] identifier[object_factory] ()
identifier[self] . identifier[_registrations] . identifier[append] ( identifier[Registration] ( identifier[locator] , identifier[factory] )) | def register_as_type(self, locator, object_factory):
"""
Registers a component using its type (a constructor function).
:param locator: a locator to identify component to be created.
:param object_factory: a component type.
"""
if locator == None:
raise Exception('Locator cannot be null') # depends on [control=['if'], data=[]]
if object_factory == None:
raise Exception('Factory cannot be null') # depends on [control=['if'], data=[]]
def factory(locator):
return object_factory()
self._registrations.append(Registration(locator, factory)) |
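
A hedged usage sketch for register_as_type(). The Registration class and the host class are not shown here, so the stand-ins below, including Registration's attribute names and the plain-string locator, are assumptions chosen to match the method body above.

class Registration:
    def __init__(self, locator, factory):
        self.locator = locator
        self.factory = factory

class ComponentFactory:
    def __init__(self):
        self._registrations = []

    register_as_type = register_as_type  # reuse the method defined above

class Logger:
    def log(self, msg):
        print("LOG:", msg)

factory = ComponentFactory()
factory.register_as_type("logger", Logger)

# A resolve step would call the stored closure, which ignores the locator
# argument and simply constructs the registered type:
component = factory._registrations[0].factory("logger")
component.log("hello")  # LOG: hello
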
def url_for(endpoint, default="senaite.jsonapi.get", **values):
"""Looks up the API URL for the given endpoint
:param endpoint: The name of the registered route (aka endpoint)
:type endpoint: string
:returns: External URL for this endpoint
:rtype: string/None
"""
try:
return router.url_for(endpoint, force_external=True, values=values)
except Exception:
# XXX plone.jsonapi.core should catch the BuildError of Werkzeug and
# throw another error which can be handled here.
logger.debug("Could not build API URL for endpoint '%s'. "
"No route provider registered?" % endpoint)
# build generic API URL
return router.url_for(default, force_external=True, values=values) | def function[url_for, parameter[endpoint, default]]:
constant[Looks up the API URL for the given endpoint
:param endpoint: The name of the registered route (aka endpoint)
:type endpoint: string
:returns: External URL for this endpoint
:rtype: string/None
]
<ast.Try object at 0x7da1b2539a20> | keyword[def] identifier[url_for] ( identifier[endpoint] , identifier[default] = literal[string] ,** identifier[values] ):
literal[string]
keyword[try] :
keyword[return] identifier[router] . identifier[url_for] ( identifier[endpoint] , identifier[force_external] = keyword[True] , identifier[values] = identifier[values] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] % identifier[endpoint] )
keyword[return] identifier[router] . identifier[url_for] ( identifier[default] , identifier[force_external] = keyword[True] , identifier[values] = identifier[values] ) | def url_for(endpoint, default='senaite.jsonapi.get', **values):
"""Looks up the API URL for the given endpoint
:param endpoint: The name of the registered route (aka endpoint)
:type endpoint: string
:returns: External URL for this endpoint
:rtype: string/None
"""
try:
return router.url_for(endpoint, force_external=True, values=values) # depends on [control=['try'], data=[]]
except Exception:
# XXX plone.jsonapi.core should catch the BuildError of Werkzeug and
# throw another error which can be handled here.
logger.debug("Could not build API URL for endpoint '%s'. No route provider registered?" % endpoint)
# build generic API URL
return router.url_for(default, force_external=True, values=values) # depends on [control=['except'], data=[]] |
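
A small stand-in router showing the fallback behaviour of url_for(). The real router and logger come from plone.jsonapi.core; this stub and its route table are assumptions for illustration.

import logging
logger = logging.getLogger(__name__)

class StubRouter:
    routes = {"senaite.jsonapi.get": "http://host/@@API/senaite/v1"}

    def url_for(self, endpoint, force_external=True, values=None):
        return self.routes[endpoint]  # unknown endpoints raise KeyError

router = StubRouter()

print(url_for("senaite.jsonapi.get"))  # resolves the registered route directly
print(url_for("unregistered.route"))   # logs a debug note, then falls back to the default
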
def unindent(buffer, from_row, to_row, count=1):
"""
Unindent text of a :class:`.Buffer` object.
"""
current_row = buffer.document.cursor_position_row
line_range = range(from_row, to_row)
def transform(text):
remove = ' ' * count
if text.startswith(remove):
return text[len(remove):]
else:
return text.lstrip()
# Apply transformation.
new_text = buffer.transform_lines(line_range, transform)
buffer.document = Document(
new_text,
Document(new_text).translate_row_col_to_index(current_row, 0))
# Go to the start of the line.
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True) | def function[unindent, parameter[buffer, from_row, to_row, count]]:
constant[
Unindent text of a :class:`.Buffer` object.
]
variable[current_row] assign[=] name[buffer].document.cursor_position_row
variable[line_range] assign[=] call[name[range], parameter[name[from_row], name[to_row]]]
def function[transform, parameter[text]]:
variable[remove] assign[=] binary_operation[constant[ ] * name[count]]
if call[name[text].startswith, parameter[name[remove]]] begin[:]
return[call[name[text]][<ast.Slice object at 0x7da204347670>]]
variable[new_text] assign[=] call[name[buffer].transform_lines, parameter[name[line_range], name[transform]]]
name[buffer].document assign[=] call[name[Document], parameter[name[new_text], call[call[name[Document], parameter[name[new_text]]].translate_row_col_to_index, parameter[name[current_row], constant[0]]]]]
<ast.AugAssign object at 0x7da204344af0> | keyword[def] identifier[unindent] ( identifier[buffer] , identifier[from_row] , identifier[to_row] , identifier[count] = literal[int] ):
literal[string]
identifier[current_row] = identifier[buffer] . identifier[document] . identifier[cursor_position_row]
identifier[line_range] = identifier[range] ( identifier[from_row] , identifier[to_row] )
keyword[def] identifier[transform] ( identifier[text] ):
identifier[remove] = literal[string] * identifier[count]
keyword[if] identifier[text] . identifier[startswith] ( identifier[remove] ):
keyword[return] identifier[text] [ identifier[len] ( identifier[remove] ):]
keyword[else] :
keyword[return] identifier[text] . identifier[lstrip] ()
identifier[new_text] = identifier[buffer] . identifier[transform_lines] ( identifier[line_range] , identifier[transform] )
identifier[buffer] . identifier[document] = identifier[Document] (
identifier[new_text] ,
identifier[Document] ( identifier[new_text] ). identifier[translate_row_col_to_index] ( identifier[current_row] , literal[int] ))
identifier[buffer] . identifier[cursor_position] += identifier[buffer] . identifier[document] . identifier[get_start_of_line_position] ( identifier[after_whitespace] = keyword[True] ) | def unindent(buffer, from_row, to_row, count=1):
"""
Unindent text of a :class:`.Buffer` object.
"""
current_row = buffer.document.cursor_position_row
line_range = range(from_row, to_row)
def transform(text):
remove = ' ' * count
if text.startswith(remove):
return text[len(remove):] # depends on [control=['if'], data=[]]
else:
return text.lstrip()
# Apply transformation.
new_text = buffer.transform_lines(line_range, transform)
buffer.document = Document(new_text, Document(new_text).translate_row_col_to_index(current_row, 0))
# Go to the start of the line.
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True) |
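
A short sketch exercising unindent() with prompt_toolkit's Buffer and Document. The imports follow the prompt_toolkit 2.x/3.x layout; note that the number of spaces removed per count depends on the whitespace literal inside transform().

from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document

buf = Buffer()
buf.document = Document("    alpha\n        beta\ngamma\n")

# Rows 0..1 each lose one unindent step; row 2 is outside the range.
unindent(buf, 0, 2, count=1)
print(buf.document.text)
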
def render_import_image(self, use_auth=None):
"""
Configure the import_image plugin
"""
# import_image is a multi-phase plugin
if self.user_params.imagestream_name.value is None:
self.pt.remove_plugin('exit_plugins', 'import_image',
'imagestream not in user parameters')
elif self.pt.has_plugin_conf('exit_plugins', 'import_image'):
self.pt.set_plugin_arg('exit_plugins', 'import_image', 'imagestream',
self.user_params.imagestream_name.value) | def function[render_import_image, parameter[self, use_auth]]:
constant[
Configure the import_image plugin
]
if compare[name[self].user_params.imagestream_name.value is constant[None]] begin[:]
call[name[self].pt.remove_plugin, parameter[constant[exit_plugins], constant[import_image], constant[imagestream not in user parameters]]] | keyword[def] identifier[render_import_image] ( identifier[self] , identifier[use_auth] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[user_params] . identifier[imagestream_name] . identifier[value] keyword[is] keyword[None] :
identifier[self] . identifier[pt] . identifier[remove_plugin] ( literal[string] , literal[string] ,
literal[string] )
keyword[elif] identifier[self] . identifier[pt] . identifier[has_plugin_conf] ( literal[string] , literal[string] ):
identifier[self] . identifier[pt] . identifier[set_plugin_arg] ( literal[string] , literal[string] , literal[string] ,
identifier[self] . identifier[user_params] . identifier[imagestream_name] . identifier[value] ) | def render_import_image(self, use_auth=None):
"""
Configure the import_image plugin
"""
# import_image is a multi-phase plugin
if self.user_params.imagestream_name.value is None:
self.pt.remove_plugin('exit_plugins', 'import_image', 'imagestream not in user parameters') # depends on [control=['if'], data=[]]
elif self.pt.has_plugin_conf('exit_plugins', 'import_image'):
self.pt.set_plugin_arg('exit_plugins', 'import_image', 'imagestream', self.user_params.imagestream_name.value) # depends on [control=['if'], data=[]] |
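
Minimal stand-ins to trace both branches of render_import_image(). The real pt and user_params objects come from the osbs-client build machinery; these stub classes are assumptions for illustration only.

class Param:
    def __init__(self, value):
        self.value = value

class UserParams:
    def __init__(self, imagestream):
        self.imagestream_name = Param(imagestream)

class StubPT:
    def __init__(self):
        self.conf = {"exit_plugins": {"import_image": {}}}

    def has_plugin_conf(self, phase, name):
        return name in self.conf.get(phase, {})

    def remove_plugin(self, phase, name, reason):
        self.conf[phase].pop(name, None)

    def set_plugin_arg(self, phase, name, key, value):
        self.conf[phase][name][key] = value

class Spec:
    def __init__(self, imagestream):
        self.pt = StubPT()
        self.user_params = UserParams(imagestream)

    render_import_image = render_import_image  # reuse the method defined above

spec = Spec("my-stream")
spec.render_import_image()
print(spec.pt.conf["exit_plugins"])  # {'import_image': {'imagestream': 'my-stream'}}

spec = Spec(None)
spec.render_import_image()
print(spec.pt.conf["exit_plugins"])  # {}  (plugin removed when imagestream is unset)
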