Dataset columns (each record provides four parallel views of one Python function):
code: string, lengths 75 to 104k
code_sememe: string, lengths 47 to 309k
token_type: string, lengths 215 to 214k
code_dependency: string, lengths 75 to 155k
def release(self):
    """Release the lock."""
    success, _ = self.etcd_client.transaction(
        compare=[
            self.etcd_client.transactions.value(self.key) == self.uuid
        ],
        success=[self.etcd_client.transactions.delete(self.key)],
        failure=[]
    )
    return success
def function[release, parameter[self]]: constant[Release the lock.] <ast.Tuple object at 0x7da20c794d60> assign[=] call[name[self].etcd_client.transaction, parameter[]] return[name[success]]
keyword[def] identifier[release] ( identifier[self] ): literal[string] identifier[success] , identifier[_] = identifier[self] . identifier[etcd_client] . identifier[transaction] ( identifier[compare] =[ identifier[self] . identifier[etcd_client] . identifier[transactions] . identifier[value] ( identifier[self] . identifier[key] )== identifier[self] . identifier[uuid] ], identifier[success] =[ identifier[self] . identifier[etcd_client] . identifier[transactions] . identifier[delete] ( identifier[self] . identifier[key] )], identifier[failure] =[] ) keyword[return] identifier[success]
def release(self):
    """Release the lock."""
    (success, _) = self.etcd_client.transaction(
        compare=[self.etcd_client.transactions.value(self.key) == self.uuid],
        success=[self.etcd_client.transactions.delete(self.key)],
        failure=[])
    return success
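For orientation, the release() above is the compare-and-delete half of a distributed lock. The sketch below shows a plausible acquire() counterpart under the same transaction API; the EtcdLock class wiring is an assumption (only release() appears in the record), and the client is assumed to be the python-etcd3 library.

import uuid

import etcd3  # assumed client library; its transaction interface matches the usage above


class EtcdLock(object):
    """Hypothetical owner of the release() method shown above."""

    def __init__(self, key, etcd_client=None):
        self.etcd_client = etcd_client or etcd3.client()
        self.key = key
        self.uuid = uuid.uuid4().hex  # value that proves ownership on release

    def acquire(self):
        """Create the key only if nobody holds it yet (version == 0)."""
        success, _ = self.etcd_client.transaction(
            compare=[self.etcd_client.transactions.version(self.key) == 0],
            success=[self.etcd_client.transactions.put(self.key, self.uuid)],
            failure=[]
        )
        return success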
def push(self, index=None):
    """Push built documents to ElasticSearch.

    If ``index`` is specified, only that index will be pushed.
    """
    for ind in self.indexes:
        if index and not isinstance(ind, index):
            continue
        ind.push()
def function[push, parameter[self, index]]: constant[Push built documents to ElasticSearch. If ``index`` is specified, only that index will be pushed. ] for taget[name[ind]] in starred[name[self].indexes] begin[:] if <ast.BoolOp object at 0x7da1b19089d0> begin[:] continue call[name[ind].push, parameter[]]
keyword[def] identifier[push] ( identifier[self] , identifier[index] = keyword[None] ): literal[string] keyword[for] identifier[ind] keyword[in] identifier[self] . identifier[indexes] : keyword[if] identifier[index] keyword[and] keyword[not] identifier[isinstance] ( identifier[ind] , identifier[index] ): keyword[continue] identifier[ind] . identifier[push] ()
def push(self, index=None):
    """Push built documents to ElasticSearch.

    If ``index`` is specified, only that index will be pushed.
    """
    for ind in self.indexes:
        if index and (not isinstance(ind, index)):
            continue  # depends on [control=['if'], data=[]]
        ind.push()  # depends on [control=['for'], data=['ind']]
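One subtlety worth spelling out: because the filter is ``isinstance(ind, index)``, the ``index`` argument is expected to be an index class rather than an instance. A hypothetical call site (``indexer`` and ``BlogIndex`` are illustrative names, not from the record):

indexer.push()                 # push every index
indexer.push(index=BlogIndex)  # push only indexes that are BlogIndex instances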
def find_transported_elements(rxn):
    """
    Return a dictionary showing the amount of transported elements of a rxn.

    Collects the elements for each metabolite participating in a reaction,
    multiplies the amount by the metabolite's stoichiometry in the reaction
    and bins the result according to the compartment that metabolite is in.
    This produces a dictionary of dictionaries such as this
    ``{'p': {'C': -1, 'H': -4}, 'c': {'C': 1, 'H': 4}}`` which shows the
    transported entities. This dictionary is then simplified to only include
    the non-zero elements of one single compartment i.e. showing the precise
    elements that are transported.

    Parameters
    ----------
    rxn : cobra.Reaction
        Any cobra.Reaction containing metabolites.

    """
    element_dist = defaultdict()
    # Collecting elements for each metabolite.
    for met in rxn.metabolites:
        if met.compartment not in element_dist:
            # Multiplication by the metabolite stoichiometry.
            element_dist[met.compartment] = \
                {k: v * rxn.metabolites[met]
                 for (k, v) in iteritems(met.elements)}
        else:
            x = {k: v * rxn.metabolites[met]
                 for (k, v) in iteritems(met.elements)}
            y = element_dist[met.compartment]
            element_dist[met.compartment] = \
                {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)}
    delta_dict = defaultdict()
    # Simplification of the resulting dictionary of dictionaries.
    for elements in itervalues(element_dist):
        delta_dict.update(elements)
    # Only non-zero values get included in the returned delta-dict.
    delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0}
    return delta_dict
def function[find_transported_elements, parameter[rxn]]: constant[ Return a dictionary showing the amount of transported elements of a rxn. Collects the elements for each metabolite participating in a reaction, multiplies the amount by the metabolite's stoichiometry in the reaction and bins the result according to the compartment that metabolite is in. This produces a dictionary of dictionaries such as this ``{'p': {'C': -1, 'H': -4}, c: {'C': 1, 'H': 4}}`` which shows the transported entities. This dictionary is then simplified to only include the non-zero elements of one single compartment i.e. showing the precise elements that are transported. Parameters ---------- rxn : cobra.Reaction Any cobra.Reaction containing metabolites. ] variable[element_dist] assign[=] call[name[defaultdict], parameter[]] for taget[name[met]] in starred[name[rxn].metabolites] begin[:] if compare[name[met].compartment <ast.NotIn object at 0x7da2590d7190> name[element_dist]] begin[:] call[name[element_dist]][name[met].compartment] assign[=] <ast.DictComp object at 0x7da1b0578220> variable[delta_dict] assign[=] call[name[defaultdict], parameter[]] for taget[name[elements]] in starred[call[name[itervalues], parameter[name[element_dist]]]] begin[:] call[name[delta_dict].update, parameter[name[elements]]] variable[delta_dict] assign[=] <ast.DictComp object at 0x7da1b0579fc0> return[name[delta_dict]]
keyword[def] identifier[find_transported_elements] ( identifier[rxn] ): literal[string] identifier[element_dist] = identifier[defaultdict] () keyword[for] identifier[met] keyword[in] identifier[rxn] . identifier[metabolites] : keyword[if] identifier[met] . identifier[compartment] keyword[not] keyword[in] identifier[element_dist] : identifier[element_dist] [ identifier[met] . identifier[compartment] ]={ identifier[k] : identifier[v] * identifier[rxn] . identifier[metabolites] [ identifier[met] ] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[iteritems] ( identifier[met] . identifier[elements] )} keyword[else] : identifier[x] ={ identifier[k] : identifier[v] * identifier[rxn] . identifier[metabolites] [ identifier[met] ] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[iteritems] ( identifier[met] . identifier[elements] )} identifier[y] = identifier[element_dist] [ identifier[met] . identifier[compartment] ] identifier[element_dist] [ identifier[met] . identifier[compartment] ]={ identifier[k] : identifier[x] . identifier[get] ( identifier[k] , literal[int] )+ identifier[y] . identifier[get] ( identifier[k] , literal[int] ) keyword[for] identifier[k] keyword[in] identifier[set] ( identifier[x] )| identifier[set] ( identifier[y] )} identifier[delta_dict] = identifier[defaultdict] () keyword[for] identifier[elements] keyword[in] identifier[itervalues] ( identifier[element_dist] ): identifier[delta_dict] . identifier[update] ( identifier[elements] ) identifier[delta_dict] ={ identifier[k] : identifier[abs] ( identifier[v] ) keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[iteritems] ( identifier[delta_dict] ) keyword[if] identifier[v] != literal[int] } keyword[return] identifier[delta_dict]
def find_transported_elements(rxn):
    """
    Return a dictionary showing the amount of transported elements of a rxn.

    Collects the elements for each metabolite participating in a reaction,
    multiplies the amount by the metabolite's stoichiometry in the reaction
    and bins the result according to the compartment that metabolite is in.
    This produces a dictionary of dictionaries such as this
    ``{'p': {'C': -1, 'H': -4}, 'c': {'C': 1, 'H': 4}}`` which shows the
    transported entities. This dictionary is then simplified to only include
    the non-zero elements of one single compartment i.e. showing the precise
    elements that are transported.

    Parameters
    ----------
    rxn : cobra.Reaction
        Any cobra.Reaction containing metabolites.

    """
    element_dist = defaultdict()
    # Collecting elements for each metabolite.
    for met in rxn.metabolites:
        if met.compartment not in element_dist:
            # Multiplication by the metabolite stoichiometry.
            element_dist[met.compartment] = {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)}  # depends on [control=['if'], data=['element_dist']]
        else:
            x = {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)}
            y = element_dist[met.compartment]
            element_dist[met.compartment] = {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)}  # depends on [control=['for'], data=['met']]
    delta_dict = defaultdict()
    # Simplification of the resulting dictionary of dictionaries.
    for elements in itervalues(element_dist):
        delta_dict.update(elements)  # depends on [control=['for'], data=['elements']]
    # Only non-zero values get included in the returned delta-dict.
    delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0}
    return delta_dict
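A worked example may help. The stand-in classes below mimic just enough of the cobra interface (a ``metabolites`` mapping plus ``compartment`` and ``elements`` attributes) to exercise the function; they are illustrative, not cobra's real classes, and assume the same module context (six's iteritems/itervalues imported).

class FakeMet(object):
    def __init__(self, compartment, elements):
        self.compartment = compartment
        self.elements = elements


class FakeRxn(object):
    def __init__(self, metabolites):
        self.metabolites = metabolites  # {metabolite: stoichiometry}


# A proton moved from the periplasm 'p' into the cytosol 'c':
h_p = FakeMet('p', {'H': 1})
h_c = FakeMet('c', {'H': 1})
rxn = FakeRxn({h_p: -1, h_c: 1})
print(find_transported_elements(rxn))  # {'H': 1}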
def files(self):
    """ File uploads parsed from `multipart/form-data` encoded POST or PUT
        request body. The values are instances of :class:`FileUpload`.
    """
    files = FormsDict()
    for name, item in self.POST.allitems():
        if isinstance(item, FileUpload):
            files[name] = item
    return files
def function[files, parameter[self]]: constant[ File uploads parsed from `multipart/form-data` encoded POST or PUT request body. The values are instances of :class:`FileUpload`. ] variable[files] assign[=] call[name[FormsDict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b2344850>, <ast.Name object at 0x7da20c6ab7c0>]]] in starred[call[name[self].POST.allitems, parameter[]]] begin[:] if call[name[isinstance], parameter[name[item], name[FileUpload]]] begin[:] call[name[files]][name[name]] assign[=] name[item] return[name[files]]
keyword[def] identifier[files] ( identifier[self] ): literal[string] identifier[files] = identifier[FormsDict] () keyword[for] identifier[name] , identifier[item] keyword[in] identifier[self] . identifier[POST] . identifier[allitems] (): keyword[if] identifier[isinstance] ( identifier[item] , identifier[FileUpload] ): identifier[files] [ identifier[name] ]= identifier[item] keyword[return] identifier[files]
def files(self):
    """ File uploads parsed from `multipart/form-data` encoded POST or PUT
        request body. The values are instances of :class:`FileUpload`.
    """
    files = FormsDict()
    for (name, item) in self.POST.allitems():
        if isinstance(item, FileUpload):
            files[name] = item  # depends on [control=['if'], data=[]]
        # depends on [control=['for'], data=[]]
    return files
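This has the shape of Bottle's ``request.files`` property; assuming that context, a usage sketch in a handler (route path, field name, and target directory are illustrative):

from bottle import request, route


@route('/upload', method='POST')
def do_upload():
    upload = request.files.get('datafile')
    if upload is None:
        return 'no file submitted'
    upload.save('/tmp/uploads', overwrite=True)  # FileUpload.save writes to disk
    return 'saved %s' % upload.filename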
def setup(self):
    """ performs data collection for qpid broker """
    options = ""
    amqps_prefix = ""
    # set amqps:// when SSL is used
    if self.get_option("ssl"):
        amqps_prefix = "amqps://"
    # for either present option, add --option=value to 'options' variable
    for option in ["ssl-certificate", "ssl-key"]:
        if self.get_option(option):
            amqps_prefix = "amqps://"
            options = (options + " --%s=" % (option) +
                       self.get_option(option))
    if self.get_option("port"):
        options = (options + " -b " + amqps_prefix +
                   "localhost:%s" % (self.get_option("port")))

    self.add_cmd_output([
        "qpid-stat -g" + options,  # applies since 0.18 version
        "qpid-stat -b" + options,  # applies to pre-0.18 versions
        "qpid-stat -c" + options,
        "qpid-stat -e" + options,
        "qpid-stat -q" + options,
        "qpid-stat -u" + options,
        "qpid-stat -m" + options,  # applies since 0.18 version
        "qpid-config exchanges" + options,
        "qpid-config queues" + options,
        "qpid-config exchanges -b" + options,  # applies to pre-0.18 vers.
        "qpid-config queues -b" + options,  # applies to pre-0.18 versions
        "qpid-config exchanges -r" + options,  # applies since 0.18 version
        "qpid-config queues -r" + options,  # applies since 0.18 version
        "qpid-route link list" + options,
        "qpid-route route list" + options,
        "qpid-cluster" + options,  # applies to pre-0.22 versions
        "qpid-ha query" + options,  # applies since 0.22 version
        "ls -lanR /var/lib/qpidd"
    ])

    self.add_copy_spec([
        "/etc/qpidd.conf",  # applies to pre-0.22 versions
        "/etc/qpid/qpidd.conf",  # applies since 0.22 version
        "/var/lib/qpid/syslog",
        "/etc/ais/openais.conf",
        "/var/log/cumin.log",
        "/var/log/mint.log",
        "/etc/sasl2/qpidd.conf",
        "/etc/qpid/qpidc.conf",
        "/etc/sesame/sesame.conf",
        "/etc/cumin/cumin.conf",
        "/etc/corosync/corosync.conf",
        "/var/lib/sesame",
        "/var/log/qpidd.log",
        "/var/log/sesame",
        "/var/log/cumin"
    ])
def function[setup, parameter[self]]: constant[ performs data collection for qpid broker ] variable[options] assign[=] constant[] variable[amqps_prefix] assign[=] constant[] if call[name[self].get_option, parameter[constant[ssl]]] begin[:] variable[amqps_prefix] assign[=] constant[amqps://] for taget[name[option]] in starred[list[[<ast.Constant object at 0x7da1b184a080>, <ast.Constant object at 0x7da1b1849030>]]] begin[:] if call[name[self].get_option, parameter[name[option]]] begin[:] variable[amqps_prefix] assign[=] constant[amqps://] variable[options] assign[=] binary_operation[binary_operation[name[options] + binary_operation[constant[ --%s=] <ast.Mod object at 0x7da2590d6920> name[option]]] + call[name[self].get_option, parameter[name[option]]]] if call[name[self].get_option, parameter[constant[port]]] begin[:] variable[options] assign[=] binary_operation[binary_operation[binary_operation[name[options] + constant[ -b ]] + name[amqps_prefix]] + binary_operation[constant[localhost:%s] <ast.Mod object at 0x7da2590d6920> call[name[self].get_option, parameter[constant[port]]]]] call[name[self].add_cmd_output, parameter[list[[<ast.BinOp object at 0x7da1b184aa70>, <ast.BinOp object at 0x7da1b1849c30>, <ast.BinOp object at 0x7da1b1848c40>, <ast.BinOp object at 0x7da1b184a230>, <ast.BinOp object at 0x7da1b184ae60>, <ast.BinOp object at 0x7da1b184b070>, <ast.BinOp object at 0x7da1b184a050>, <ast.BinOp object at 0x7da1b1848c10>, <ast.BinOp object at 0x7da1b184b550>, <ast.BinOp object at 0x7da1b1848610>, <ast.BinOp object at 0x7da1b184bb20>, <ast.BinOp object at 0x7da1b1848f70>, <ast.BinOp object at 0x7da1b1849180>, <ast.BinOp object at 0x7da1b184be20>, <ast.BinOp object at 0x7da1b184bf40>, <ast.BinOp object at 0x7da1b1849300>, <ast.BinOp object at 0x7da1b17b4820>, <ast.Constant object at 0x7da1b17b6080>]]]] call[name[self].add_copy_spec, parameter[list[[<ast.Constant object at 0x7da1b17b6ad0>, <ast.Constant object at 0x7da1b17b4fd0>, <ast.Constant object at 0x7da1b17b57b0>, <ast.Constant object at 0x7da1b17b49a0>, <ast.Constant object at 0x7da1b17b55a0>, <ast.Constant object at 0x7da1b17b5420>, <ast.Constant object at 0x7da1b17b7220>, <ast.Constant object at 0x7da1b17b4a90>, <ast.Constant object at 0x7da1b17b74c0>, <ast.Constant object at 0x7da1b17b5d50>, <ast.Constant object at 0x7da1b17b6410>, <ast.Constant object at 0x7da1b17b6c50>, <ast.Constant object at 0x7da20c76d600>, <ast.Constant object at 0x7da20c76fa30>, <ast.Constant object at 0x7da20c76f640>]]]]
keyword[def] identifier[setup] ( identifier[self] ): literal[string] identifier[options] = literal[string] identifier[amqps_prefix] = literal[string] keyword[if] identifier[self] . identifier[get_option] ( literal[string] ): identifier[amqps_prefix] = literal[string] keyword[for] identifier[option] keyword[in] [ literal[string] , literal[string] ]: keyword[if] identifier[self] . identifier[get_option] ( identifier[option] ): identifier[amqps_prefix] = literal[string] identifier[options] =( identifier[options] + literal[string] %( identifier[option] )+ identifier[self] . identifier[get_option] ( identifier[option] )) keyword[if] identifier[self] . identifier[get_option] ( literal[string] ): identifier[options] =( identifier[options] + literal[string] + identifier[amqps_prefix] + literal[string] %( identifier[self] . identifier[get_option] ( literal[string] ))) identifier[self] . identifier[add_cmd_output] ([ literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] + identifier[options] , literal[string] ]) identifier[self] . identifier[add_copy_spec] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
def setup(self):
    """ performs data collection for qpid broker """
    options = ''
    amqps_prefix = ''
    # set amqps:// when SSL is used
    if self.get_option('ssl'):
        amqps_prefix = 'amqps://'  # depends on [control=['if'], data=[]]
    # for either present option, add --option=value to 'options' variable
    for option in ['ssl-certificate', 'ssl-key']:
        if self.get_option(option):
            amqps_prefix = 'amqps://'
            options = options + ' --%s=' % option + self.get_option(option)  # depends on [control=['if'], data=[]]
        # depends on [control=['for'], data=['option']]
    if self.get_option('port'):
        options = options + ' -b ' + amqps_prefix + 'localhost:%s' % self.get_option('port')  # depends on [control=['if'], data=[]]
    # applies since 0.18 version
    # applies to pre-0.18 versions
    # applies since 0.18 version
    # applies to pre-0.18 vers.
    # applies to pre-0.18 versions
    # applies since 0.18 version
    # applies since 0.18 version
    # applies to pre-0.22 versions
    # applies since 0.22 version
    self.add_cmd_output(['qpid-stat -g' + options, 'qpid-stat -b' + options,
                         'qpid-stat -c' + options, 'qpid-stat -e' + options,
                         'qpid-stat -q' + options, 'qpid-stat -u' + options,
                         'qpid-stat -m' + options,
                         'qpid-config exchanges' + options,
                         'qpid-config queues' + options,
                         'qpid-config exchanges -b' + options,
                         'qpid-config queues -b' + options,
                         'qpid-config exchanges -r' + options,
                         'qpid-config queues -r' + options,
                         'qpid-route link list' + options,
                         'qpid-route route list' + options,
                         'qpid-cluster' + options,
                         'qpid-ha query' + options,
                         'ls -lanR /var/lib/qpidd'])
    # applies to pre-0.22 versions
    # applies since 0.22 version
    self.add_copy_spec(['/etc/qpidd.conf', '/etc/qpid/qpidd.conf',
                        '/var/lib/qpid/syslog', '/etc/ais/openais.conf',
                        '/var/log/cumin.log', '/var/log/mint.log',
                        '/etc/sasl2/qpidd.conf', '/etc/qpid/qpidc.conf',
                        '/etc/sesame/sesame.conf', '/etc/cumin/cumin.conf',
                        '/etc/corosync/corosync.conf', '/var/lib/sesame',
                        '/var/log/qpidd.log', '/var/log/sesame',
                        '/var/log/cumin'])
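To make the string assembly concrete: with hypothetical values ``ssl-certificate=/etc/pki/qpid.pem`` and ``port=5671``, ``options`` becomes `` --ssl-certificate=/etc/pki/qpid.pem -b amqps://localhost:5671``, so the first collected command line is ``qpid-stat -g --ssl-certificate=/etc/pki/qpid.pem -b amqps://localhost:5671``.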
def samplesheet(self):
    """
    Create a custom sample sheet based on the original sample sheet for the
    run, but only including the samples that did not pass the quality
    threshold on the previous iteration
    """
    if self.demultiplex:
        make_path(self.samplesheetpath)
        self.customsamplesheet = os.path.join(self.samplesheetpath, 'SampleSheet.csv')
        header = ['Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID',
                  'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description']
        with open(self.customsamplesheet, 'w') as samplesheet:
            lines = str()
            lines += '[Header]\n'
            lines += 'IEMFileVersion,{}\n'.format(self.header.IEMFileVersion)
            lines += 'Investigator Name,{}\n'.format(self.header.InvestigatorName)
            lines += 'Experiment Name,{}\n'.format(self.header.ExperimentName)
            lines += 'Date,{}\n'.format(self.header.Date)
            lines += 'Workflow,{}\n'.format(self.header.Workflow)
            lines += 'Application,{}\n'.format(self.header.Application)
            lines += 'Assay,{}\n'.format(self.header.Assay)
            lines += 'Description,{}\n'.format(self.header.Description)
            lines += 'Chemistry,{}\n'.format(self.header.Chemistry)
            lines += '\n'
            lines += '[Reads]\n'
            lines += str(self.forward) + '\n'
            lines += str(self.reverse) + '\n'
            lines += '\n'
            lines += '[Settings]\n'
            lines += 'ReverseComplement,{}\n'.format(self.header.ReverseComplement)
            lines += 'Adapter,{}\n'.format(self.header.Adapter)
            lines += '\n'
            lines += '[Data]\n'
            lines += ','.join(header)
            lines += '\n'
            # Correlate all the samples added to the list of incomplete samples with their metadata
            for incomplete in self.incomplete:
                for sample in self.rundata:
                    if incomplete == sample['SampleID']:
                        # Use each entry in the header list as a key for the rundata dictionary
                        for data in header:
                            # Modify the key to be consistent with how the dictionary was populated
                            result = sample[data.replace('_', '')]
                            # Description is the final entry in the list, and shouldn't have a , following the value
                            if data != 'Description':
                                lines += '{},'.format(result.replace('NA', ''))
                            # This entry should have a newline instead of a ,
                            else:
                                lines += '{}\n'.format(result.replace('NA', ''))
            # Write the string to the sample sheet
            samplesheet.write(lines)
def function[samplesheet, parameter[self]]: constant[ Create a custom sample sheet based on the original sample sheet for the run, but only including the samples that did not pass the quality threshold on the previous iteration ] if name[self].demultiplex begin[:] call[name[make_path], parameter[name[self].samplesheetpath]] name[self].customsamplesheet assign[=] call[name[os].path.join, parameter[name[self].samplesheetpath, constant[SampleSheet.csv]]] variable[header] assign[=] list[[<ast.Constant object at 0x7da2047e9990>, <ast.Constant object at 0x7da2047e9600>, <ast.Constant object at 0x7da2047e96f0>, <ast.Constant object at 0x7da2047e98d0>, <ast.Constant object at 0x7da2047eb0a0>, <ast.Constant object at 0x7da2047ea740>, <ast.Constant object at 0x7da2047e9960>, <ast.Constant object at 0x7da2047e96c0>, <ast.Constant object at 0x7da2047e8cd0>, <ast.Constant object at 0x7da2047e9e10>]] with call[name[open], parameter[name[self].customsamplesheet, constant[w]]] begin[:] variable[lines] assign[=] call[name[str], parameter[]] <ast.AugAssign object at 0x7da2047ea890> <ast.AugAssign object at 0x7da2047ebbb0> <ast.AugAssign object at 0x7da2047ebb50> <ast.AugAssign object at 0x7da2047eb7f0> <ast.AugAssign object at 0x7da2047e87f0> <ast.AugAssign object at 0x7da2047e9000> <ast.AugAssign object at 0x7da2047e8370> <ast.AugAssign object at 0x7da2047ea350> <ast.AugAssign object at 0x7da18f09e830> <ast.AugAssign object at 0x7da1b1d665f0> <ast.AugAssign object at 0x7da1b1d64670> <ast.AugAssign object at 0x7da1b1d66aa0> <ast.AugAssign object at 0x7da2047ea170> <ast.AugAssign object at 0x7da18fe91690> <ast.AugAssign object at 0x7da18fe91ae0> <ast.AugAssign object at 0x7da18fe90d00> <ast.AugAssign object at 0x7da18fe91270> <ast.AugAssign object at 0x7da18fe927d0> <ast.AugAssign object at 0x7da18fe919c0> <ast.AugAssign object at 0x7da18fe91060> <ast.AugAssign object at 0x7da18fe90c70> <ast.AugAssign object at 0x7da18fe93e20> for taget[name[incomplete]] in starred[name[self].incomplete] begin[:] for taget[name[sample]] in starred[name[self].rundata] begin[:] if compare[name[incomplete] equal[==] call[name[sample]][constant[SampleID]]] begin[:] for taget[name[data]] in starred[name[header]] begin[:] variable[result] assign[=] call[name[sample]][call[name[data].replace, parameter[constant[_], constant[]]]] if compare[name[data] not_equal[!=] constant[Description]] begin[:] <ast.AugAssign object at 0x7da18fe937c0> call[name[samplesheet].write, parameter[name[lines]]]
keyword[def] identifier[samplesheet] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[demultiplex] : identifier[make_path] ( identifier[self] . identifier[samplesheetpath] ) identifier[self] . identifier[customsamplesheet] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[samplesheetpath] , literal[string] ) identifier[header] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[with] identifier[open] ( identifier[self] . identifier[customsamplesheet] , literal[string] ) keyword[as] identifier[samplesheet] : identifier[lines] = identifier[str] () identifier[lines] += literal[string] identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[IEMFileVersion] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[InvestigatorName] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[ExperimentName] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[Date] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[Workflow] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[Application] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[Assay] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[Description] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[Chemistry] ) identifier[lines] += literal[string] identifier[lines] += literal[string] identifier[lines] += identifier[str] ( identifier[self] . identifier[forward] )+ literal[string] identifier[lines] += identifier[str] ( identifier[self] . identifier[reverse] )+ literal[string] identifier[lines] += literal[string] identifier[lines] += literal[string] identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[ReverseComplement] ) identifier[lines] += literal[string] . identifier[format] ( identifier[self] . identifier[header] . identifier[Adapter] ) identifier[lines] += literal[string] identifier[lines] += literal[string] identifier[lines] += literal[string] . identifier[join] ( identifier[header] ) identifier[lines] += literal[string] keyword[for] identifier[incomplete] keyword[in] identifier[self] . identifier[incomplete] : keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[rundata] : keyword[if] identifier[incomplete] == identifier[sample] [ literal[string] ]: keyword[for] identifier[data] keyword[in] identifier[header] : identifier[result] = identifier[sample] [ identifier[data] . identifier[replace] ( literal[string] , literal[string] )] keyword[if] identifier[data] != literal[string] : identifier[lines] += literal[string] . identifier[format] ( identifier[result] . identifier[replace] ( literal[string] , literal[string] )) keyword[else] : identifier[lines] += literal[string] . identifier[format] ( identifier[result] . identifier[replace] ( literal[string] , literal[string] )) identifier[samplesheet] . identifier[write] ( identifier[lines] )
def samplesheet(self):
    """
    Create a custom sample sheet based on the original sample sheet for the
    run, but only including the samples that did not pass the quality
    threshold on the previous iteration
    """
    if self.demultiplex:
        make_path(self.samplesheetpath)
        self.customsamplesheet = os.path.join(self.samplesheetpath, 'SampleSheet.csv')
        header = ['Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID',
                  'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description']
        with open(self.customsamplesheet, 'w') as samplesheet:
            lines = str()
            lines += '[Header]\n'
            lines += 'IEMFileVersion,{}\n'.format(self.header.IEMFileVersion)
            lines += 'Investigator Name,{}\n'.format(self.header.InvestigatorName)
            lines += 'Experiment Name,{}\n'.format(self.header.ExperimentName)
            lines += 'Date,{}\n'.format(self.header.Date)
            lines += 'Workflow,{}\n'.format(self.header.Workflow)
            lines += 'Application,{}\n'.format(self.header.Application)
            lines += 'Assay,{}\n'.format(self.header.Assay)
            lines += 'Description,{}\n'.format(self.header.Description)
            lines += 'Chemistry,{}\n'.format(self.header.Chemistry)
            lines += '\n'
            lines += '[Reads]\n'
            lines += str(self.forward) + '\n'
            lines += str(self.reverse) + '\n'
            lines += '\n'
            lines += '[Settings]\n'
            lines += 'ReverseComplement,{}\n'.format(self.header.ReverseComplement)
            lines += 'Adapter,{}\n'.format(self.header.Adapter)
            lines += '\n'
            lines += '[Data]\n'
            lines += ','.join(header)
            lines += '\n'
            # Correlate all the samples added to the list of incomplete samples with their metadata
            for incomplete in self.incomplete:
                for sample in self.rundata:
                    if incomplete == sample['SampleID']:
                        # Use each entry in the header list as a key for the rundata dictionary
                        for data in header:
                            # Modify the key to be consistent with how the dictionary was populated
                            result = sample[data.replace('_', '')]
                            # Description is the final entry in the list, and shouldn't have a , following the value
                            if data != 'Description':
                                lines += '{},'.format(result.replace('NA', ''))  # depends on [control=['if'], data=[]]
                            else:
                                # This entry should have a newline instead of a ,
                                lines += '{}\n'.format(result.replace('NA', ''))  # depends on [control=['for'], data=['data']]
                        # depends on [control=['if'], data=[]]
                    # depends on [control=['for'], data=['sample']]
                # depends on [control=['for'], data=['incomplete']]
            # Write the string to the sample sheet
            samplesheet.write(lines)  # depends on [control=['with'], data=['samplesheet']]
        # depends on [control=['if'], data=[]]
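The only subtle step above is the key lookup: ``rundata`` keys are the header names with underscores stripped. A quick, self-contained check of that mapping:

header = ['Sample_ID', 'Sample_Name', 'I7_Index_ID', 'index', 'Description']
print([h.replace('_', '') for h in header])
# ['SampleID', 'SampleName', 'I7IndexID', 'index', 'Description']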
def get_contacts(self):
    """
    Returns the roster of contacts

    Returns:
        dict: the roster of contacts
    """
    for jid, item in self.roster.items.items():
        try:
            self._contacts[jid.bare()].update(item.export_as_json())
        except KeyError:
            self._contacts[jid.bare()] = item.export_as_json()
    return self._contacts
def function[get_contacts, parameter[self]]: constant[ Returns list of contacts Returns: dict: the roster of contacts ] for taget[tuple[[<ast.Name object at 0x7da1b0790b20>, <ast.Name object at 0x7da1b0790b50>]]] in starred[call[name[self].roster.items.items, parameter[]]] begin[:] <ast.Try object at 0x7da1b0790c70> return[name[self]._contacts]
keyword[def] identifier[get_contacts] ( identifier[self] ): literal[string] keyword[for] identifier[jid] , identifier[item] keyword[in] identifier[self] . identifier[roster] . identifier[items] . identifier[items] (): keyword[try] : identifier[self] . identifier[_contacts] [ identifier[jid] . identifier[bare] ()]. identifier[update] ( identifier[item] . identifier[export_as_json] ()) keyword[except] identifier[KeyError] : identifier[self] . identifier[_contacts] [ identifier[jid] . identifier[bare] ()]= identifier[item] . identifier[export_as_json] () keyword[return] identifier[self] . identifier[_contacts]
def get_contacts(self):
    """
    Returns the roster of contacts

    Returns:
        dict: the roster of contacts
    """
    for (jid, item) in self.roster.items.items():
        try:
            self._contacts[jid.bare()].update(item.export_as_json())  # depends on [control=['try'], data=[]]
        except KeyError:
            self._contacts[jid.bare()] = item.export_as_json()  # depends on [control=['except'], data=[]]
    # depends on [control=['for'], data=[]]
    return self._contacts
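The try/except KeyError pair is a merge-or-insert idiom. Assuming ``export_as_json()`` returns a plain dict, an equivalent formulation with ``setdefault`` would be (a sketch, not a drop-in from the original library):

entry = self._contacts.setdefault(jid.bare(), {})
entry.update(item.export_as_json())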
def create_recomended_articles(main_article, article_list):
    '''
    Creates recommended article objects from article_list
    and _prepends_ to existing recommended articles.
    '''
    # store existing recommended articles
    existing_recommended_articles = [
        ra.recommended_article.specific
        for ra in main_article.recommended_articles.all()]
    # delete existing recommended articles
    ArticlePageRecommendedSections.objects.filter(page=main_article).delete()

    for hyperlinked_article in article_list:
        ArticlePageRecommendedSections(
            page=main_article,
            recommended_article=hyperlinked_article).save()
    # re-create existing recommended articles
    for article in existing_recommended_articles:
        if article not in article_list:
            ArticlePageRecommendedSections(
                page=main_article,
                recommended_article=article).save()
def function[create_recomended_articles, parameter[main_article, article_list]]: constant[ Creates recommended article objects from article_list and _prepends_ to existing recommended articles. ] variable[existing_recommended_articles] assign[=] <ast.ListComp object at 0x7da1b036b430> call[call[name[ArticlePageRecommendedSections].objects.filter, parameter[]].delete, parameter[]] for taget[name[hyperlinked_article]] in starred[name[article_list]] begin[:] call[call[name[ArticlePageRecommendedSections], parameter[]].save, parameter[]] for taget[name[article]] in starred[name[existing_recommended_articles]] begin[:] if compare[name[article] <ast.NotIn object at 0x7da2590d7190> name[article_list]] begin[:] call[call[name[ArticlePageRecommendedSections], parameter[]].save, parameter[]]
keyword[def] identifier[create_recomended_articles] ( identifier[main_article] , identifier[article_list] ): literal[string] identifier[existing_recommended_articles] =[ identifier[ra] . identifier[recommended_article] . identifier[specific] keyword[for] identifier[ra] keyword[in] identifier[main_article] . identifier[recommended_articles] . identifier[all] ()] identifier[ArticlePageRecommendedSections] . identifier[objects] . identifier[filter] ( identifier[page] = identifier[main_article] ). identifier[delete] () keyword[for] identifier[hyperlinked_article] keyword[in] identifier[article_list] : identifier[ArticlePageRecommendedSections] ( identifier[page] = identifier[main_article] , identifier[recommended_article] = identifier[hyperlinked_article] ). identifier[save] () keyword[for] identifier[article] keyword[in] identifier[existing_recommended_articles] : keyword[if] identifier[article] keyword[not] keyword[in] identifier[article_list] : identifier[ArticlePageRecommendedSections] ( identifier[page] = identifier[main_article] , identifier[recommended_article] = identifier[article] ). identifier[save] ()
def create_recomended_articles(main_article, article_list):
    """
    Creates recommended article objects from article_list
    and _prepends_ to existing recommended articles.
    """
    # store existing recommended articles
    existing_recommended_articles = [ra.recommended_article.specific for ra in main_article.recommended_articles.all()]
    # delete existing recommended articles
    ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
    for hyperlinked_article in article_list:
        ArticlePageRecommendedSections(page=main_article, recommended_article=hyperlinked_article).save()  # depends on [control=['for'], data=['hyperlinked_article']]
    # re-create existing recommended articles
    for article in existing_recommended_articles:
        if article not in article_list:
            ArticlePageRecommendedSections(page=main_article, recommended_article=article).save()  # depends on [control=['if'], data=['article']]
        # depends on [control=['for'], data=['article']]
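The delete-then-recreate sequence is what produces the prepend. For example, with existing recommendations [A, B] and ``article_list = [C, A]``, the rows are recreated as C then A, after which only B is re-added (A is skipped as a duplicate), leaving the stored order [C, A, B].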
def available_discounts(cls, user, categories, products):
    ''' Returns all discounts available to this user for the given
    categories and products. The discounts also list the available
    quantity for this user, not including products that are pending
    purchase. '''

    filtered_clauses = cls._filtered_clauses(user)

    # clauses that match provided categories
    categories = set(categories)
    # clauses that match provided products
    products = set(products)
    # clauses that match categories for provided products
    product_categories = set(product.category for product in products)
    # (Not relevant: clauses that match products in provided categories)
    all_categories = categories | product_categories

    filtered_clauses = (
        clause for clause in filtered_clauses
        if hasattr(clause, 'product') and clause.product in products
        or hasattr(clause, 'category') and clause.category in all_categories
    )

    discounts = []

    # Markers so that we don't need to evaluate given conditions
    # more than once
    accepted_discounts = set()
    failed_discounts = set()

    for clause in filtered_clauses:
        discount = clause.discount
        cond = ConditionController.for_condition(discount)
        past_use_count = clause.past_use_count

        if past_use_count >= clause.quantity:
            # This clause has exceeded its use count
            pass
        elif discount not in failed_discounts:
            # This clause is still available
            is_accepted = discount in accepted_discounts
            if is_accepted or cond.is_met(user, filtered=True):
                # This clause is valid for this user
                discounts.append(DiscountAndQuantity(
                    discount=discount,
                    clause=clause,
                    quantity=clause.quantity - past_use_count,
                ))
                accepted_discounts.add(discount)
            else:
                # This clause is not valid for this user
                failed_discounts.add(discount)

    return discounts
def function[available_discounts, parameter[cls, user, categories, products]]: constant[ Returns all discounts available to this user for the given categories and products. The discounts also list the available quantity for this user, not including products that are pending purchase. ] variable[filtered_clauses] assign[=] call[name[cls]._filtered_clauses, parameter[name[user]]] variable[categories] assign[=] call[name[set], parameter[name[categories]]] variable[products] assign[=] call[name[set], parameter[name[products]]] variable[product_categories] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da20c795f30>]] variable[all_categories] assign[=] binary_operation[name[categories] <ast.BitOr object at 0x7da2590d6aa0> name[product_categories]] variable[filtered_clauses] assign[=] <ast.GeneratorExp object at 0x7da20c7951b0> variable[discounts] assign[=] list[[]] variable[accepted_discounts] assign[=] call[name[set], parameter[]] variable[failed_discounts] assign[=] call[name[set], parameter[]] for taget[name[clause]] in starred[name[filtered_clauses]] begin[:] variable[discount] assign[=] name[clause].discount variable[cond] assign[=] call[name[ConditionController].for_condition, parameter[name[discount]]] variable[past_use_count] assign[=] name[clause].past_use_count if compare[name[past_use_count] greater_or_equal[>=] name[clause].quantity] begin[:] pass return[name[discounts]]
keyword[def] identifier[available_discounts] ( identifier[cls] , identifier[user] , identifier[categories] , identifier[products] ): literal[string] identifier[filtered_clauses] = identifier[cls] . identifier[_filtered_clauses] ( identifier[user] ) identifier[categories] = identifier[set] ( identifier[categories] ) identifier[products] = identifier[set] ( identifier[products] ) identifier[product_categories] = identifier[set] ( identifier[product] . identifier[category] keyword[for] identifier[product] keyword[in] identifier[products] ) identifier[all_categories] = identifier[categories] | identifier[product_categories] identifier[filtered_clauses] =( identifier[clause] keyword[for] identifier[clause] keyword[in] identifier[filtered_clauses] keyword[if] identifier[hasattr] ( identifier[clause] , literal[string] ) keyword[and] identifier[clause] . identifier[product] keyword[in] identifier[products] keyword[or] identifier[hasattr] ( identifier[clause] , literal[string] ) keyword[and] identifier[clause] . identifier[category] keyword[in] identifier[all_categories] ) identifier[discounts] =[] identifier[accepted_discounts] = identifier[set] () identifier[failed_discounts] = identifier[set] () keyword[for] identifier[clause] keyword[in] identifier[filtered_clauses] : identifier[discount] = identifier[clause] . identifier[discount] identifier[cond] = identifier[ConditionController] . identifier[for_condition] ( identifier[discount] ) identifier[past_use_count] = identifier[clause] . identifier[past_use_count] keyword[if] identifier[past_use_count] >= identifier[clause] . identifier[quantity] : keyword[pass] keyword[elif] identifier[discount] keyword[not] keyword[in] identifier[failed_discounts] : identifier[is_accepted] = identifier[discount] keyword[in] identifier[accepted_discounts] keyword[if] identifier[is_accepted] keyword[or] identifier[cond] . identifier[is_met] ( identifier[user] , identifier[filtered] = keyword[True] ): identifier[discounts] . identifier[append] ( identifier[DiscountAndQuantity] ( identifier[discount] = identifier[discount] , identifier[clause] = identifier[clause] , identifier[quantity] = identifier[clause] . identifier[quantity] - identifier[past_use_count] , )) identifier[accepted_discounts] . identifier[add] ( identifier[discount] ) keyword[else] : identifier[failed_discounts] . identifier[add] ( identifier[discount] ) keyword[return] identifier[discounts]
def available_discounts(cls, user, categories, products):
    """ Returns all discounts available to this user for the given
    categories and products. The discounts also list the available
    quantity for this user, not including products that are pending
    purchase. """
    filtered_clauses = cls._filtered_clauses(user)
    # clauses that match provided categories
    categories = set(categories)
    # clauses that match provided products
    products = set(products)
    # clauses that match categories for provided products
    product_categories = set((product.category for product in products))
    # (Not relevant: clauses that match products in provided categories)
    all_categories = categories | product_categories
    filtered_clauses = (clause for clause in filtered_clauses
                        if hasattr(clause, 'product') and clause.product in products
                        or (hasattr(clause, 'category') and clause.category in all_categories))
    discounts = []
    # Markers so that we don't need to evaluate given conditions
    # more than once
    accepted_discounts = set()
    failed_discounts = set()
    for clause in filtered_clauses:
        discount = clause.discount
        cond = ConditionController.for_condition(discount)
        past_use_count = clause.past_use_count
        if past_use_count >= clause.quantity:
            # This clause has exceeded its use count
            pass  # depends on [control=['if'], data=[]]
        elif discount not in failed_discounts:
            # This clause is still available
            is_accepted = discount in accepted_discounts
            if is_accepted or cond.is_met(user, filtered=True):
                # This clause is valid for this user
                discounts.append(DiscountAndQuantity(discount=discount, clause=clause, quantity=clause.quantity - past_use_count))
                accepted_discounts.add(discount)  # depends on [control=['if'], data=[]]
            else:
                # This clause is not valid for this user
                failed_discounts.add(discount)  # depends on [control=['if'], data=['discount', 'failed_discounts']]
        # depends on [control=['for'], data=['clause']]
    return discounts
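``DiscountAndQuantity`` is used here as a plain three-field record. Its real definition is not in the record; if one were reconstructing it, a namedtuple of this shape would fit the call site (an assumption inferred from the keyword arguments, not the library's actual code):

from collections import namedtuple

# Assumed shape only, inferred from the keyword arguments above.
DiscountAndQuantity = namedtuple(
    'DiscountAndQuantity', ['discount', 'clause', 'quantity'])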
def _GetExtractionErrorsAsWarnings(self):
    """Retrieves errors from the store, and converts them to warnings.

    This method is for backwards compatibility with pre-20190309 storage
    format stores which used ExtractionError attribute containers.

    Yields:
      ExtractionWarning: extraction warnings.
    """
    for extraction_error in self._GetAttributeContainers(
            self._CONTAINER_TYPE_EXTRACTION_ERROR):
        error_attributes = extraction_error.CopyToDict()
        warning = warnings.ExtractionWarning()
        warning.CopyFromDict(error_attributes)
        yield warning
def function[_GetExtractionErrorsAsWarnings, parameter[self]]: constant[Retrieves errors from from the store, and converts them to warnings. This method is for backwards compatibility with pre-20190309 storage format stores which used ExtractionError attribute containers. Yields: ExtractionWarning: extraction warnings. ] for taget[name[extraction_error]] in starred[call[name[self]._GetAttributeContainers, parameter[name[self]._CONTAINER_TYPE_EXTRACTION_ERROR]]] begin[:] variable[error_attributes] assign[=] call[name[extraction_error].CopyToDict, parameter[]] variable[warning] assign[=] call[name[warnings].ExtractionWarning, parameter[]] call[name[warning].CopyFromDict, parameter[name[error_attributes]]] <ast.Yield object at 0x7da20c6a8be0>
keyword[def] identifier[_GetExtractionErrorsAsWarnings] ( identifier[self] ): literal[string] keyword[for] identifier[extraction_error] keyword[in] identifier[self] . identifier[_GetAttributeContainers] ( identifier[self] . identifier[_CONTAINER_TYPE_EXTRACTION_ERROR] ): identifier[error_attributes] = identifier[extraction_error] . identifier[CopyToDict] () identifier[warning] = identifier[warnings] . identifier[ExtractionWarning] () identifier[warning] . identifier[CopyFromDict] ( identifier[error_attributes] ) keyword[yield] identifier[warning]
def _GetExtractionErrorsAsWarnings(self):
    """Retrieves errors from the store, and converts them to warnings.

    This method is for backwards compatibility with pre-20190309 storage
    format stores which used ExtractionError attribute containers.

    Yields:
      ExtractionWarning: extraction warnings.
    """
    for extraction_error in self._GetAttributeContainers(
            self._CONTAINER_TYPE_EXTRACTION_ERROR):
        error_attributes = extraction_error.CopyToDict()
        warning = warnings.ExtractionWarning()
        warning.CopyFromDict(error_attributes)
        yield warning  # depends on [control=['for'], data=['extraction_error']]
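The conversion leans on a CopyToDict/CopyFromDict round-trip between attribute containers. A minimal stand-in showing that contract (illustrative only; plaso's real containers are more involved):

class _AttributeContainerSketch(object):
    """Sketch of the interface the conversion above depends on."""

    def CopyToDict(self):
        # Serialize the attributes that are set.
        return {name: value for name, value in self.__dict__.items()
                if value is not None}

    def CopyFromDict(self, attributes):
        # Restore attributes from a dictionary.
        for name, value in attributes.items():
            setattr(self, name, value)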
def _type_repr(obj):
    """Return the repr() of an object, special-casing types (internal helper).

    If obj is a type, we return a shorter version than the default
    type.__repr__, based on the module and qualified name, which is
    typically enough to uniquely identify a type.  For everything
    else, we fall back on repr(obj).
    """
    if isinstance(obj, type) and not isinstance(obj, TypingMeta):
        if obj.__module__ == 'builtins':
            return _qualname(obj)
        return '%s.%s' % (obj.__module__, _qualname(obj))
    if obj is ...:
        return ('...')
    if isinstance(obj, types.FunctionType):
        return obj.__name__
    return repr(obj)
def function[_type_repr, parameter[obj]]: constant[Return the repr() of an object, special-casing types (internal helper). If obj is a type, we return a shorter version than the default type.__repr__, based on the module and qualified name, which is typically enough to uniquely identify a type. For everything else, we fall back on repr(obj). ] if <ast.BoolOp object at 0x7da1b1c835b0> begin[:] if compare[name[obj].__module__ equal[==] constant[builtins]] begin[:] return[call[name[_qualname], parameter[name[obj]]]] return[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1c82ce0>, <ast.Call object at 0x7da1b1c83ee0>]]]] if compare[name[obj] is constant[Ellipsis]] begin[:] return[constant[...]] if call[name[isinstance], parameter[name[obj], name[types].FunctionType]] begin[:] return[name[obj].__name__] return[call[name[repr], parameter[name[obj]]]]
keyword[def] identifier[_type_repr] ( identifier[obj] ): literal[string] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[type] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[TypingMeta] ): keyword[if] identifier[obj] . identifier[__module__] == literal[string] : keyword[return] identifier[_qualname] ( identifier[obj] ) keyword[return] literal[string] %( identifier[obj] . identifier[__module__] , identifier[_qualname] ( identifier[obj] )) keyword[if] identifier[obj] keyword[is] ...: keyword[return] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[FunctionType] ): keyword[return] identifier[obj] . identifier[__name__] keyword[return] identifier[repr] ( identifier[obj] )
def _type_repr(obj):
    """Return the repr() of an object, special-casing types (internal helper).

    If obj is a type, we return a shorter version than the default
    type.__repr__, based on the module and qualified name, which is
    typically enough to uniquely identify a type.  For everything
    else, we fall back on repr(obj).
    """
    if isinstance(obj, type) and (not isinstance(obj, TypingMeta)):
        if obj.__module__ == 'builtins':
            return _qualname(obj)  # depends on [control=['if'], data=[]]
        return '%s.%s' % (obj.__module__, _qualname(obj))  # depends on [control=['if'], data=[]]
    if obj is ...:
        return '...'  # depends on [control=['if'], data=[]]
    if isinstance(obj, types.FunctionType):
        return obj.__name__  # depends on [control=['if'], data=[]]
    return repr(obj)
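Illustrative inputs and outputs, assuming the surrounding typing-module context (``TypingMeta`` and ``_qualname`` in scope); plain functions hit the FunctionType branch, builtins get the short name:

import collections

def f():
    pass

_type_repr(int)                      # 'int' (builtins are shortened)
_type_repr(collections.OrderedDict)  # 'collections.OrderedDict'
_type_repr(...)                      # '...'
_type_repr(f)                        # 'f'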
def constant_time_cmp(a, b):
    '''Compare two strings using constant time.'''
    # zip() stops at the shorter input, so require equal lengths up front
    # to avoid accepting a prefix as a match.
    result = len(a) == len(b)
    for x, y in zip(a, b):
        result &= (x == y)
    return result
def function[constant_time_cmp, parameter[a, b]]: constant[Compare two strings using constant time.] variable[result] assign[=] constant[True] for taget[tuple[[<ast.Name object at 0x7da20e962470>, <ast.Name object at 0x7da20e962b90>]]] in starred[call[name[zip], parameter[name[a], name[b]]]] begin[:] <ast.AugAssign object at 0x7da20e960130> return[name[result]]
keyword[def] identifier[constant_time_cmp] ( identifier[a] , identifier[b] ): literal[string] identifier[result] = keyword[True] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[a] , identifier[b] ): identifier[result] &=( identifier[x] == identifier[y] ) keyword[return] identifier[result]
def constant_time_cmp(a, b):
    """Compare two strings using constant time."""
    # zip() stops at the shorter input, so require equal lengths up front
    # to avoid accepting a prefix as a match.
    result = len(a) == len(b)
    for (x, y) in zip(a, b):
        result &= x == y  # depends on [control=['for'], data=[]]
    return result
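On modern Pythons the standard library already ships a vetted constant-time comparison, which is generally preferable to a hand-rolled loop:

import hmac

hmac.compare_digest('secret', 'secret')   # True
hmac.compare_digest('secret', 'secret!')  # False; lengths may differ safely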
def validate(self):
    """
    Check that the Xmrs is well-formed.

    The Xmrs is analyzed and a list of problems is compiled. If
    any problems exist, an :exc:`XmrsError` is raised with the list
    joined as the error message. A well-formed Xmrs has the following
    properties:

    * All predications have an intrinsic variable
    * Every intrinsic variable belongs to one predication and maybe
      one quantifier
    * Every predication has no more than one quantifier
    * All predications have a label
    * The graph of predications forms a net (i.e. is connected).
      Connectivity can be established with variable arguments,
      QEQs, or label-equality.
    * The lo-handle for each QEQ must exist as the label of a
      predication
    """
    errors = []
    ivs, bvs = {}, {}
    _vars = self._vars
    _hcons = self._hcons
    labels = defaultdict(set)
    # ep_args = {}
    for ep in self.eps():
        nid, lbl, args, is_q = (
            ep.nodeid, ep.label, ep.args, ep.is_quantifier()
        )
        if lbl is None:
            errors.append('EP ({}) is missing a label.'.format(nid))
        labels[lbl].add(nid)
        iv = args.get(IVARG_ROLE)
        if iv is None:
            errors.append('EP {nid} is missing an intrinsic variable.'
                          .format(nid=nid))
        if is_q:
            if iv in bvs:
                errors.append('{} is the bound variable for more than '
                              'one quantifier.'.format(iv))
            bvs[iv] = nid
        else:
            if iv in ivs:
                errors.append('{} is the intrinsic variable for more '
                              'than one EP.'.format(iv))
            ivs[iv] = nid
        # ep_args[nid] = args
    for hc in _hcons.values():
        if hc[2] not in labels:
            errors.append('Lo variable of HCONS ({} {} {}) is not the '
                          'label of any EP.'.format(*hc))
    if not self.is_connected():
        errors.append('Xmrs structure is not connected.')
    if errors:
        raise XmrsError('\n'.join(errors))
def function[validate, parameter[self]]: constant[ Check that the Xmrs is well-formed. The Xmrs is analyzed and a list of problems is compiled. If any problems exist, an :exc:`XmrsError` is raised with the list joined as the error message. A well-formed Xmrs has the following properties: * All predications have an intrinsic variable * Every intrinsic variable belongs one predication and maybe one quantifier * Every predication has no more than one quantifier * All predications have a label * The graph of predications form a net (i.e. are connected). Connectivity can be established with variable arguments, QEQs, or label-equality. * The lo-handle for each QEQ must exist as the label of a predication ] variable[errors] assign[=] list[[]] <ast.Tuple object at 0x7da18bc71810> assign[=] tuple[[<ast.Dict object at 0x7da18bc739d0>, <ast.Dict object at 0x7da18bc73b20>]] variable[_vars] assign[=] name[self]._vars variable[_hcons] assign[=] name[self]._hcons variable[labels] assign[=] call[name[defaultdict], parameter[name[set]]] for taget[name[ep]] in starred[call[name[self].eps, parameter[]]] begin[:] <ast.Tuple object at 0x7da18bc73220> assign[=] tuple[[<ast.Attribute object at 0x7da18f09c220>, <ast.Attribute object at 0x7da18f09fb20>, <ast.Attribute object at 0x7da18f09c4c0>, <ast.Call object at 0x7da18f09d840>]] if compare[name[lbl] is constant[None]] begin[:] call[name[errors].append, parameter[call[constant[EP ({}) is missing a label.].format, parameter[name[nid]]]]] call[call[name[labels]][name[lbl]].add, parameter[name[nid]]] variable[iv] assign[=] call[name[args].get, parameter[name[IVARG_ROLE]]] if compare[name[iv] is constant[None]] begin[:] call[name[errors].append, parameter[call[constant[EP {nid} is missing an intrinsic variable.].format, parameter[name[nid]]]]] if name[is_q] begin[:] if compare[name[iv] in name[bvs]] begin[:] call[name[errors].append, parameter[call[constant[{} is the bound variable for more than one quantifier.].format, parameter[name[iv]]]]] call[name[bvs]][name[iv]] assign[=] name[nid] for taget[name[hc]] in starred[call[name[_hcons].values, parameter[]]] begin[:] if compare[call[name[hc]][constant[2]] <ast.NotIn object at 0x7da2590d7190> name[labels]] begin[:] call[name[errors].append, parameter[call[constant[Lo variable of HCONS ({} {} {}) is not the label of any EP.].format, parameter[<ast.Starred object at 0x7da18f09c040>]]]] if <ast.UnaryOp object at 0x7da18f09e5c0> begin[:] call[name[errors].append, parameter[constant[Xmrs structure is not connected.]]] if name[errors] begin[:] <ast.Raise object at 0x7da18f09d450>
keyword[def] identifier[validate] ( identifier[self] ): literal[string] identifier[errors] =[] identifier[ivs] , identifier[bvs] ={},{} identifier[_vars] = identifier[self] . identifier[_vars] identifier[_hcons] = identifier[self] . identifier[_hcons] identifier[labels] = identifier[defaultdict] ( identifier[set] ) keyword[for] identifier[ep] keyword[in] identifier[self] . identifier[eps] (): identifier[nid] , identifier[lbl] , identifier[args] , identifier[is_q] =( identifier[ep] . identifier[nodeid] , identifier[ep] . identifier[label] , identifier[ep] . identifier[args] , identifier[ep] . identifier[is_quantifier] () ) keyword[if] identifier[lbl] keyword[is] keyword[None] : identifier[errors] . identifier[append] ( literal[string] . identifier[format] ( identifier[nid] )) identifier[labels] [ identifier[lbl] ]. identifier[add] ( identifier[nid] ) identifier[iv] = identifier[args] . identifier[get] ( identifier[IVARG_ROLE] ) keyword[if] identifier[iv] keyword[is] keyword[None] : identifier[errors] . identifier[append] ( literal[string] . identifier[format] ( identifier[nid] )) keyword[if] identifier[is_q] : keyword[if] identifier[iv] keyword[in] identifier[bvs] : identifier[errors] . identifier[append] ( literal[string] literal[string] . identifier[format] ( identifier[iv] )) identifier[bvs] [ identifier[iv] ]= identifier[nid] keyword[else] : keyword[if] identifier[iv] keyword[in] identifier[ivs] : identifier[errors] . identifier[append] ( literal[string] literal[string] . identifier[format] ( identifier[iv] )) identifier[ivs] [ identifier[iv] ]= identifier[nid] keyword[for] identifier[hc] keyword[in] identifier[_hcons] . identifier[values] (): keyword[if] identifier[hc] [ literal[int] ] keyword[not] keyword[in] identifier[labels] : identifier[errors] . identifier[append] ( literal[string] literal[string] . identifier[format] (* identifier[hc] )) keyword[if] keyword[not] identifier[self] . identifier[is_connected] (): identifier[errors] . identifier[append] ( literal[string] ) keyword[if] identifier[errors] : keyword[raise] identifier[XmrsError] ( literal[string] . identifier[join] ( identifier[errors] ))
def validate(self):
    """
    Check that the Xmrs is well-formed.

    The Xmrs is analyzed and a list of problems is compiled. If
    any problems exist, an :exc:`XmrsError` is raised with the list
    joined as the error message. A well-formed Xmrs has the following
    properties:

    * All predications have an intrinsic variable
    * Every intrinsic variable belongs to one predication and maybe
      one quantifier
    * Every predication has no more than one quantifier
    * All predications have a label
    * The graph of predications forms a net (i.e. is connected).
      Connectivity can be established with variable arguments,
      QEQs, or label-equality.
    * The lo-handle for each QEQ must exist as the label of a
      predication
    """
    errors = []
    (ivs, bvs) = ({}, {})
    _vars = self._vars
    _hcons = self._hcons
    labels = defaultdict(set)
    # ep_args = {}
    for ep in self.eps():
        (nid, lbl, args, is_q) = (ep.nodeid, ep.label, ep.args, ep.is_quantifier())
        if lbl is None:
            errors.append('EP ({}) is missing a label.'.format(nid))  # depends on [control=['if'], data=[]]
        labels[lbl].add(nid)
        iv = args.get(IVARG_ROLE)
        if iv is None:
            errors.append('EP {nid} is missing an intrinsic variable.'.format(nid=nid))  # depends on [control=['if'], data=[]]
        if is_q:
            if iv in bvs:
                errors.append('{} is the bound variable for more than one quantifier.'.format(iv))  # depends on [control=['if'], data=['iv']]
            bvs[iv] = nid  # depends on [control=['if'], data=[]]
        else:
            if iv in ivs:
                errors.append('{} is the intrinsic variable for more than one EP.'.format(iv))  # depends on [control=['if'], data=['iv']]
            ivs[iv] = nid  # depends on [control=['for'], data=['ep']]
        # ep_args[nid] = args
    for hc in _hcons.values():
        if hc[2] not in labels:
            errors.append('Lo variable of HCONS ({} {} {}) is not the label of any EP.'.format(*hc))  # depends on [control=['if'], data=[]]
        # depends on [control=['for'], data=['hc']]
    if not self.is_connected():
        errors.append('Xmrs structure is not connected.')  # depends on [control=['if'], data=[]]
    if errors:
        raise XmrsError('\n'.join(errors))  # depends on [control=['if'], data=[]]
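A hypothetical call site: problems are gathered first and raised together, so a caller sees every issue in one message.

try:
    xmrs.validate()
except XmrsError as error:
    # e.g. "EP (10) is missing a label.\nXmrs structure is not connected."
    print(error)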
def download_shared_files(job, config): """ Downloads shared reference files for Toil Germline pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options :return: Updated config with shared fileStoreIDS :rtype: Namespace """ job.fileStore.logToMaster('Downloading shared reference files') shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'} nonessential_files = {'genome_fai', 'genome_dict'} # Download necessary files for pipeline configuration if config.run_bwa: shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'} nonessential_files.add('alt') if config.preprocess: shared_files |= {'g1k_indel', 'mills', 'dbsnp'} if config.run_vqsr: shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} if config.run_oncotator: shared_files.add('oncotator_db') for name in shared_files: try: url = getattr(config, name, None) if url is None: continue setattr(config, name, job.addChildJobFn(download_url_job, url, name=name, s3_key_path=config.ssec, disk='15G' # Estimated reference file size ).rv()) finally: if getattr(config, name, None) is None and name not in nonessential_files: raise ValueError("Necessary configuration parameter is missing:\n{}".format(name)) return job.addFollowOnJobFn(reference_preprocessing, config).rv()
def function[download_shared_files, parameter[job, config]]: constant[ Downloads shared reference files for Toil Germline pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options :return: Updated config with shared fileStoreIDS :rtype: Namespace ] call[name[job].fileStore.logToMaster, parameter[constant[Downloading shared reference files]]] variable[shared_files] assign[=] <ast.Set object at 0x7da18ede74f0> variable[nonessential_files] assign[=] <ast.Set object at 0x7da18ede6170> if name[config].run_bwa begin[:] <ast.AugAssign object at 0x7da18ede6e60> call[name[nonessential_files].add, parameter[constant[alt]]] if name[config].preprocess begin[:] <ast.AugAssign object at 0x7da18ede5390> if name[config].run_vqsr begin[:] <ast.AugAssign object at 0x7da18ede58a0> if name[config].run_oncotator begin[:] call[name[shared_files].add, parameter[constant[oncotator_db]]] for taget[name[name]] in starred[name[shared_files]] begin[:] <ast.Try object at 0x7da18ede6d70> return[call[call[name[job].addFollowOnJobFn, parameter[name[reference_preprocessing], name[config]]].rv, parameter[]]]
keyword[def] identifier[download_shared_files] ( identifier[job] , identifier[config] ): literal[string] identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] ) identifier[shared_files] ={ literal[string] , literal[string] , literal[string] } identifier[nonessential_files] ={ literal[string] , literal[string] } keyword[if] identifier[config] . identifier[run_bwa] : identifier[shared_files] |={ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] } identifier[nonessential_files] . identifier[add] ( literal[string] ) keyword[if] identifier[config] . identifier[preprocess] : identifier[shared_files] |={ literal[string] , literal[string] , literal[string] } keyword[if] identifier[config] . identifier[run_vqsr] : identifier[shared_files] |={ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] } keyword[if] identifier[config] . identifier[run_oncotator] : identifier[shared_files] . identifier[add] ( literal[string] ) keyword[for] identifier[name] keyword[in] identifier[shared_files] : keyword[try] : identifier[url] = identifier[getattr] ( identifier[config] , identifier[name] , keyword[None] ) keyword[if] identifier[url] keyword[is] keyword[None] : keyword[continue] identifier[setattr] ( identifier[config] , identifier[name] , identifier[job] . identifier[addChildJobFn] ( identifier[download_url_job] , identifier[url] , identifier[name] = identifier[name] , identifier[s3_key_path] = identifier[config] . identifier[ssec] , identifier[disk] = literal[string] ). identifier[rv] ()) keyword[finally] : keyword[if] identifier[getattr] ( identifier[config] , identifier[name] , keyword[None] ) keyword[is] keyword[None] keyword[and] identifier[name] keyword[not] keyword[in] identifier[nonessential_files] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[name] )) keyword[return] identifier[job] . identifier[addFollowOnJobFn] ( identifier[reference_preprocessing] , identifier[config] ). identifier[rv] ()
def download_shared_files(job, config): """ Downloads shared reference files for Toil Germline pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options :return: Updated config with shared fileStoreIDS :rtype: Namespace """ job.fileStore.logToMaster('Downloading shared reference files') shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'} nonessential_files = {'genome_fai', 'genome_dict'} # Download necessary files for pipeline configuration if config.run_bwa: shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'} nonessential_files.add('alt') # depends on [control=['if'], data=[]] if config.preprocess: shared_files |= {'g1k_indel', 'mills', 'dbsnp'} # depends on [control=['if'], data=[]] if config.run_vqsr: shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} # depends on [control=['if'], data=[]] if config.run_oncotator: shared_files.add('oncotator_db') # depends on [control=['if'], data=[]] for name in shared_files: try: url = getattr(config, name, None) if url is None: continue # depends on [control=['if'], data=[]] # Estimated reference file size setattr(config, name, job.addChildJobFn(download_url_job, url, name=name, s3_key_path=config.ssec, disk='15G').rv()) # depends on [control=['try'], data=[]] finally: if getattr(config, name, None) is None and name not in nonessential_files: raise ValueError('Necessary configuration parameter is missing:\n{}'.format(name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] return job.addFollowOnJobFn(reference_preprocessing, config).rv()
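For orientation, a minimal sketch of how a Toil job function such as download_shared_files is typically launched; `options` (Toil run options) and `config` (the pipeline Namespace) are assumed to be prepared elsewhere and are illustrative names only.

# Hedged sketch of wiring the job function into a Toil workflow.
from toil.common import Toil
from toil.job import Job

with Toil(options) as workflow:
    # start() resolves the returned promise: the config updated with
    # the shared fileStoreIDs.
    updated_config = workflow.start(Job.wrapJobFn(download_shared_files, config))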
def get_particles_featuring(feature_rad, state_name=None, im_name=None,
        use_full_path=False, actual_rad=None, invert=True,
        featuring_params={}, **kwargs):
    """
    Combines centroid featuring with the globals from a previous state.

    Runs trackpy.locate on an image, sets the globals from a previous
    state, calls _translate_particles.

    Parameters
    ----------
    feature_rad : Int, odd
        The particle radius for featuring, as passed to locate_spheres.
    state_name : String or None, optional
        The name of the initially-optimized state. Default is None,
        which prompts the user to select the name interactively
        through a Tk window.
    im_name : String or None, optional
        The name of the new image to optimize. Default is None,
        which prompts the user to select the name interactively
        through a Tk window.
    use_full_path : Bool, optional
        Set to True to use the full path of the state instead of
        partial path names (e.g. /full/path/name/state.pkl vs
        state.pkl). Default is False
    actual_rad : Float or None, optional
        The initial guess for the particle radii. Default is the
        median of the previous state.
    invert : Bool
        Whether to invert the image for featuring, as passed to
        addsubtract.add_subtract and locate_spheres. Set to False
        if the image is bright particles on a dark background.
        Default is True (dark particles on bright background).
    featuring_params : Dict, optional
        kwargs-like dict of any additional keyword arguments to pass to
        ``get_initial_featuring``, such as ``'use_tp'`` or ``'minmass'``.
        Default is ``{}``.

    Other Parameters
    ----------------
    max_mem : Numeric
        The maximum additional memory to use for the optimizers, as
        passed to optimize.burn. Default is 1e9.
    desc : String, optional
        A description to be inserted in saved state. The save name will
        be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
    min_rad : Float, optional
        The minimum particle radius, as passed to
        addsubtract.add_subtract. Particles with a fitted radius
        smaller than this are identified as fake and removed.
        Default is 0.5 * actual_rad.
    max_rad : Float, optional
        The maximum particle radius, as passed to
        addsubtract.add_subtract. Particles with a fitted radius
        larger than this are identified as fake and removed.
        Default is 1.5 * actual_rad, however you may find better
        results if you make this more stringent.
    rz_order : int, optional
        If nonzero, the order of an additional augmented rscl(z)
        parameter for optimization. Default is 0; i.e. no rscl(z)
        optimization.
    do_polish : Bool, optional
        Set to False to only optimize the particles and add-subtract.
        Default is True, which then runs a polish afterwards.

    Returns
    -------
    s : :class:`peri.states.ImageState`
        The optimized state.

    See Also
    --------
    get_initial_featuring : Features an image from scratch, using
        centroid methods as initial particle locations.

    feature_from_pos_rad : Using a previous state's globals and
        user-provided positions and radii as an initial guess,
        completely optimizes a state.

    translate_featuring : Use a previous state's globals and centroid
        methods for an initial particle guess, completely optimizes a
        state.

    Notes
    -----
    The ``Other Parameters`` are passed to _translate_particles.
    Proceeds by:
        1. Find a guess of the particle positions through centroid methods.
        2. Optimize particle positions only.
        3. Optimize particle positions and radii only.
        4. Add-subtract missing and bad particles.
        5. If polish, optimize the illumination, background, and particles.
        6. If polish, optimize everything.
    """
    state_name, im_name = _pick_state_im_name(
            state_name, im_name, use_full_path=use_full_path)
    s = states.load(state_name)
    if actual_rad is None:
        actual_rad = np.median(s.obj_get_radii())
    im = util.RawImage(im_name, tile=s.image.tile)
    pos = locate_spheres(im, feature_rad, invert=invert, **featuring_params)
    _ = s.obj_remove_particle(np.arange(s.obj_get_radii().size))
    s.obj_add_particle(pos, np.ones(pos.shape[0])*actual_rad)
    s.set_image(im)
    _translate_particles(s, invert=invert, **kwargs)
    return s
def function[get_particles_featuring, parameter[feature_rad, state_name, im_name, use_full_path, actual_rad, invert, featuring_params]]: constant[
    Combines centroid featuring with the globals from a previous state.

    Runs trackpy.locate on an image, sets the globals from a previous
    state, calls _translate_particles.

    Parameters
    ----------
    feature_rad : Int, odd
        The particle radius for featuring, as passed to locate_spheres.
    state_name : String or None, optional
        The name of the initially-optimized state. Default is None,
        which prompts the user to select the name interactively
        through a Tk window.
    im_name : String or None, optional
        The name of the new image to optimize. Default is None,
        which prompts the user to select the name interactively
        through a Tk window.
    use_full_path : Bool, optional
        Set to True to use the full path of the state instead of
        partial path names (e.g. /full/path/name/state.pkl vs
        state.pkl). Default is False
    actual_rad : Float or None, optional
        The initial guess for the particle radii. Default is the
        median of the previous state.
    invert : Bool
        Whether to invert the image for featuring, as passed to
        addsubtract.add_subtract and locate_spheres. Set to False
        if the image is bright particles on a dark background.
        Default is True (dark particles on bright background).
    featuring_params : Dict, optional
        kwargs-like dict of any additional keyword arguments to pass to
        ``get_initial_featuring``, such as ``'use_tp'`` or ``'minmass'``.
        Default is ``{}``.

    Other Parameters
    ----------------
    max_mem : Numeric
        The maximum additional memory to use for the optimizers, as
        passed to optimize.burn. Default is 1e9.
    desc : String, optional
        A description to be inserted in saved state. The save name will
        be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
    min_rad : Float, optional
        The minimum particle radius, as passed to
        addsubtract.add_subtract. Particles with a fitted radius
        smaller than this are identified as fake and removed.
        Default is 0.5 * actual_rad.
    max_rad : Float, optional
        The maximum particle radius, as passed to
        addsubtract.add_subtract. Particles with a fitted radius
        larger than this are identified as fake and removed.
        Default is 1.5 * actual_rad, however you may find better
        results if you make this more stringent.
    rz_order : int, optional
        If nonzero, the order of an additional augmented rscl(z)
        parameter for optimization. Default is 0; i.e. no rscl(z)
        optimization.
    do_polish : Bool, optional
        Set to False to only optimize the particles and add-subtract.
        Default is True, which then runs a polish afterwards.

    Returns
    -------
    s : :class:`peri.states.ImageState`
        The optimized state.

    See Also
    --------
    get_initial_featuring : Features an image from scratch, using
        centroid methods as initial particle locations.

    feature_from_pos_rad : Using a previous state's globals and
        user-provided positions and radii as an initial guess,
        completely optimizes a state.

    translate_featuring : Use a previous state's globals and centroid
        methods for an initial particle guess, completely optimizes a
        state.

    Notes
    -----
    The ``Other Parameters`` are passed to _translate_particles.
    Proceeds by:
        1. Find a guess of the particle positions through centroid methods.
        2. Optimize particle positions only.
        3. Optimize particle positions and radii only.
        4. Add-subtract missing and bad particles.
        5. If polish, optimize the illumination, background, and particles.
        6. If polish, optimize everything.
    ] <ast.Tuple object at 0x7da2047eb010> assign[=] call[name[_pick_state_im_name], parameter[name[state_name], name[im_name]]]
variable[s] assign[=] call[name[states].load, parameter[name[state_name]]]
if compare[name[actual_rad] is constant[None]] begin[:] variable[actual_rad] assign[=] call[name[np].median, parameter[call[name[s].obj_get_radii, parameter[]]]]
variable[im] assign[=] call[name[util].RawImage, parameter[name[im_name]]]
variable[pos] assign[=] call[name[locate_spheres], parameter[name[im], name[feature_rad]]]
variable[_] assign[=] call[name[s].obj_remove_particle, parameter[call[name[np].arange, parameter[call[name[s].obj_get_radii, parameter[]].size]]]]
call[name[s].obj_add_particle, parameter[name[pos], binary_operation[call[name[np].ones, parameter[call[name[pos].shape][constant[0]]]] * name[actual_rad]]]]
call[name[s].set_image, parameter[name[im]]]
call[name[_translate_particles], parameter[name[s]]]
return[name[s]]
keyword[def] identifier[get_particles_featuring] ( identifier[feature_rad] , identifier[state_name] = keyword[None] , identifier[im_name] = keyword[None] , identifier[use_full_path] = keyword[False] , identifier[actual_rad] = keyword[None] , identifier[invert] = keyword[True] , identifier[featuring_params] ={}, ** identifier[kwargs] ): literal[string] identifier[state_name] , identifier[im_name] = identifier[_pick_state_im_name] ( identifier[state_name] , identifier[im_name] , identifier[use_full_path] = identifier[use_full_path] ) identifier[s] = identifier[states] . identifier[load] ( identifier[state_name] ) keyword[if] identifier[actual_rad] keyword[is] keyword[None] : identifier[actual_rad] = identifier[np] . identifier[median] ( identifier[s] . identifier[obj_get_radii] ()) identifier[im] = identifier[util] . identifier[RawImage] ( identifier[im_name] , identifier[tile] = identifier[s] . identifier[image] . identifier[tile] ) identifier[pos] = identifier[locate_spheres] ( identifier[im] , identifier[feature_rad] , identifier[invert] = identifier[invert] ,** identifier[featuring_params] ) identifier[_] = identifier[s] . identifier[obj_remove_particle] ( identifier[np] . identifier[arange] ( identifier[s] . identifier[obj_get_radii] (). identifier[size] )) identifier[s] . identifier[obj_add_particle] ( identifier[pos] , identifier[np] . identifier[ones] ( identifier[pos] . identifier[shape] [ literal[int] ])* identifier[actual_rad] ) identifier[s] . identifier[set_image] ( identifier[im] ) identifier[_translate_particles] ( identifier[s] , identifier[invert] = identifier[invert] ,** identifier[kwargs] ) keyword[return] identifier[s]
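Before the dependency-annotated rendering below, an illustrative call of get_particles_featuring; the file names and radius are made up, and the extra keywords are the "Other Parameters" documented in the docstring, forwarded via **kwargs.

# Sketch only: paths and values are hypothetical.
s = get_particles_featuring(feature_rad=5,
                            state_name='old-state.pkl',
                            im_name='next-frame.tif',
                            invert=True, max_mem=1e9, do_polish=True)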
def get_particles_featuring(feature_rad, state_name=None, im_name=None, use_full_path=False, actual_rad=None, invert=True, featuring_params={}, **kwargs):
    """
    Combines centroid featuring with the globals from a previous state.

    Runs trackpy.locate on an image, sets the globals from a previous
    state, calls _translate_particles.

    Parameters
    ----------
    feature_rad : Int, odd
        The particle radius for featuring, as passed to locate_spheres.
    state_name : String or None, optional
        The name of the initially-optimized state. Default is None,
        which prompts the user to select the name interactively
        through a Tk window.
    im_name : String or None, optional
        The name of the new image to optimize. Default is None,
        which prompts the user to select the name interactively
        through a Tk window.
    use_full_path : Bool, optional
        Set to True to use the full path of the state instead of
        partial path names (e.g. /full/path/name/state.pkl vs
        state.pkl). Default is False
    actual_rad : Float or None, optional
        The initial guess for the particle radii. Default is the
        median of the previous state.
    invert : Bool
        Whether to invert the image for featuring, as passed to
        addsubtract.add_subtract and locate_spheres. Set to False
        if the image is bright particles on a dark background.
        Default is True (dark particles on bright background).
    featuring_params : Dict, optional
        kwargs-like dict of any additional keyword arguments to pass to
        ``get_initial_featuring``, such as ``'use_tp'`` or ``'minmass'``.
        Default is ``{}``.

    Other Parameters
    ----------------
    max_mem : Numeric
        The maximum additional memory to use for the optimizers, as
        passed to optimize.burn. Default is 1e9.
    desc : String, optional
        A description to be inserted in saved state. The save name will
        be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
    min_rad : Float, optional
        The minimum particle radius, as passed to
        addsubtract.add_subtract. Particles with a fitted radius
        smaller than this are identified as fake and removed.
        Default is 0.5 * actual_rad.
    max_rad : Float, optional
        The maximum particle radius, as passed to
        addsubtract.add_subtract. Particles with a fitted radius
        larger than this are identified as fake and removed.
        Default is 1.5 * actual_rad, however you may find better
        results if you make this more stringent.
    rz_order : int, optional
        If nonzero, the order of an additional augmented rscl(z)
        parameter for optimization. Default is 0; i.e. no rscl(z)
        optimization.
    do_polish : Bool, optional
        Set to False to only optimize the particles and add-subtract.
        Default is True, which then runs a polish afterwards.

    Returns
    -------
    s : :class:`peri.states.ImageState`
        The optimized state.

    See Also
    --------
    get_initial_featuring : Features an image from scratch, using
        centroid methods as initial particle locations.

    feature_from_pos_rad : Using a previous state's globals and
        user-provided positions and radii as an initial guess,
        completely optimizes a state.

    translate_featuring : Use a previous state's globals and centroid
        methods for an initial particle guess, completely optimizes a
        state.

    Notes
    -----
    The ``Other Parameters`` are passed to _translate_particles.
    Proceeds by:
        1. Find a guess of the particle positions through centroid methods.
        2. Optimize particle positions only.
        3. Optimize particle positions and radii only.
        4. Add-subtract missing and bad particles.
        5. If polish, optimize the illumination, background, and particles.
        6. If polish, optimize everything.
    """
    (state_name, im_name) = _pick_state_im_name(state_name, im_name, use_full_path=use_full_path)
    s = states.load(state_name)
    if actual_rad is None:
        actual_rad = np.median(s.obj_get_radii()) # depends on [control=['if'], data=['actual_rad']]
    im = util.RawImage(im_name, tile=s.image.tile)
    pos = locate_spheres(im, feature_rad, invert=invert, **featuring_params)
    _ = s.obj_remove_particle(np.arange(s.obj_get_radii().size))
    s.obj_add_particle(pos, np.ones(pos.shape[0]) * actual_rad)
    s.set_image(im)
    _translate_particles(s, invert=invert, **kwargs)
    return s
async def log( self, date: datetime.date = None, days: int = None, details: bool = False) -> list: """Get watering information for X days from Y date.""" endpoint = 'watering/log' if details: endpoint += '/details' if date and days: endpoint = '{0}/{1}/{2}'.format( endpoint, date.strftime('%Y-%m-%d'), days) data = await self._request('get', endpoint) return data['waterLog']['days']
<ast.AsyncFunctionDef object at 0x7da18fe908e0>
keyword[async] keyword[def] identifier[log] ( identifier[self] , identifier[date] : identifier[datetime] . identifier[date] = keyword[None] , identifier[days] : identifier[int] = keyword[None] , identifier[details] : identifier[bool] = keyword[False] )-> identifier[list] : literal[string] identifier[endpoint] = literal[string] keyword[if] identifier[details] : identifier[endpoint] += literal[string] keyword[if] identifier[date] keyword[and] identifier[days] : identifier[endpoint] = literal[string] . identifier[format] ( identifier[endpoint] , identifier[date] . identifier[strftime] ( literal[string] ), identifier[days] ) identifier[data] = keyword[await] identifier[self] . identifier[_request] ( literal[string] , identifier[endpoint] ) keyword[return] identifier[data] [ literal[string] ][ literal[string] ]
async def log(self, date: datetime.date=None, days: int=None, details: bool=False) -> list: """Get watering information for X days from Y date.""" endpoint = 'watering/log' if details: endpoint += '/details' # depends on [control=['if'], data=[]] if date and days: endpoint = '{0}/{1}/{2}'.format(endpoint, date.strftime('%Y-%m-%d'), days) # depends on [control=['if'], data=[]] data = await self._request('get', endpoint) return data['waterLog']['days']
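A minimal sketch of driving the coroutine above; `controller` stands in for whatever authenticated client object exposes this .log() method.

# Hedged sketch: requires Python 3.7+ for asyncio.run().
import asyncio
import datetime

async def main():
    days = await controller.log(date=datetime.date(2019, 6, 1),
                                days=7, details=True)
    print(len(days), 'day entries returned')

asyncio.run(main())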
def eval_stdin(): 'evaluate expressions read from stdin' cmd = ['plash', 'eval'] p = subprocess.Popen(cmd, stdin=sys.stdin, stdout=sys.stdout) exit = p.wait() if exit: raise subprocess.CalledProcessError(exit, cmd)
def function[eval_stdin, parameter[]]: constant[evaluate expressions read from stdin] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b1297250>, <ast.Constant object at 0x7da1b12953f0>]] variable[p] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]] variable[exit] assign[=] call[name[p].wait, parameter[]] if name[exit] begin[:] <ast.Raise object at 0x7da18f09e0b0>
keyword[def] identifier[eval_stdin] (): literal[string] identifier[cmd] =[ literal[string] , literal[string] ] identifier[p] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[stdin] = identifier[sys] . identifier[stdin] , identifier[stdout] = identifier[sys] . identifier[stdout] ) identifier[exit] = identifier[p] . identifier[wait] () keyword[if] identifier[exit] : keyword[raise] identifier[subprocess] . identifier[CalledProcessError] ( identifier[exit] , identifier[cmd] )
def eval_stdin(): """evaluate expressions read from stdin""" cmd = ['plash', 'eval'] p = subprocess.Popen(cmd, stdin=sys.stdin, stdout=sys.stdout) exit = p.wait() if exit: raise subprocess.CalledProcessError(exit, cmd) # depends on [control=['if'], data=[]]
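A hedged sketch of calling eval_stdin() from a small wrapper and surfacing a failing `plash eval` as the process exit code; stdin is whatever the parent shell pipes in.

# Sketch only: assumes eval_stdin() from above is importable.
import subprocess
import sys

try:
    eval_stdin()
except subprocess.CalledProcessError as exc:
    sys.exit(exc.returncode)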
def _get_role(rolename): """Reads and parses a file containing a role""" path = os.path.join('roles', rolename + '.json') if not os.path.exists(path): abort("Couldn't read role file {0}".format(path)) with open(path, 'r') as f: try: role = json.loads(f.read()) except ValueError as e: msg = "Little Chef found the following error in your" msg += " {0}.json file:\n {1}".format(rolename, str(e)) abort(msg) role['fullname'] = rolename return role
def function[_get_role, parameter[rolename]]: constant[Reads and parses a file containing a role] variable[path] assign[=] call[name[os].path.join, parameter[constant[roles], binary_operation[name[rolename] + constant[.json]]]] if <ast.UnaryOp object at 0x7da1b12b8790> begin[:] call[name[abort], parameter[call[constant[Couldn't read role file {0}].format, parameter[name[path]]]]] with call[name[open], parameter[name[path], constant[r]]] begin[:] <ast.Try object at 0x7da1b12b8850> call[name[role]][constant[fullname]] assign[=] name[rolename] return[name[role]]
keyword[def] identifier[_get_role] ( identifier[rolename] ): literal[string] identifier[path] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[rolename] + literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): identifier[abort] ( literal[string] . identifier[format] ( identifier[path] )) keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] : keyword[try] : identifier[role] = identifier[json] . identifier[loads] ( identifier[f] . identifier[read] ()) keyword[except] identifier[ValueError] keyword[as] identifier[e] : identifier[msg] = literal[string] identifier[msg] += literal[string] . identifier[format] ( identifier[rolename] , identifier[str] ( identifier[e] )) identifier[abort] ( identifier[msg] ) identifier[role] [ literal[string] ]= identifier[rolename] keyword[return] identifier[role]
def _get_role(rolename): """Reads and parses a file containing a role""" path = os.path.join('roles', rolename + '.json') if not os.path.exists(path): abort("Couldn't read role file {0}".format(path)) # depends on [control=['if'], data=[]] with open(path, 'r') as f: try: role = json.loads(f.read()) # depends on [control=['try'], data=[]] except ValueError as e: msg = 'Little Chef found the following error in your' msg += ' {0}.json file:\n {1}'.format(rolename, str(e)) abort(msg) # depends on [control=['except'], data=['e']] role['fullname'] = rolename return role # depends on [control=['with'], data=['f']]
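An illustrative call of _get_role; the role name is hypothetical and assumes a roles/webserver.json file exists under the current working directory.

# Sketch only: 'webserver' is a made-up role name.
role = _get_role('webserver')
print(role['fullname'])  # 'webserver', added by _get_role itself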
def AdditivePoissonNoise(lam=0, per_channel=False, name=None, deterministic=False, random_state=None): """ Create an augmenter to add poisson noise to images. Poisson noise is comparable to gaussian noise as in ``AdditiveGaussianNoise``, but the values are sampled from a poisson distribution instead of a gaussian distribution. As poisson distributions produce only positive numbers, the sign of the sampled values are here randomly flipped. Values of around ``10.0`` for `lam` lead to visible noise (for uint8). Values of around ``20.0`` for `lam` lead to very visible noise (for uint8). It is recommended to usually set `per_channel` to True. dtype support:: See ``imgaug.augmenters.arithmetic.AddElementwise``. Parameters ---------- lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Lambda parameter of the poisson distribution. Recommended values are around ``0.0`` to ``10.0``. * If a number, exactly that value will be used. * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will be sampled per image. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, a value will be sampled from the parameter per image. per_channel : bool or float, optional Whether to use the same noise value per pixel for all channels (False) or to sample a new value for each channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel` will be treated as True, otherwise as False. name : None or str, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. deterministic : bool, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or numpy.random.RandomState, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> aug = iaa.AdditivePoissonNoise(lam=5.0) Adds poisson noise sampled from ``Poisson(5.0)`` to images. >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0)) Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is randomly sampled per image from the interval ``[0.0, 10.0]``. >>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True) Adds poisson noise sampled from ``Poisson(5.0)`` to images, where the values are different per pixel *and* channel (e.g. a different one for red, green and blue channels for the same pixel). >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True) Adds poisson noise sampled from ``Poisson(x)`` to images, with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image, pixel and channel. This is the *recommended* configuration. >>> aug = iaa.AdditivePoissonNoise(lam=2, per_channel=0.5) Adds poisson noise sampled from the distribution ``Poisson(2)`` to images, where the values are sometimes (50 percent of all cases) the same per pixel for all channels and sometimes different (other 50 percent). """ lam2 = iap.handle_continuous_param(lam, "lam", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True) if name is None: name = "Unnamed%s" % (ia.caller_name(),) return AddElementwise(iap.RandomSign(iap.Poisson(lam=lam2)), per_channel=per_channel, name=name, deterministic=deterministic, random_state=random_state)
def function[AdditivePoissonNoise, parameter[lam, per_channel, name, deterministic, random_state]]: constant[ Create an augmenter to add poisson noise to images. Poisson noise is comparable to gaussian noise as in ``AdditiveGaussianNoise``, but the values are sampled from a poisson distribution instead of a gaussian distribution. As poisson distributions produce only positive numbers, the sign of the sampled values are here randomly flipped. Values of around ``10.0`` for `lam` lead to visible noise (for uint8). Values of around ``20.0`` for `lam` lead to very visible noise (for uint8). It is recommended to usually set `per_channel` to True. dtype support:: See ``imgaug.augmenters.arithmetic.AddElementwise``. Parameters ---------- lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Lambda parameter of the poisson distribution. Recommended values are around ``0.0`` to ``10.0``. * If a number, exactly that value will be used. * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will be sampled per image. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, a value will be sampled from the parameter per image. per_channel : bool or float, optional Whether to use the same noise value per pixel for all channels (False) or to sample a new value for each channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel` will be treated as True, otherwise as False. name : None or str, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. deterministic : bool, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or numpy.random.RandomState, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> aug = iaa.AdditivePoissonNoise(lam=5.0) Adds poisson noise sampled from ``Poisson(5.0)`` to images. >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0)) Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is randomly sampled per image from the interval ``[0.0, 10.0]``. >>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True) Adds poisson noise sampled from ``Poisson(5.0)`` to images, where the values are different per pixel *and* channel (e.g. a different one for red, green and blue channels for the same pixel). >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True) Adds poisson noise sampled from ``Poisson(x)`` to images, with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image, pixel and channel. This is the *recommended* configuration. >>> aug = iaa.AdditivePoissonNoise(lam=2, per_channel=0.5) Adds poisson noise sampled from the distribution ``Poisson(2)`` to images, where the values are sometimes (50 percent of all cases) the same per pixel for all channels and sometimes different (other 50 percent). ] variable[lam2] assign[=] call[name[iap].handle_continuous_param, parameter[name[lam], constant[lam]]] if compare[name[name] is constant[None]] begin[:] variable[name] assign[=] binary_operation[constant[Unnamed%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18c4ccf40>]]] return[call[name[AddElementwise], parameter[call[name[iap].RandomSign, parameter[call[name[iap].Poisson, parameter[]]]]]]]
keyword[def] identifier[AdditivePoissonNoise] ( identifier[lam] = literal[int] , identifier[per_channel] = keyword[False] , identifier[name] = keyword[None] , identifier[deterministic] = keyword[False] , identifier[random_state] = keyword[None] ): literal[string] identifier[lam2] = identifier[iap] . identifier[handle_continuous_param] ( identifier[lam] , literal[string] , identifier[value_range] =( literal[int] , keyword[None] ), identifier[tuple_to_uniform] = keyword[True] , identifier[list_to_choice] = keyword[True] ) keyword[if] identifier[name] keyword[is] keyword[None] : identifier[name] = literal[string] %( identifier[ia] . identifier[caller_name] (),) keyword[return] identifier[AddElementwise] ( identifier[iap] . identifier[RandomSign] ( identifier[iap] . identifier[Poisson] ( identifier[lam] = identifier[lam2] )), identifier[per_channel] = identifier[per_channel] , identifier[name] = identifier[name] , identifier[deterministic] = identifier[deterministic] , identifier[random_state] = identifier[random_state] )
def AdditivePoissonNoise(lam=0, per_channel=False, name=None, deterministic=False, random_state=None): """ Create an augmenter to add poisson noise to images. Poisson noise is comparable to gaussian noise as in ``AdditiveGaussianNoise``, but the values are sampled from a poisson distribution instead of a gaussian distribution. As poisson distributions produce only positive numbers, the sign of the sampled values are here randomly flipped. Values of around ``10.0`` for `lam` lead to visible noise (for uint8). Values of around ``20.0`` for `lam` lead to very visible noise (for uint8). It is recommended to usually set `per_channel` to True. dtype support:: See ``imgaug.augmenters.arithmetic.AddElementwise``. Parameters ---------- lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Lambda parameter of the poisson distribution. Recommended values are around ``0.0`` to ``10.0``. * If a number, exactly that value will be used. * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will be sampled per image. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, a value will be sampled from the parameter per image. per_channel : bool or float, optional Whether to use the same noise value per pixel for all channels (False) or to sample a new value for each channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel` will be treated as True, otherwise as False. name : None or str, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. deterministic : bool, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or numpy.random.RandomState, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> aug = iaa.AdditivePoissonNoise(lam=5.0) Adds poisson noise sampled from ``Poisson(5.0)`` to images. >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0)) Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is randomly sampled per image from the interval ``[0.0, 10.0]``. >>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True) Adds poisson noise sampled from ``Poisson(5.0)`` to images, where the values are different per pixel *and* channel (e.g. a different one for red, green and blue channels for the same pixel). >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True) Adds poisson noise sampled from ``Poisson(x)`` to images, with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image, pixel and channel. This is the *recommended* configuration. >>> aug = iaa.AdditivePoissonNoise(lam=2, per_channel=0.5) Adds poisson noise sampled from the distribution ``Poisson(2)`` to images, where the values are sometimes (50 percent of all cases) the same per pixel for all channels and sometimes different (other 50 percent). """ lam2 = iap.handle_continuous_param(lam, 'lam', value_range=(0, None), tuple_to_uniform=True, list_to_choice=True) if name is None: name = 'Unnamed%s' % (ia.caller_name(),) # depends on [control=['if'], data=['name']] return AddElementwise(iap.RandomSign(iap.Poisson(lam=lam2)), per_channel=per_channel, name=name, deterministic=deterministic, random_state=random_state)
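The docstring examples above only construct augmenters; as a complement, a hedged sketch of actually applying one to a batch. `images` is an assumption: any uint8 array of shape (N, H, W, C) works.

# Sketch: mid-grey test batch, noised per pixel and channel.
import numpy as np

aug = AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True)
images = np.full((4, 64, 64, 3), 128, dtype=np.uint8)
images_aug = aug.augment_images(images)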
def process_filter_directive(filter_operation_info, location, context): """Return a Filter basic block that corresponds to the filter operation in the directive. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: a Filter basic block that performs the requested filtering operation """ op_name, operator_params = _get_filter_op_name_and_values(filter_operation_info.directive) non_comparison_filters = { u'name_or_alias': _process_name_or_alias_filter_directive, u'between': _process_between_filter_directive, u'in_collection': _process_in_collection_filter_directive, u'has_substring': _process_has_substring_filter_directive, u'contains': _process_contains_filter_directive, u'intersects': _process_intersects_filter_directive, u'has_edge_degree': _process_has_edge_degree_filter_directive, } all_recognized_filters = frozenset(non_comparison_filters.keys()) | COMPARISON_OPERATORS if all_recognized_filters != ALL_OPERATORS: unrecognized_filters = ALL_OPERATORS - all_recognized_filters raise AssertionError(u'Some filtering operators are defined but do not have an associated ' u'processing function. This is a bug: {}'.format(unrecognized_filters)) if op_name in COMPARISON_OPERATORS: process_func = partial(_process_comparison_filter_directive, operator=op_name) else: process_func = non_comparison_filters.get(op_name, None) if process_func is None: raise GraphQLCompilationError(u'Unknown op_name for filter directive: {}'.format(op_name)) # Operators that do not affect the inner scope require a field name to which they apply. # There is no field name on InlineFragment ASTs, which is why only operators that affect # the inner scope make semantic sense when applied to InlineFragments. # Here, we ensure that we either have a field name to which the filter applies, # or that the operator affects the inner scope. if (filter_operation_info.field_name is None and op_name not in INNER_SCOPE_VERTEX_FIELD_OPERATORS): raise GraphQLCompilationError(u'The filter with op_name "{}" must be applied on a field. ' u'It may not be applied on a type coercion.'.format(op_name)) fields = ((filter_operation_info.field_name,) if op_name != 'name_or_alias' else ('name', 'alias')) context['metadata'].record_filter_info( location, FilterInfo(fields=fields, op_name=op_name, args=tuple(operator_params)) ) return process_func(filter_operation_info, location, context, operator_params)
def function[process_filter_directive, parameter[filter_operation_info, location, context]]: constant[Return a Filter basic block that corresponds to the filter operation in the directive. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: a Filter basic block that performs the requested filtering operation ] <ast.Tuple object at 0x7da1b170ea70> assign[=] call[name[_get_filter_op_name_and_values], parameter[name[filter_operation_info].directive]] variable[non_comparison_filters] assign[=] dictionary[[<ast.Constant object at 0x7da18dc99060>, <ast.Constant object at 0x7da18dc9b7f0>, <ast.Constant object at 0x7da18dc99810>, <ast.Constant object at 0x7da18dc9ac80>, <ast.Constant object at 0x7da18dc9a110>, <ast.Constant object at 0x7da207f02710>, <ast.Constant object at 0x7da207f03430>], [<ast.Name object at 0x7da207f01ed0>, <ast.Name object at 0x7da207f018d0>, <ast.Name object at 0x7da207f026e0>, <ast.Name object at 0x7da207f00bb0>, <ast.Name object at 0x7da207f03b20>, <ast.Name object at 0x7da207f02470>, <ast.Name object at 0x7da207f035e0>]] variable[all_recognized_filters] assign[=] binary_operation[call[name[frozenset], parameter[call[name[non_comparison_filters].keys, parameter[]]]] <ast.BitOr object at 0x7da2590d6aa0> name[COMPARISON_OPERATORS]] if compare[name[all_recognized_filters] not_equal[!=] name[ALL_OPERATORS]] begin[:] variable[unrecognized_filters] assign[=] binary_operation[name[ALL_OPERATORS] - name[all_recognized_filters]] <ast.Raise object at 0x7da1b1734dc0> if compare[name[op_name] in name[COMPARISON_OPERATORS]] begin[:] variable[process_func] assign[=] call[name[partial], parameter[name[_process_comparison_filter_directive]]] if compare[name[process_func] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1735a80> if <ast.BoolOp object at 0x7da1b1736350> begin[:] <ast.Raise object at 0x7da1b1735150> variable[fields] assign[=] <ast.IfExp object at 0x7da18eb57dc0> call[call[name[context]][constant[metadata]].record_filter_info, parameter[name[location], call[name[FilterInfo], parameter[]]]] return[call[name[process_func], parameter[name[filter_operation_info], name[location], name[context], name[operator_params]]]]
keyword[def] identifier[process_filter_directive] ( identifier[filter_operation_info] , identifier[location] , identifier[context] ): literal[string] identifier[op_name] , identifier[operator_params] = identifier[_get_filter_op_name_and_values] ( identifier[filter_operation_info] . identifier[directive] ) identifier[non_comparison_filters] ={ literal[string] : identifier[_process_name_or_alias_filter_directive] , literal[string] : identifier[_process_between_filter_directive] , literal[string] : identifier[_process_in_collection_filter_directive] , literal[string] : identifier[_process_has_substring_filter_directive] , literal[string] : identifier[_process_contains_filter_directive] , literal[string] : identifier[_process_intersects_filter_directive] , literal[string] : identifier[_process_has_edge_degree_filter_directive] , } identifier[all_recognized_filters] = identifier[frozenset] ( identifier[non_comparison_filters] . identifier[keys] ())| identifier[COMPARISON_OPERATORS] keyword[if] identifier[all_recognized_filters] != identifier[ALL_OPERATORS] : identifier[unrecognized_filters] = identifier[ALL_OPERATORS] - identifier[all_recognized_filters] keyword[raise] identifier[AssertionError] ( literal[string] literal[string] . identifier[format] ( identifier[unrecognized_filters] )) keyword[if] identifier[op_name] keyword[in] identifier[COMPARISON_OPERATORS] : identifier[process_func] = identifier[partial] ( identifier[_process_comparison_filter_directive] , identifier[operator] = identifier[op_name] ) keyword[else] : identifier[process_func] = identifier[non_comparison_filters] . identifier[get] ( identifier[op_name] , keyword[None] ) keyword[if] identifier[process_func] keyword[is] keyword[None] : keyword[raise] identifier[GraphQLCompilationError] ( literal[string] . identifier[format] ( identifier[op_name] )) keyword[if] ( identifier[filter_operation_info] . identifier[field_name] keyword[is] keyword[None] keyword[and] identifier[op_name] keyword[not] keyword[in] identifier[INNER_SCOPE_VERTEX_FIELD_OPERATORS] ): keyword[raise] identifier[GraphQLCompilationError] ( literal[string] literal[string] . identifier[format] ( identifier[op_name] )) identifier[fields] =(( identifier[filter_operation_info] . identifier[field_name] ,) keyword[if] identifier[op_name] != literal[string] keyword[else] ( literal[string] , literal[string] )) identifier[context] [ literal[string] ]. identifier[record_filter_info] ( identifier[location] , identifier[FilterInfo] ( identifier[fields] = identifier[fields] , identifier[op_name] = identifier[op_name] , identifier[args] = identifier[tuple] ( identifier[operator_params] )) ) keyword[return] identifier[process_func] ( identifier[filter_operation_info] , identifier[location] , identifier[context] , identifier[operator_params] )
def process_filter_directive(filter_operation_info, location, context): """Return a Filter basic block that corresponds to the filter operation in the directive. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: a Filter basic block that performs the requested filtering operation """ (op_name, operator_params) = _get_filter_op_name_and_values(filter_operation_info.directive) non_comparison_filters = {u'name_or_alias': _process_name_or_alias_filter_directive, u'between': _process_between_filter_directive, u'in_collection': _process_in_collection_filter_directive, u'has_substring': _process_has_substring_filter_directive, u'contains': _process_contains_filter_directive, u'intersects': _process_intersects_filter_directive, u'has_edge_degree': _process_has_edge_degree_filter_directive} all_recognized_filters = frozenset(non_comparison_filters.keys()) | COMPARISON_OPERATORS if all_recognized_filters != ALL_OPERATORS: unrecognized_filters = ALL_OPERATORS - all_recognized_filters raise AssertionError(u'Some filtering operators are defined but do not have an associated processing function. This is a bug: {}'.format(unrecognized_filters)) # depends on [control=['if'], data=['all_recognized_filters', 'ALL_OPERATORS']] if op_name in COMPARISON_OPERATORS: process_func = partial(_process_comparison_filter_directive, operator=op_name) # depends on [control=['if'], data=['op_name']] else: process_func = non_comparison_filters.get(op_name, None) if process_func is None: raise GraphQLCompilationError(u'Unknown op_name for filter directive: {}'.format(op_name)) # depends on [control=['if'], data=[]] # Operators that do not affect the inner scope require a field name to which they apply. # There is no field name on InlineFragment ASTs, which is why only operators that affect # the inner scope make semantic sense when applied to InlineFragments. # Here, we ensure that we either have a field name to which the filter applies, # or that the operator affects the inner scope. if filter_operation_info.field_name is None and op_name not in INNER_SCOPE_VERTEX_FIELD_OPERATORS: raise GraphQLCompilationError(u'The filter with op_name "{}" must be applied on a field. It may not be applied on a type coercion.'.format(op_name)) # depends on [control=['if'], data=[]] fields = (filter_operation_info.field_name,) if op_name != 'name_or_alias' else ('name', 'alias') context['metadata'].record_filter_info(location, FilterInfo(fields=fields, op_name=op_name, args=tuple(operator_params))) return process_func(filter_operation_info, location, context, operator_params)
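A toy illustration (not compiler code) of the `fields` selection near the end of the function: 'name_or_alias' fans out to two fields, while every other operator applies to the single field the filter sits on.

# Stand-alone sketch; 'uuid' is a placeholder field name.
for op_name in ('=', 'between', 'name_or_alias'):
    fields = (('name', 'alias') if op_name == 'name_or_alias'
              else ('uuid',))
    print(op_name, '->', fields)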
def _resample_residuals(self, stars, epsf): """ Compute normalized residual images for all the input stars. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object The ePSF model. Returns ------- star_imgs : 3D `~numpy.ndarray` A 3D cube containing the resampled residual images. """ shape = (stars.n_good_stars, epsf.shape[0], epsf.shape[1]) star_imgs = np.zeros(shape) for i, star in enumerate(stars.all_good_stars): star_imgs[i, :, :] = self._resample_residual(star, epsf) return star_imgs
def function[_resample_residuals, parameter[self, stars, epsf]]: constant[ Compute normalized residual images for all the input stars. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object The ePSF model. Returns ------- star_imgs : 3D `~numpy.ndarray` A 3D cube containing the resampled residual images. ] variable[shape] assign[=] tuple[[<ast.Attribute object at 0x7da18eb571f0>, <ast.Subscript object at 0x7da18eb549d0>, <ast.Subscript object at 0x7da18eb57dc0>]] variable[star_imgs] assign[=] call[name[np].zeros, parameter[name[shape]]] for taget[tuple[[<ast.Name object at 0x7da18eb56ad0>, <ast.Name object at 0x7da18eb57100>]]] in starred[call[name[enumerate], parameter[name[stars].all_good_stars]]] begin[:] call[name[star_imgs]][tuple[[<ast.Name object at 0x7da18eb579d0>, <ast.Slice object at 0x7da18eb56dd0>, <ast.Slice object at 0x7da18eb54520>]]] assign[=] call[name[self]._resample_residual, parameter[name[star], name[epsf]]] return[name[star_imgs]]
keyword[def] identifier[_resample_residuals] ( identifier[self] , identifier[stars] , identifier[epsf] ): literal[string] identifier[shape] =( identifier[stars] . identifier[n_good_stars] , identifier[epsf] . identifier[shape] [ literal[int] ], identifier[epsf] . identifier[shape] [ literal[int] ]) identifier[star_imgs] = identifier[np] . identifier[zeros] ( identifier[shape] ) keyword[for] identifier[i] , identifier[star] keyword[in] identifier[enumerate] ( identifier[stars] . identifier[all_good_stars] ): identifier[star_imgs] [ identifier[i] ,:,:]= identifier[self] . identifier[_resample_residual] ( identifier[star] , identifier[epsf] ) keyword[return] identifier[star_imgs]
def _resample_residuals(self, stars, epsf): """ Compute normalized residual images for all the input stars. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object The ePSF model. Returns ------- star_imgs : 3D `~numpy.ndarray` A 3D cube containing the resampled residual images. """ shape = (stars.n_good_stars, epsf.shape[0], epsf.shape[1]) star_imgs = np.zeros(shape) for (i, star) in enumerate(stars.all_good_stars): star_imgs[i, :, :] = self._resample_residual(star, epsf) # depends on [control=['for'], data=[]] return star_imgs
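A hedged sketch of the shape contract above; `builder`, `stars` and `epsf` stand for already-constructed objects of the surrounding ePSF-building machinery.

# Sketch only: one 2-D residual slice per good star, on the ePSF grid.
residuals = builder._resample_residuals(stars, epsf)
assert residuals.shape == (stars.n_good_stars,) + epsf.shape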
def update(self): """Update |KInz| based on |HInz| and |LAI|. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(2) >>> hinz(0.2) >>> lai.acker_jun = 1.0 >>> lai.vers_dec = 2.0 >>> derived.kinz.update() >>> from hydpy import round_ >>> round_(derived.kinz.acker_jun) 0.2 >>> round_(derived.kinz.vers_dec) 0.4 """ con = self.subpars.pars.control self(con.hinz*con.lai)
def function[update, parameter[self]]: constant[Update |KInz| based on |HInz| and |LAI|. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(2) >>> hinz(0.2) >>> lai.acker_jun = 1.0 >>> lai.vers_dec = 2.0 >>> derived.kinz.update() >>> from hydpy import round_ >>> round_(derived.kinz.acker_jun) 0.2 >>> round_(derived.kinz.vers_dec) 0.4 ] variable[con] assign[=] name[self].subpars.pars.control call[name[self], parameter[binary_operation[name[con].hinz * name[con].lai]]]
keyword[def] identifier[update] ( identifier[self] ): literal[string] identifier[con] = identifier[self] . identifier[subpars] . identifier[pars] . identifier[control] identifier[self] ( identifier[con] . identifier[hinz] * identifier[con] . identifier[lai] )
def update(self): """Update |KInz| based on |HInz| and |LAI|. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(2) >>> hinz(0.2) >>> lai.acker_jun = 1.0 >>> lai.vers_dec = 2.0 >>> derived.kinz.update() >>> from hydpy import round_ >>> round_(derived.kinz.acker_jun) 0.2 >>> round_(derived.kinz.vers_dec) 0.4 """ con = self.subpars.pars.control self(con.hinz * con.lai)
def getMonthName(self): ''' This exists as a separate method because sometimes events should really belong to more than one month (e.g. class series that persist over multiple months). ''' class_counter = Counter([(x.startTime.year, x.startTime.month) for x in self.eventoccurrence_set.all()]) multiclass_months = [x[0] for x in class_counter.items() if x[1] > 1] all_months = [x[0] for x in class_counter.items()] if multiclass_months: multiclass_months.sort() return '/'.join([month_name[x[1]] for x in multiclass_months]) else: return month_name[min(all_months)[1]]
def function[getMonthName, parameter[self]]: constant[ This exists as a separate method because sometimes events should really belong to more than one month (e.g. class series that persist over multiple months). ] variable[class_counter] assign[=] call[name[Counter], parameter[<ast.ListComp object at 0x7da1b1379ed0>]] variable[multiclass_months] assign[=] <ast.ListComp object at 0x7da1b137a620> variable[all_months] assign[=] <ast.ListComp object at 0x7da1b1378130> if name[multiclass_months] begin[:] call[name[multiclass_months].sort, parameter[]] return[call[constant[/].join, parameter[<ast.ListComp object at 0x7da1b1378460>]]]
keyword[def] identifier[getMonthName] ( identifier[self] ): literal[string] identifier[class_counter] = identifier[Counter] ([( identifier[x] . identifier[startTime] . identifier[year] , identifier[x] . identifier[startTime] . identifier[month] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[eventoccurrence_set] . identifier[all] ()]) identifier[multiclass_months] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[class_counter] . identifier[items] () keyword[if] identifier[x] [ literal[int] ]> literal[int] ] identifier[all_months] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[class_counter] . identifier[items] ()] keyword[if] identifier[multiclass_months] : identifier[multiclass_months] . identifier[sort] () keyword[return] literal[string] . identifier[join] ([ identifier[month_name] [ identifier[x] [ literal[int] ]] keyword[for] identifier[x] keyword[in] identifier[multiclass_months] ]) keyword[else] : keyword[return] identifier[month_name] [ identifier[min] ( identifier[all_months] )[ literal[int] ]]
def getMonthName(self): """ This exists as a separate method because sometimes events should really belong to more than one month (e.g. class series that persist over multiple months). """ class_counter = Counter([(x.startTime.year, x.startTime.month) for x in self.eventoccurrence_set.all()]) multiclass_months = [x[0] for x in class_counter.items() if x[1] > 1] all_months = [x[0] for x in class_counter.items()] if multiclass_months: multiclass_months.sort() return '/'.join([month_name[x[1]] for x in multiclass_months]) # depends on [control=['if'], data=[]] else: return month_name[min(all_months)[1]]
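A stand-alone illustration of the month-picking rule above, using the stdlib calendar.month_name table the method appears to rely on; the (year, month) pairs are made up.

# Months occurring more than once win and are joined with '/'.
from calendar import month_name
from collections import Counter

class_counter = Counter([(2019, 6), (2019, 6), (2019, 7), (2019, 7), (2019, 8)])
multiclass = sorted(k for k, v in class_counter.items() if v > 1)
print('/'.join(month_name[m] for _, m in multiclass))  # June/July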
def init_widget(self):
    """ Initialize the slider and bind its value-changed events. """
    super(UiKitSlider, self).init_widget()
    d = self.declaration
    if d.min:
        self.set_min(d.min)
    if d.max:
        self.set_max(d.max)
    if d.progress:
        self.set_progress(d.progress)

    #: A really ugly way to add the target
    #: would be nice if we could just pass the block pointer here :)
    self.get_app().bridge.addTarget(
        self.widget,
        forControlEvents=UISlider.UIControlEventValueChanged,
        andCallback=self.widget.getId(),
        usingMethod="onValueChanged",
        withValues=["value"]#,"selected"]
    )

    self.widget.onValueChanged.connect(self.on_checked_changed)
def function[init_widget, parameter[self]]: constant[ Initialize the slider and bind its value-changed events. ] call[call[name[super], parameter[name[UiKitSlider], name[self]]].init_widget, parameter[]] variable[d] assign[=] name[self].declaration if name[d].min begin[:] call[name[self].set_min, parameter[name[d].min]] if name[d].max begin[:] call[name[self].set_max, parameter[name[d].max]] if name[d].progress begin[:] call[name[self].set_progress, parameter[name[d].progress]] call[call[name[self].get_app, parameter[]].bridge.addTarget, parameter[name[self].widget]] call[name[self].widget.onValueChanged.connect, parameter[name[self].on_checked_changed]]
keyword[def] identifier[init_widget] ( identifier[self] ): literal[string] identifier[super] ( identifier[UiKitSlider] , identifier[self] ). identifier[init_widget] () identifier[d] = identifier[self] . identifier[declaration] keyword[if] identifier[d] . identifier[min] : identifier[self] . identifier[set_min] ( identifier[d] . identifier[min] ) keyword[if] identifier[d] . identifier[max] : identifier[self] . identifier[set_max] ( identifier[d] . identifier[max] ) keyword[if] identifier[d] . identifier[progress] : identifier[self] . identifier[set_progress] ( identifier[d] . identifier[progress] ) identifier[self] . identifier[get_app] (). identifier[bridge] . identifier[addTarget] ( identifier[self] . identifier[widget] , identifier[forControlEvents] = identifier[UISlider] . identifier[UIControlEventValueChanged] , identifier[andCallback] = identifier[self] . identifier[widget] . identifier[getId] (), identifier[usingMethod] = literal[string] , identifier[withValues] =[ literal[string] ] ) identifier[self] . identifier[widget] . identifier[onValueChanged] . identifier[connect] ( identifier[self] . identifier[on_checked_changed] )
def init_widget(self): """ Bind the on property to the checked state """ super(UiKitSlider, self).init_widget() d = self.declaration if d.min: self.set_min(d.min) # depends on [control=['if'], data=[]] if d.max: self.set_max(d.max) # depends on [control=['if'], data=[]] if d.progress: self.set_progress(d.progress) # depends on [control=['if'], data=[]] #: A really ugly way to add the target #: would be nice if we could just pass the block pointer here :) #,"selected"] self.get_app().bridge.addTarget(self.widget, forControlEvents=UISlider.UIControlEventValueChanged, andCallback=self.widget.getId(), usingMethod='onValueChanged', withValues=['value']) self.widget.onValueChanged.connect(self.on_checked_changed)
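Stripped of the enaml-native bridge, init_widget follows a guarded property-sync pattern: push only the declaration values that are truthy. A plain-Python sketch (both classes here are hypothetical stand-ins), which also exposes the quirk that a falsy value such as min=0 is silently skipped:

class Declaration:
    min, max, progress = 0, 100, 25

class FakeSlider:
    def set_min(self, v): print('min ->', v)
    def set_max(self, v): print('max ->', v)
    def set_progress(self, v): print('progress ->', v)

d, proxy = Declaration(), FakeSlider()
for attr in ('min', 'max', 'progress'):
    value = getattr(d, attr)
    if value:  # same truthiness guard as the method above
        getattr(proxy, 'set_' + attr)(value)
# prints: max -> 100, progress -> 25 (min is 0, so it is skipped)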
def articles(self): """ articles getter """ if self._articles is None: self._articles = [] for doc in self.docs: # ensure all fields in the "fl" are in the doc to address # issue #38 for k in set(self.fl).difference(doc.keys()): doc[k] = None self._articles.append(Article(**doc)) return self._articles
def function[articles, parameter[self]]: constant[ articles getter ] if compare[name[self]._articles is constant[None]] begin[:] name[self]._articles assign[=] list[[]] for taget[name[doc]] in starred[name[self].docs] begin[:] for taget[name[k]] in starred[call[call[name[set], parameter[name[self].fl]].difference, parameter[call[name[doc].keys, parameter[]]]]] begin[:] call[name[doc]][name[k]] assign[=] constant[None] call[name[self]._articles.append, parameter[call[name[Article], parameter[]]]] return[name[self]._articles]
keyword[def] identifier[articles] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_articles] keyword[is] keyword[None] : identifier[self] . identifier[_articles] =[] keyword[for] identifier[doc] keyword[in] identifier[self] . identifier[docs] : keyword[for] identifier[k] keyword[in] identifier[set] ( identifier[self] . identifier[fl] ). identifier[difference] ( identifier[doc] . identifier[keys] ()): identifier[doc] [ identifier[k] ]= keyword[None] identifier[self] . identifier[_articles] . identifier[append] ( identifier[Article] (** identifier[doc] )) keyword[return] identifier[self] . identifier[_articles]
def articles(self): """ articles getter """ if self._articles is None: self._articles = [] for doc in self.docs: # ensure all fields in the "fl" are in the doc to address # issue #38 for k in set(self.fl).difference(doc.keys()): doc[k] = None # depends on [control=['for'], data=['k']] self._articles.append(Article(**doc)) # depends on [control=['for'], data=['doc']] # depends on [control=['if'], data=[]] return self._articles
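The field-normalization step in isolation: any key listed in fl but absent from a doc is filled with None before Article(**doc) is constructed (sample values are made up):

fl = ['bibcode', 'title', 'author']
doc = {'bibcode': 'sample-bibcode'}
for k in set(fl).difference(doc.keys()):
    doc[k] = None
print(sorted(doc.items()))
# [('author', None), ('bibcode', 'sample-bibcode'), ('title', None)]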
def extract_table_names(query): """ Extract table names from an SQL query. """ # a good old fashioned regex. turns out this worked better than actually parsing the code tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE) tables = [tbl for block in tables_blocks for tbl in re.findall(r'\w+', block)] return set(tables)
def function[extract_table_names, parameter[query]]: constant[ Extract table names from an SQL query. ] variable[tables_blocks] assign[=] call[name[re].findall, parameter[constant[(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)], name[query], name[re].IGNORECASE]] variable[tables] assign[=] <ast.ListComp object at 0x7da20e960b80> return[call[name[set], parameter[name[tables]]]]
keyword[def] identifier[extract_table_names] ( identifier[query] ): literal[string] identifier[tables_blocks] = identifier[re] . identifier[findall] ( literal[string] , identifier[query] , identifier[re] . identifier[IGNORECASE] ) identifier[tables] =[ identifier[tbl] keyword[for] identifier[block] keyword[in] identifier[tables_blocks] keyword[for] identifier[tbl] keyword[in] identifier[re] . identifier[findall] ( literal[string] , identifier[block] )] keyword[return] identifier[set] ( identifier[tables] )
def extract_table_names(query): """ Extract table names from an SQL query. """ # a good old fashioned regex. turns out this worked better than actually parsing the code tables_blocks = re.findall('(?:FROM|JOIN)\\s+(\\w+(?:\\s*,\\s*\\w+)*)', query, re.IGNORECASE) tables = [tbl for block in tables_blocks for tbl in re.findall('\\w+', block)] return set(tables)
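The function depends only on re, so it can be exercised directly; note the pattern also splits comma-separated FROM lists into individual tables:

import re

def extract_table_names(query):
    tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)',
                               query, re.IGNORECASE)
    return {tbl for block in tables_blocks
            for tbl in re.findall(r'\w+', block)}

q = 'SELECT * FROM users, orders JOIN items ON items.uid = users.id'
print(extract_table_names(q) == {'users', 'orders', 'items'})  # True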
def emit(self, *args, **kwargs): """ Emit the signal. :param args: The arguments. :param kwargs: The keyword arguments. All the connected callbacks will be called synchronously in order of their registration. """ for callback in self.callbacks: callback(*args, **kwargs)
def function[emit, parameter[self]]: constant[ Emit the signal. :param args: The arguments. :param kwargs: The keyword arguments. All the connected callbacks will be called synchronously in order of their registration. ] for taget[name[callback]] in starred[name[self].callbacks] begin[:] call[name[callback], parameter[<ast.Starred object at 0x7da18bc71a80>]]
keyword[def] identifier[emit] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[for] identifier[callback] keyword[in] identifier[self] . identifier[callbacks] : identifier[callback] (* identifier[args] ,** identifier[kwargs] )
def emit(self, *args, **kwargs): """ Emit the signal. :param args: The arguments. :param kwargs: The keyword arguments. All the connected callbacks will be called synchronously in order of their registration. """ for callback in self.callbacks: callback(*args, **kwargs) # depends on [control=['for'], data=['callback']]
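A complete miniature of the signal class this method belongs to, assuming only that callbacks is a list populated by a connect method:

class Signal:
    def __init__(self):
        self.callbacks = []

    def connect(self, callback):
        self.callbacks.append(callback)

    def emit(self, *args, **kwargs):
        for callback in self.callbacks:  # synchronous, in registration order
            callback(*args, **kwargs)

sig = Signal()
sig.connect(lambda msg: print('first:', msg))
sig.connect(lambda msg: print('second:', msg))
sig.emit('ready')
# first: ready
# second: ready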
def write_def_decl(self, node, identifiers): """write a locally-available callable referencing a top-level def""" funcname = node.funcname namedecls = node.get_argument_expressions() nameargs = node.get_argument_expressions(as_call=True) if not self.in_def and ( len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0): nameargs.insert(0, 'context._locals(__M_locals)') else: nameargs.insert(0, 'context') self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls))) self.printer.writeline( "return render_%s(%s)" % (funcname, ",".join(nameargs))) self.printer.writeline(None)
def function[write_def_decl, parameter[self, node, identifiers]]: constant[write a locally-available callable referencing a top-level def] variable[funcname] assign[=] name[node].funcname variable[namedecls] assign[=] call[name[node].get_argument_expressions, parameter[]] variable[nameargs] assign[=] call[name[node].get_argument_expressions, parameter[]] if <ast.BoolOp object at 0x7da1b1d34e80> begin[:] call[name[nameargs].insert, parameter[constant[0], constant[context._locals(__M_locals)]]] call[name[self].printer.writeline, parameter[binary_operation[constant[def %s(%s):] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2294130>, <ast.Call object at 0x7da1b22979d0>]]]]] call[name[self].printer.writeline, parameter[binary_operation[constant[return render_%s(%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2296f50>, <ast.Call object at 0x7da1b2295d50>]]]]] call[name[self].printer.writeline, parameter[constant[None]]]
keyword[def] identifier[write_def_decl] ( identifier[self] , identifier[node] , identifier[identifiers] ): literal[string] identifier[funcname] = identifier[node] . identifier[funcname] identifier[namedecls] = identifier[node] . identifier[get_argument_expressions] () identifier[nameargs] = identifier[node] . identifier[get_argument_expressions] ( identifier[as_call] = keyword[True] ) keyword[if] keyword[not] identifier[self] . identifier[in_def] keyword[and] ( identifier[len] ( identifier[self] . identifier[identifiers] . identifier[locally_assigned] )> literal[int] keyword[or] identifier[len] ( identifier[self] . identifier[identifiers] . identifier[argument_declared] )> literal[int] ): identifier[nameargs] . identifier[insert] ( literal[int] , literal[string] ) keyword[else] : identifier[nameargs] . identifier[insert] ( literal[int] , literal[string] ) identifier[self] . identifier[printer] . identifier[writeline] ( literal[string] %( identifier[funcname] , literal[string] . identifier[join] ( identifier[namedecls] ))) identifier[self] . identifier[printer] . identifier[writeline] ( literal[string] %( identifier[funcname] , literal[string] . identifier[join] ( identifier[nameargs] ))) identifier[self] . identifier[printer] . identifier[writeline] ( keyword[None] )
def write_def_decl(self, node, identifiers): """write a locally-available callable referencing a top-level def""" funcname = node.funcname namedecls = node.get_argument_expressions() nameargs = node.get_argument_expressions(as_call=True) if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0): nameargs.insert(0, 'context._locals(__M_locals)') # depends on [control=['if'], data=[]] else: nameargs.insert(0, 'context') self.printer.writeline('def %s(%s):' % (funcname, ','.join(namedecls))) self.printer.writeline('return render_%s(%s)' % (funcname, ','.join(nameargs))) self.printer.writeline(None)
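For orientation, this is roughly the source the writer emits for a top-level def named greet(name) when locals have been assigned; the context argument is prepended to the call but not to the declaration (illustrative shape only, not captured Mako output):

def greet(name):
    return render_greet(context._locals(__M_locals),name)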
def get_descendants(self, strategy="levelorder", is_leaf_fn=None): """ Returns a list of all (leaves and internal) descendant nodes.""" return [n for n in self.iter_descendants( strategy=strategy, is_leaf_fn=is_leaf_fn)]
def function[get_descendants, parameter[self, strategy, is_leaf_fn]]: constant[ Returns a list of all (leaves and internal) descendant nodes.] return[<ast.ListComp object at 0x7da1b0fec880>]
keyword[def] identifier[get_descendants] ( identifier[self] , identifier[strategy] = literal[string] , identifier[is_leaf_fn] = keyword[None] ): literal[string] keyword[return] [ identifier[n] keyword[for] identifier[n] keyword[in] identifier[self] . identifier[iter_descendants] ( identifier[strategy] = identifier[strategy] , identifier[is_leaf_fn] = identifier[is_leaf_fn] )]
def get_descendants(self, strategy='levelorder', is_leaf_fn=None): """ Returns a list of all (leaves and internal) descendant nodes.""" return [n for n in self.iter_descendants(strategy=strategy, is_leaf_fn=is_leaf_fn)]
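The method is just a list() over iter_descendants; a toy node class shows the contract (this stand-in ignores the strategy and is_leaf_fn options of the real API):

class Node:
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

    def iter_descendants(self):
        for child in self.children:
            yield child
            yield from child.iter_descendants()

    def get_descendants(self):
        return [n for n in self.iter_descendants()]

root = Node('root', [Node('a', [Node('b')]), Node('c')])
print([n.name for n in root.get_descendants()])  # ['a', 'b', 'c']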
def switch(poi): """ Zaps into a specific product specified by switch context to the product of interest(poi) A poi is: sdox:dev - for product "dev" located in container "sdox" If poi does not contain a ":" it is interpreted as product name implying that a product within this container is already active. So if this task is called with ape zap prod (and the corresponding container is already zapped in), than only the product is switched. After the context has been switched to sdox:dev additional commands may be available that are relevant to sdox:dev :param poi: product of interest, string: <container_name>:<product_name> or <product_name>. """ parts = poi.split(':') if len(parts) == 2: container_name, product_name = parts elif len(parts) == 1 and os.environ.get('CONTAINER_NAME'): # interpret poi as product name if already zapped into a product in order # to enable simply switching products by doing ape zap prod. container_name = os.environ.get('CONTAINER_NAME') product_name = parts[0] else: print('unable to find poi: ', poi) sys.exit(1) if container_name not in tasks.get_containers(): raise ContainerNotFound('No such container %s' % container_name) elif product_name not in tasks.get_products(container_name): raise ProductNotFound('No such product %s' % product_name) else: print(SWITCH_TEMPLATE.format( source_header=tasks.conf.SOURCE_HEADER, container_name=container_name, product_name=product_name ))
def function[switch, parameter[poi]]: constant[ Zaps into a specific product specified by switch context to the product of interest(poi) A poi is: sdox:dev - for product "dev" located in container "sdox" If poi does not contain a ":" it is interpreted as product name implying that a product within this container is already active. So if this task is called with ape zap prod (and the corresponding container is already zapped in), than only the product is switched. After the context has been switched to sdox:dev additional commands may be available that are relevant to sdox:dev :param poi: product of interest, string: <container_name>:<product_name> or <product_name>. ] variable[parts] assign[=] call[name[poi].split, parameter[constant[:]]] if compare[call[name[len], parameter[name[parts]]] equal[==] constant[2]] begin[:] <ast.Tuple object at 0x7da18fe91030> assign[=] name[parts] if compare[name[container_name] <ast.NotIn object at 0x7da2590d7190> call[name[tasks].get_containers, parameter[]]] begin[:] <ast.Raise object at 0x7da18fe90760>
keyword[def] identifier[switch] ( identifier[poi] ): literal[string] identifier[parts] = identifier[poi] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[parts] )== literal[int] : identifier[container_name] , identifier[product_name] = identifier[parts] keyword[elif] identifier[len] ( identifier[parts] )== literal[int] keyword[and] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ): identifier[container_name] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] ) identifier[product_name] = identifier[parts] [ literal[int] ] keyword[else] : identifier[print] ( literal[string] , identifier[poi] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[if] identifier[container_name] keyword[not] keyword[in] identifier[tasks] . identifier[get_containers] (): keyword[raise] identifier[ContainerNotFound] ( literal[string] % identifier[container_name] ) keyword[elif] identifier[product_name] keyword[not] keyword[in] identifier[tasks] . identifier[get_products] ( identifier[container_name] ): keyword[raise] identifier[ProductNotFound] ( literal[string] % identifier[product_name] ) keyword[else] : identifier[print] ( identifier[SWITCH_TEMPLATE] . identifier[format] ( identifier[source_header] = identifier[tasks] . identifier[conf] . identifier[SOURCE_HEADER] , identifier[container_name] = identifier[container_name] , identifier[product_name] = identifier[product_name] ))
def switch(poi): """ Zaps into a specific product specified by switch context to the product of interest(poi) A poi is: sdox:dev - for product "dev" located in container "sdox" If poi does not contain a ":" it is interpreted as product name implying that a product within this container is already active. So if this task is called with ape zap prod (and the corresponding container is already zapped in), than only the product is switched. After the context has been switched to sdox:dev additional commands may be available that are relevant to sdox:dev :param poi: product of interest, string: <container_name>:<product_name> or <product_name>. """ parts = poi.split(':') if len(parts) == 2: (container_name, product_name) = parts # depends on [control=['if'], data=[]] elif len(parts) == 1 and os.environ.get('CONTAINER_NAME'): # interpret poi as product name if already zapped into a product in order # to enable simply switching products by doing ape zap prod. container_name = os.environ.get('CONTAINER_NAME') product_name = parts[0] # depends on [control=['if'], data=[]] else: print('unable to find poi: ', poi) sys.exit(1) if container_name not in tasks.get_containers(): raise ContainerNotFound('No such container %s' % container_name) # depends on [control=['if'], data=['container_name']] elif product_name not in tasks.get_products(container_name): raise ProductNotFound('No such product %s' % product_name) # depends on [control=['if'], data=['product_name']] else: print(SWITCH_TEMPLATE.format(source_header=tasks.conf.SOURCE_HEADER, container_name=container_name, product_name=product_name))
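The poi-parsing rule on its own: a container:product string splits directly, while a bare product name falls back to the CONTAINER_NAME environment variable (parse_poi is a hypothetical extraction, not part of the module):

import os

def parse_poi(poi):
    parts = poi.split(':')
    if len(parts) == 2:
        return tuple(parts)
    if len(parts) == 1 and os.environ.get('CONTAINER_NAME'):
        return os.environ['CONTAINER_NAME'], parts[0]
    raise SystemExit('unable to find poi: %s' % poi)

os.environ['CONTAINER_NAME'] = 'sdox'
print(parse_poi('sdox:dev'))  # ('sdox', 'dev')
print(parse_poi('prod'))      # ('sdox', 'prod')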
def set(self, subject_id, entity_id, info, timestamp=0): """ Stores session information in the cache. Assumes that the subject_id is unique within the context of the Service Provider. :param subject_id: The subject identifier :param entity_id: The identifier of the entity_id/receiver of an assertion :param info: The session info, the assertion is part of this :param timestamp: A time after which the assertion is not valid. """ entities = self._cache.get(subject_id) if not entities: entities = [] subjects = self._cache.get("subjects") if not subjects: subjects = [] if subject_id not in subjects: subjects.append(subject_id) if not self._cache.set("subjects", subjects): raise CacheError("set failed") if entity_id not in entities: entities.append(entity_id) if not self._cache.set(subject_id, entities): raise CacheError("set failed") # Should use memcache's expire if not self._cache.set(_key(subject_id, entity_id), (timestamp, info)): raise CacheError("set failed")
def function[set, parameter[self, subject_id, entity_id, info, timestamp]]: constant[ Stores session information in the cache. Assumes that the subject_id is unique within the context of the Service Provider. :param subject_id: The subject identifier :param entity_id: The identifier of the entity_id/receiver of an assertion :param info: The session info, the assertion is part of this :param timestamp: A time after which the assertion is not valid. ] variable[entities] assign[=] call[name[self]._cache.get, parameter[name[subject_id]]] if <ast.UnaryOp object at 0x7da18f812710> begin[:] variable[entities] assign[=] list[[]] variable[subjects] assign[=] call[name[self]._cache.get, parameter[constant[subjects]]] if <ast.UnaryOp object at 0x7da18f811270> begin[:] variable[subjects] assign[=] list[[]] if compare[name[subject_id] <ast.NotIn object at 0x7da2590d7190> name[subjects]] begin[:] call[name[subjects].append, parameter[name[subject_id]]] if <ast.UnaryOp object at 0x7da18f810070> begin[:] <ast.Raise object at 0x7da18f8104f0> if compare[name[entity_id] <ast.NotIn object at 0x7da2590d7190> name[entities]] begin[:] call[name[entities].append, parameter[name[entity_id]]] if <ast.UnaryOp object at 0x7da18f812410> begin[:] <ast.Raise object at 0x7da18f810df0> if <ast.UnaryOp object at 0x7da18f811c00> begin[:] <ast.Raise object at 0x7da18f810100>
keyword[def] identifier[set] ( identifier[self] , identifier[subject_id] , identifier[entity_id] , identifier[info] , identifier[timestamp] = literal[int] ): literal[string] identifier[entities] = identifier[self] . identifier[_cache] . identifier[get] ( identifier[subject_id] ) keyword[if] keyword[not] identifier[entities] : identifier[entities] =[] identifier[subjects] = identifier[self] . identifier[_cache] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[subjects] : identifier[subjects] =[] keyword[if] identifier[subject_id] keyword[not] keyword[in] identifier[subjects] : identifier[subjects] . identifier[append] ( identifier[subject_id] ) keyword[if] keyword[not] identifier[self] . identifier[_cache] . identifier[set] ( literal[string] , identifier[subjects] ): keyword[raise] identifier[CacheError] ( literal[string] ) keyword[if] identifier[entity_id] keyword[not] keyword[in] identifier[entities] : identifier[entities] . identifier[append] ( identifier[entity_id] ) keyword[if] keyword[not] identifier[self] . identifier[_cache] . identifier[set] ( identifier[subject_id] , identifier[entities] ): keyword[raise] identifier[CacheError] ( literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[_cache] . identifier[set] ( identifier[_key] ( identifier[subject_id] , identifier[entity_id] ),( identifier[timestamp] , identifier[info] )): keyword[raise] identifier[CacheError] ( literal[string] )
def set(self, subject_id, entity_id, info, timestamp=0): """ Stores session information in the cache. Assumes that the subject_id is unique within the context of the Service Provider. :param subject_id: The subject identifier :param entity_id: The identifier of the entity_id/receiver of an assertion :param info: The session info, the assertion is part of this :param timestamp: A time after which the assertion is not valid. """ entities = self._cache.get(subject_id) if not entities: entities = [] subjects = self._cache.get('subjects') if not subjects: subjects = [] # depends on [control=['if'], data=[]] if subject_id not in subjects: subjects.append(subject_id) if not self._cache.set('subjects', subjects): raise CacheError('set failed') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['subject_id', 'subjects']] # depends on [control=['if'], data=[]] if entity_id not in entities: entities.append(entity_id) if not self._cache.set(subject_id, entities): raise CacheError('set failed') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['entity_id', 'entities']] # Should use memcache's expire if not self._cache.set(_key(subject_id, entity_id), (timestamp, info)): raise CacheError('set failed') # depends on [control=['if'], data=[]]
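After one successful set('subj1', 'idp1', info, 0) on an empty cache, the three kinds of keys it maintains look like this; the plain dict and the _key helper below are assumptions used for illustration:

def _key(subject_id, entity_id):
    # assumption: the real helper combines the two ids into one cache key
    return '%s|%s' % (subject_id, entity_id)

subject_id, entity_id, info, timestamp = 'subj1', 'idp1', {'ava': {}}, 0
cache = {
    'subjects': [subject_id],                       # registry of all subjects
    subject_id: [entity_id],                        # entities seen per subject
    _key(subject_id, entity_id): (timestamp, info)  # the session info itself
}
print(cache[_key('subj1', 'idp1')])  # (0, {'ava': {}})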
def items_overlap_with_suffix(left, lsuffix, right, rsuffix): """ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ to_rename = left.intersection(right) if len(to_rename) == 0: return left, right else: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: ' '{rename}'.format(rename=to_rename)) def renamer(x, suffix): """Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ if x in to_rename and suffix is not None: return '{x}{suffix}'.format(x=x, suffix=suffix) return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) return (_transform_index(left, lrenamer), _transform_index(right, rrenamer))
def function[items_overlap_with_suffix, parameter[left, lsuffix, right, rsuffix]]: constant[ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. ] variable[to_rename] assign[=] call[name[left].intersection, parameter[name[right]]] if compare[call[name[len], parameter[name[to_rename]]] equal[==] constant[0]] begin[:] return[tuple[[<ast.Name object at 0x7da18f00cc70>, <ast.Name object at 0x7da18f00c730>]]]
keyword[def] identifier[items_overlap_with_suffix] ( identifier[left] , identifier[lsuffix] , identifier[right] , identifier[rsuffix] ): literal[string] identifier[to_rename] = identifier[left] . identifier[intersection] ( identifier[right] ) keyword[if] identifier[len] ( identifier[to_rename] )== literal[int] : keyword[return] identifier[left] , identifier[right] keyword[else] : keyword[if] keyword[not] identifier[lsuffix] keyword[and] keyword[not] identifier[rsuffix] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[rename] = identifier[to_rename] )) keyword[def] identifier[renamer] ( identifier[x] , identifier[suffix] ): literal[string] keyword[if] identifier[x] keyword[in] identifier[to_rename] keyword[and] identifier[suffix] keyword[is] keyword[not] keyword[None] : keyword[return] literal[string] . identifier[format] ( identifier[x] = identifier[x] , identifier[suffix] = identifier[suffix] ) keyword[return] identifier[x] identifier[lrenamer] = identifier[partial] ( identifier[renamer] , identifier[suffix] = identifier[lsuffix] ) identifier[rrenamer] = identifier[partial] ( identifier[renamer] , identifier[suffix] = identifier[rsuffix] ) keyword[return] ( identifier[_transform_index] ( identifier[left] , identifier[lrenamer] ), identifier[_transform_index] ( identifier[right] , identifier[rrenamer] ))
def items_overlap_with_suffix(left, lsuffix, right, rsuffix): """ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ to_rename = left.intersection(right) if len(to_rename) == 0: return (left, right) # depends on [control=['if'], data=[]] else: if not lsuffix and (not rsuffix): raise ValueError('columns overlap but no suffix specified: {rename}'.format(rename=to_rename)) # depends on [control=['if'], data=[]] def renamer(x, suffix): """Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ if x in to_rename and suffix is not None: return '{x}{suffix}'.format(x=x, suffix=suffix) # depends on [control=['if'], data=[]] return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) return (_transform_index(left, lrenamer), _transform_index(right, rrenamer))
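The renamer closure in isolation, showing that only overlapping labels receive a suffix:

from functools import partial

to_rename = {'key'}

def renamer(x, suffix):
    if x in to_rename and suffix is not None:
        return '{x}{suffix}'.format(x=x, suffix=suffix)
    return x

lrenamer = partial(renamer, suffix='_x')
print([lrenamer(c) for c in ['key', 'left_only']])  # ['key_x', 'left_only']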
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None, early_stopping_rounds=None, verbose=True): # pylint: disable = attribute-defined-outside-init,arguments-differ """ Fit gradient boosting classifier Parameters ---------- X : array_like Feature matrix y : array_like Labels sample_weight : array_like Weight for each instance eval_set : list, optional A list of (X, y) pairs to use as a validation set for early-stopping eval_metric : str, callable, optional If a str, should be a built-in evaluation metric to use. See doc/parameter.md. If callable, a custom evaluation metric. The call signature is func(y_predicted, y_true) where y_true will be a DMatrix object such that you may need to call the get_label method. It must return a str, value pair where the str is a name for the evaluation and value is the value of the evaluation function. This objective is always minimized. early_stopping_rounds : int, optional Activates early stopping. Validation error needs to decrease at least every <early_stopping_rounds> round(s) to continue training. Requires at least one item in evals. If there's more than one, will use the last. Returns the model from the last iteration (not the best one). If early stopping occurs, the model will have two additional fields: bst.best_score and bst.best_iteration. verbose : bool If `verbose` and an evaluation set is used, writes the evaluation metric measured on the validation set to stderr. """ evals_result = {} self.classes_ = list(np.unique(y)) self.n_classes_ = len(self.classes_) if self.n_classes_ > 2: # Switch to using a multiclass objective in the underlying XGB instance self.objective = "multi:softprob" xgb_options = self.get_xgb_params() xgb_options['num_class'] = self.n_classes_ else: xgb_options = self.get_xgb_params() feval = eval_metric if callable(eval_metric) else None if eval_metric is not None: if callable(eval_metric): eval_metric = None else: xgb_options.update({"eval_metric": eval_metric}) if eval_set is not None: # TODO: use sample_weight if given? evals = list(DMatrix(x[0], label=x[1]) for x in eval_set) nevals = len(evals) eval_names = ["validation_{}".format(i) for i in range(nevals)] evals = list(zip(evals, eval_names)) else: evals = () self._le = LabelEncoder().fit(y) training_labels = self._le.transform(y) if sample_weight is not None: train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight, missing=self.missing) else: train_dmatrix = DMatrix(X, label=training_labels, missing=self.missing) self._Booster = train(xgb_options, train_dmatrix, self.n_estimators, evals=evals, early_stopping_rounds=early_stopping_rounds, evals_result=evals_result, feval=feval, verbose_eval=verbose) if evals_result: for val in evals_result.items(): evals_result_key = list(val[1].keys())[0] evals_result[val[0]][evals_result_key] = val[1][evals_result_key] self.evals_result_ = evals_result if early_stopping_rounds is not None: self.best_score = self._Booster.best_score self.best_iteration = self._Booster.best_iteration return self
def function[fit, parameter[self, X, y, sample_weight, eval_set, eval_metric, early_stopping_rounds, verbose]]: constant[ Fit gradient boosting classifier Parameters ---------- X : array_like Feature matrix y : array_like Labels sample_weight : array_like Weight for each instance eval_set : list, optional A list of (X, y) pairs to use as a validation set for early-stopping eval_metric : str, callable, optional If a str, should be a built-in evaluation metric to use. See doc/parameter.md. If callable, a custom evaluation metric. The call signature is func(y_predicted, y_true) where y_true will be a DMatrix object such that you may need to call the get_label method. It must return a str, value pair where the str is a name for the evaluation and value is the value of the evaluation function. This objective is always minimized. early_stopping_rounds : int, optional Activates early stopping. Validation error needs to decrease at least every <early_stopping_rounds> round(s) to continue training. Requires at least one item in evals. If there's more than one, will use the last. Returns the model from the last iteration (not the best one). If early stopping occurs, the model will have two additional fields: bst.best_score and bst.best_iteration. verbose : bool If `verbose` and an evaluation set is used, writes the evaluation metric measured on the validation set to stderr. ] variable[evals_result] assign[=] dictionary[[], []] name[self].classes_ assign[=] call[name[list], parameter[call[name[np].unique, parameter[name[y]]]]] name[self].n_classes_ assign[=] call[name[len], parameter[name[self].classes_]] if compare[name[self].n_classes_ greater[>] constant[2]] begin[:] name[self].objective assign[=] constant[multi:softprob] variable[xgb_options] assign[=] call[name[self].get_xgb_params, parameter[]] call[name[xgb_options]][constant[num_class]] assign[=] name[self].n_classes_ variable[feval] assign[=] <ast.IfExp object at 0x7da1b1fa4190> if compare[name[eval_metric] is_not constant[None]] begin[:] if call[name[callable], parameter[name[eval_metric]]] begin[:] variable[eval_metric] assign[=] constant[None] if compare[name[eval_set] is_not constant[None]] begin[:] variable[evals] assign[=] call[name[list], parameter[<ast.GeneratorExp object at 0x7da1b1fa4550>]] variable[nevals] assign[=] call[name[len], parameter[name[evals]]] variable[eval_names] assign[=] <ast.ListComp object at 0x7da1b1fa7940> variable[evals] assign[=] call[name[list], parameter[call[name[zip], parameter[name[evals], name[eval_names]]]]] name[self]._le assign[=] call[call[name[LabelEncoder], parameter[]].fit, parameter[name[y]]] variable[training_labels] assign[=] call[name[self]._le.transform, parameter[name[y]]] if compare[name[sample_weight] is_not constant[None]] begin[:] variable[train_dmatrix] assign[=] call[name[DMatrix], parameter[name[X]]] name[self]._Booster assign[=] call[name[train], parameter[name[xgb_options], name[train_dmatrix], name[self].n_estimators]] if name[evals_result] begin[:] for taget[name[val]] in starred[call[name[evals_result].items, parameter[]]] begin[:] variable[evals_result_key] assign[=] call[call[name[list], parameter[call[call[name[val]][constant[1]].keys, parameter[]]]]][constant[0]] call[call[name[evals_result]][call[name[val]][constant[0]]]][name[evals_result_key]] assign[=] call[call[name[val]][constant[1]]][name[evals_result_key]] name[self].evals_result_ assign[=] name[evals_result] if compare[name[early_stopping_rounds] is_not constant[None]] begin[:] name[self].best_score assign[=] name[self]._Booster.best_score name[self].best_iteration assign[=] name[self]._Booster.best_iteration return[name[self]]
keyword[def] identifier[fit] ( identifier[self] , identifier[X] , identifier[y] , identifier[sample_weight] = keyword[None] , identifier[eval_set] = keyword[None] , identifier[eval_metric] = keyword[None] , identifier[early_stopping_rounds] = keyword[None] , identifier[verbose] = keyword[True] ): literal[string] identifier[evals_result] ={} identifier[self] . identifier[classes_] = identifier[list] ( identifier[np] . identifier[unique] ( identifier[y] )) identifier[self] . identifier[n_classes_] = identifier[len] ( identifier[self] . identifier[classes_] ) keyword[if] identifier[self] . identifier[n_classes_] > literal[int] : identifier[self] . identifier[objective] = literal[string] identifier[xgb_options] = identifier[self] . identifier[get_xgb_params] () identifier[xgb_options] [ literal[string] ]= identifier[self] . identifier[n_classes_] keyword[else] : identifier[xgb_options] = identifier[self] . identifier[get_xgb_params] () identifier[feval] = identifier[eval_metric] keyword[if] identifier[callable] ( identifier[eval_metric] ) keyword[else] keyword[None] keyword[if] identifier[eval_metric] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[callable] ( identifier[eval_metric] ): identifier[eval_metric] = keyword[None] keyword[else] : identifier[xgb_options] . identifier[update] ({ literal[string] : identifier[eval_metric] }) keyword[if] identifier[eval_set] keyword[is] keyword[not] keyword[None] : identifier[evals] = identifier[list] ( identifier[DMatrix] ( identifier[x] [ literal[int] ], identifier[label] = identifier[x] [ literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[eval_set] ) identifier[nevals] = identifier[len] ( identifier[evals] ) identifier[eval_names] =[ literal[string] . identifier[format] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nevals] )] identifier[evals] = identifier[list] ( identifier[zip] ( identifier[evals] , identifier[eval_names] )) keyword[else] : identifier[evals] =() identifier[self] . identifier[_le] = identifier[LabelEncoder] (). identifier[fit] ( identifier[y] ) identifier[training_labels] = identifier[self] . identifier[_le] . identifier[transform] ( identifier[y] ) keyword[if] identifier[sample_weight] keyword[is] keyword[not] keyword[None] : identifier[train_dmatrix] = identifier[DMatrix] ( identifier[X] , identifier[label] = identifier[training_labels] , identifier[weight] = identifier[sample_weight] , identifier[missing] = identifier[self] . identifier[missing] ) keyword[else] : identifier[train_dmatrix] = identifier[DMatrix] ( identifier[X] , identifier[label] = identifier[training_labels] , identifier[missing] = identifier[self] . identifier[missing] ) identifier[self] . identifier[_Booster] = identifier[train] ( identifier[xgb_options] , identifier[train_dmatrix] , identifier[self] . identifier[n_estimators] , identifier[evals] = identifier[evals] , identifier[early_stopping_rounds] = identifier[early_stopping_rounds] , identifier[evals_result] = identifier[evals_result] , identifier[feval] = identifier[feval] , identifier[verbose_eval] = identifier[verbose] ) keyword[if] identifier[evals_result] : keyword[for] identifier[val] keyword[in] identifier[evals_result] . identifier[items] (): identifier[evals_result_key] = identifier[list] ( identifier[val] [ literal[int] ]. identifier[keys] ())[ literal[int] ] identifier[evals_result] [ identifier[val] [ literal[int] ]][ identifier[evals_result_key] ]= identifier[val] [ literal[int] ][ identifier[evals_result_key] ] identifier[self] . identifier[evals_result_] = identifier[evals_result] keyword[if] identifier[early_stopping_rounds] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[best_score] = identifier[self] . identifier[_Booster] . identifier[best_score] identifier[self] . identifier[best_iteration] = identifier[self] . identifier[_Booster] . identifier[best_iteration] keyword[return] identifier[self]
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None, early_stopping_rounds=None, verbose=True): # pylint: disable = attribute-defined-outside-init,arguments-differ "\n Fit gradient boosting classifier\n\n Parameters\n ----------\n X : array_like\n Feature matrix\n y : array_like\n Labels\n sample_weight : array_like\n Weight for each instance\n eval_set : list, optional\n A list of (X, y) pairs to use as a validation set for\n early-stopping\n eval_metric : str, callable, optional\n If a str, should be a built-in evaluation metric to use. See\n doc/parameter.md. If callable, a custom evaluation metric. The call\n signature is func(y_predicted, y_true) where y_true will be a\n DMatrix object such that you may need to call the get_label\n method. It must return a str, value pair where the str is a name\n for the evaluation and value is the value of the evaluation\n function. This objective is always minimized.\n early_stopping_rounds : int, optional\n Activates early stopping. Validation error needs to decrease at\n least every <early_stopping_rounds> round(s) to continue training.\n Requires at least one item in evals. If there's more than one,\n will use the last. Returns the model from the last iteration\n (not the best one). If early stopping occurs, the model will\n have two additional fields: bst.best_score and bst.best_iteration.\n verbose : bool\n If `verbose` and an evaluation set is used, writes the evaluation\n metric measured on the validation set to stderr.\n " evals_result = {} self.classes_ = list(np.unique(y)) self.n_classes_ = len(self.classes_) if self.n_classes_ > 2: # Switch to using a multiclass objective in the underlying XGB instance self.objective = 'multi:softprob' xgb_options = self.get_xgb_params() xgb_options['num_class'] = self.n_classes_ # depends on [control=['if'], data=[]] else: xgb_options = self.get_xgb_params() feval = eval_metric if callable(eval_metric) else None if eval_metric is not None: if callable(eval_metric): eval_metric = None # depends on [control=['if'], data=[]] else: xgb_options.update({'eval_metric': eval_metric}) # depends on [control=['if'], data=['eval_metric']] if eval_set is not None: # TODO: use sample_weight if given? evals = list((DMatrix(x[0], label=x[1]) for x in eval_set)) nevals = len(evals) eval_names = ['validation_{}'.format(i) for i in range(nevals)] evals = list(zip(evals, eval_names)) # depends on [control=['if'], data=['eval_set']] else: evals = () self._le = LabelEncoder().fit(y) training_labels = self._le.transform(y) if sample_weight is not None: train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight, missing=self.missing) # depends on [control=['if'], data=['sample_weight']] else: train_dmatrix = DMatrix(X, label=training_labels, missing=self.missing) self._Booster = train(xgb_options, train_dmatrix, self.n_estimators, evals=evals, early_stopping_rounds=early_stopping_rounds, evals_result=evals_result, feval=feval, verbose_eval=verbose) if evals_result: for val in evals_result.items(): evals_result_key = list(val[1].keys())[0] evals_result[val[0]][evals_result_key] = val[1][evals_result_key] # depends on [control=['for'], data=['val']] self.evals_result_ = evals_result # depends on [control=['if'], data=[]] if early_stopping_rounds is not None: self.best_score = self._Booster.best_score self.best_iteration = self._Booster.best_iteration # depends on [control=['if'], data=[]] return self
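A typical call against this scikit-learn-style wrapper, matching the fit signature shown above (synthetic data; assumes an xgboost version whose fit still accepts eval_metric and early_stopping_rounds):

import numpy as np
from xgboost import XGBClassifier

rng = np.random.RandomState(0)
X = rng.rand(120, 4)
y = rng.randint(0, 3, 120)  # 3 classes, so objective flips to multi:softprob
clf = XGBClassifier(n_estimators=50)
clf.fit(X, y, eval_set=[(X, y)], eval_metric='mlogloss',
        early_stopping_rounds=5, verbose=False)
print(clf.best_iteration, list(clf.evals_result_))  # e.g. 49 ['validation_0']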
def get_context(pid_file, daemon=False): """Get context of running notebook. A context file is created when notebook starts. :param daemon: Are we trying to fetch the context inside the daemon. Otherwise do the death check. :return: dict or None if the process is dead/not launcherd """ port_file = get_context_file_name(pid_file) if not os.path.exists(port_file): return None with open(port_file, "rt") as f: json_data = f.read() try: data = json.loads(json_data) except ValueError as e: logger.error("Damaged context json data %s", json_data) return None if not daemon: pid = data.get("pid") if pid and not check_pid(int(pid)): # The Notebook daemon has exited uncleanly, as the PID does not point to any valid process return None return data
def function[get_context, parameter[pid_file, daemon]]: constant[Get context of running notebook. A context file is created when notebook starts. :param daemon: Are we trying to fetch the context inside the daemon. Otherwise do the death check. :return: dict or None if the process is dead/not launcherd ] variable[port_file] assign[=] call[name[get_context_file_name], parameter[name[pid_file]]] if <ast.UnaryOp object at 0x7da1b25874f0> begin[:] return[constant[None]] with call[name[open], parameter[name[port_file], constant[rt]]] begin[:] variable[json_data] assign[=] call[name[f].read, parameter[]] <ast.Try object at 0x7da1b2586710> if <ast.UnaryOp object at 0x7da1b2585b70> begin[:] variable[pid] assign[=] call[name[data].get, parameter[constant[pid]]] if <ast.BoolOp object at 0x7da1b2584e80> begin[:] return[constant[None]] return[name[data]]
keyword[def] identifier[get_context] ( identifier[pid_file] , identifier[daemon] = keyword[False] ): literal[string] identifier[port_file] = identifier[get_context_file_name] ( identifier[pid_file] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[port_file] ): keyword[return] keyword[None] keyword[with] identifier[open] ( identifier[port_file] , literal[string] ) keyword[as] identifier[f] : identifier[json_data] = identifier[f] . identifier[read] () keyword[try] : identifier[data] = identifier[json] . identifier[loads] ( identifier[json_data] ) keyword[except] identifier[ValueError] keyword[as] identifier[e] : identifier[logger] . identifier[error] ( literal[string] , identifier[json_data] ) keyword[return] keyword[None] keyword[if] keyword[not] identifier[daemon] : identifier[pid] = identifier[data] . identifier[get] ( literal[string] ) keyword[if] identifier[pid] keyword[and] keyword[not] identifier[check_pid] ( identifier[int] ( identifier[pid] )): keyword[return] keyword[None] keyword[return] identifier[data]
def get_context(pid_file, daemon=False): """Get context of running notebook. A context file is created when notebook starts. :param daemon: Are we trying to fetch the context inside the daemon. Otherwise do the death check. :return: dict or None if the process is dead/not launcherd """ port_file = get_context_file_name(pid_file) if not os.path.exists(port_file): return None # depends on [control=['if'], data=[]] with open(port_file, 'rt') as f: json_data = f.read() try: data = json.loads(json_data) # depends on [control=['try'], data=[]] except ValueError as e: logger.error('Damaged context json data %s', json_data) return None # depends on [control=['except'], data=[]] if not daemon: pid = data.get('pid') if pid and (not check_pid(int(pid))): # The Notebook daemon has exited uncleanly, as the PID does not point to any valid process return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return data # depends on [control=['with'], data=['f']]
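What a live context looks like to this reader; check_pid is reimplemented here as the conventional signal-0 probe, which is an assumption about the module's helper:

import json, os

def check_pid(pid):
    # assumption: mirrors the liveness helper used by get_context
    try:
        os.kill(pid, 0)  # signal 0 checks existence without killing
    except OSError:
        return False
    return True

data = json.loads(json.dumps({'pid': os.getpid(), 'http_port': 8899}))
pid = data.get('pid')
print(data if pid and check_pid(int(pid)) else None)  # our own pid is alive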
def _select_labels(self, segmentation, labels=None): """ Get selection of labels from input segmentation :param segmentation: :param labels: :return: """ logger.debug("select_labels() started with labels={}".format(labels)) if self.slab is not None and labels is not None: segmentation_out = select_labels(segmentation, labels, slab=self.slab) else: logger.warning("Nothing found for labels " + str(labels)) un = np.unique(segmentation) if len(un) < 2: logger.error("Just one label found in input segmenation") segmentation_out = (segmentation > un[0]).astype(segmentation.dtype) return segmentation_out
def function[_select_labels, parameter[self, segmentation, labels]]: constant[ Get selection of labels from input segmentation :param segmentation: :param labels: :return: ] call[name[logger].debug, parameter[call[constant[select_labels() started with labels={}].format, parameter[name[labels]]]]] if <ast.BoolOp object at 0x7da2047e9f90> begin[:] variable[segmentation_out] assign[=] call[name[select_labels], parameter[name[segmentation], name[labels]]] return[name[segmentation_out]]
keyword[def] identifier[_select_labels] ( identifier[self] , identifier[segmentation] , identifier[labels] = keyword[None] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[labels] )) keyword[if] identifier[self] . identifier[slab] keyword[is] keyword[not] keyword[None] keyword[and] identifier[labels] keyword[is] keyword[not] keyword[None] : identifier[segmentation_out] = identifier[select_labels] ( identifier[segmentation] , identifier[labels] , identifier[slab] = identifier[self] . identifier[slab] ) keyword[else] : identifier[logger] . identifier[warning] ( literal[string] + identifier[str] ( identifier[labels] )) identifier[un] = identifier[np] . identifier[unique] ( identifier[segmentation] ) keyword[if] identifier[len] ( identifier[un] )< literal[int] : identifier[logger] . identifier[error] ( literal[string] ) identifier[segmentation_out] =( identifier[segmentation] > identifier[un] [ literal[int] ]). identifier[astype] ( identifier[segmentation] . identifier[dtype] ) keyword[return] identifier[segmentation_out]
def _select_labels(self, segmentation, labels=None): """ Get selection of labels from input segmentation :param segmentation: :param labels: :return: """ logger.debug('select_labels() started with labels={}'.format(labels)) if self.slab is not None and labels is not None: segmentation_out = select_labels(segmentation, labels, slab=self.slab) # depends on [control=['if'], data=[]] else: logger.warning('Nothing found for labels ' + str(labels)) un = np.unique(segmentation) if len(un) < 2: logger.error('Just one label found in input segmenation') # depends on [control=['if'], data=[]] segmentation_out = (segmentation > un[0]).astype(segmentation.dtype) return segmentation_out
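The fallback branch in miniature: when no slab/labels selection is possible, everything above the smallest label becomes foreground:

import numpy as np

segmentation = np.array([[0, 0, 2],
                         [0, 1, 2]])
un = np.unique(segmentation)  # array([0, 1, 2])
print((segmentation > un[0]).astype(segmentation.dtype))
# [[0 0 1]
#  [0 1 1]]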
def logs(self, container_id, stderr=True, stream=True): """ acquire output (stdout, stderr) from provided container :param container_id: str :param stderr: True, False :param stream: if True, return as generator :return: either generator, or list of strings """ logger.info("getting stdout of container '%s'", container_id) logger.debug("container_id = '%s', stream = '%s'", container_id, stream) # returns bytes response = self.d.logs(container_id, stdout=True, stderr=stderr, stream=stream) if not stream: if isinstance(response, bytes): response = response.decode("utf-8") # py2 & 3 compat response = [line for line in response.splitlines() if line] return response
def function[logs, parameter[self, container_id, stderr, stream]]: constant[ acquire output (stdout, stderr) from provided container :param container_id: str :param stderr: True, False :param stream: if True, return as generator :return: either generator, or list of strings ] call[name[logger].info, parameter[constant[getting stdout of container '%s'], name[container_id]]] call[name[logger].debug, parameter[constant[container_id = '%s', stream = '%s'], name[container_id], name[stream]]] variable[response] assign[=] call[name[self].d.logs, parameter[name[container_id]]] if <ast.UnaryOp object at 0x7da20c76d600> begin[:] if call[name[isinstance], parameter[name[response], name[bytes]]] begin[:] variable[response] assign[=] call[name[response].decode, parameter[constant[utf-8]]] variable[response] assign[=] <ast.ListComp object at 0x7da20c76e4a0> return[name[response]]
keyword[def] identifier[logs] ( identifier[self] , identifier[container_id] , identifier[stderr] = keyword[True] , identifier[stream] = keyword[True] ): literal[string] identifier[logger] . identifier[info] ( literal[string] , identifier[container_id] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[container_id] , identifier[stream] ) identifier[response] = identifier[self] . identifier[d] . identifier[logs] ( identifier[container_id] , identifier[stdout] = keyword[True] , identifier[stderr] = identifier[stderr] , identifier[stream] = identifier[stream] ) keyword[if] keyword[not] identifier[stream] : keyword[if] identifier[isinstance] ( identifier[response] , identifier[bytes] ): identifier[response] = identifier[response] . identifier[decode] ( literal[string] ) identifier[response] =[ identifier[line] keyword[for] identifier[line] keyword[in] identifier[response] . identifier[splitlines] () keyword[if] identifier[line] ] keyword[return] identifier[response]
def logs(self, container_id, stderr=True, stream=True): """ acquire output (stdout, stderr) from provided container :param container_id: str :param stderr: True, False :param stream: if True, return as generator :return: either generator, or list of strings """ logger.info("getting stdout of container '%s'", container_id) logger.debug("container_id = '%s', stream = '%s'", container_id, stream) # returns bytes response = self.d.logs(container_id, stdout=True, stderr=stderr, stream=stream) if not stream: if isinstance(response, bytes): response = response.decode('utf-8') # py2 & 3 compat # depends on [control=['if'], data=[]] response = [line for line in response.splitlines() if line] # depends on [control=['if'], data=[]] return response
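The non-streaming post-processing on its own: decode the byte payload, then drop empty lines:

response = b'step one\n\nstep two\n'  # shape of a non-streamed logs payload
if isinstance(response, bytes):
    response = response.decode('utf-8')
print([line for line in response.splitlines() if line])
# ['step one', 'step two']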
def find_package(name, installed, package=False): '''Finds a package in the installed list. If `package` is true, match package names, otherwise, match import paths. ''' if package: name = name.lower() tests = ( lambda x: x.user and name == x.name.lower(), lambda x: x.local and name == x.name.lower(), lambda x: name == x.name.lower(), ) else: tests = ( lambda x: x.user and name in x.import_names, lambda x: x.local and name in x.import_names, lambda x: name in x.import_names, ) for t in tests: try: found = list(filter(t, installed)) if found and not found[0].is_scan: return found[0] except StopIteration: pass return None
def function[find_package, parameter[name, installed, package]]: constant[Finds a package in the installed list. If `package` is true, match package names, otherwise, match import paths. ] if name[package] begin[:] variable[name] assign[=] call[name[name].lower, parameter[]] variable[tests] assign[=] tuple[[<ast.Lambda object at 0x7da18bc73550>, <ast.Lambda object at 0x7da18bc72500>, <ast.Lambda object at 0x7da18bc72ec0>]] for taget[name[t]] in starred[name[tests]] begin[:] <ast.Try object at 0x7da18bc72950> return[constant[None]]
keyword[def] identifier[find_package] ( identifier[name] , identifier[installed] , identifier[package] = keyword[False] ): literal[string] keyword[if] identifier[package] : identifier[name] = identifier[name] . identifier[lower] () identifier[tests] =( keyword[lambda] identifier[x] : identifier[x] . identifier[user] keyword[and] identifier[name] == identifier[x] . identifier[name] . identifier[lower] (), keyword[lambda] identifier[x] : identifier[x] . identifier[local] keyword[and] identifier[name] == identifier[x] . identifier[name] . identifier[lower] (), keyword[lambda] identifier[x] : identifier[name] == identifier[x] . identifier[name] . identifier[lower] (), ) keyword[else] : identifier[tests] =( keyword[lambda] identifier[x] : identifier[x] . identifier[user] keyword[and] identifier[name] keyword[in] identifier[x] . identifier[import_names] , keyword[lambda] identifier[x] : identifier[x] . identifier[local] keyword[and] identifier[name] keyword[in] identifier[x] . identifier[import_names] , keyword[lambda] identifier[x] : identifier[name] keyword[in] identifier[x] . identifier[import_names] , ) keyword[for] identifier[t] keyword[in] identifier[tests] : keyword[try] : identifier[found] = identifier[list] ( identifier[filter] ( identifier[t] , identifier[installed] )) keyword[if] identifier[found] keyword[and] keyword[not] identifier[found] [ literal[int] ]. identifier[is_scan] : keyword[return] identifier[found] [ literal[int] ] keyword[except] identifier[StopIteration] : keyword[pass] keyword[return] keyword[None]
def find_package(name, installed, package=False): """Finds a package in the installed list. If `package` is true, match package names, otherwise, match import paths. """ if package: name = name.lower() tests = (lambda x: x.user and name == x.name.lower(), lambda x: x.local and name == x.name.lower(), lambda x: name == x.name.lower()) # depends on [control=['if'], data=[]] else: tests = (lambda x: x.user and name in x.import_names, lambda x: x.local and name in x.import_names, lambda x: name in x.import_names) for t in tests: try: found = list(filter(t, installed)) if found and (not found[0].is_scan): return found[0] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except StopIteration: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['t']] return None
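Stand-in package records make the precedence visible: a user-level install shadows a local one with the same name (the namedtuple fields mirror the attributes the tests read; everything else is hypothetical):

from collections import namedtuple

Pkg = namedtuple('Pkg', 'name user local import_names is_scan')
installed = [
    Pkg('Requests', user=False, local=True,  import_names={'requests'}, is_scan=False),
    Pkg('Requests', user=True,  local=False, import_names={'requests'}, is_scan=False),
]
name = 'requests'  # package-name matching is case-insensitive
tests = (
    lambda x: x.user and name == x.name.lower(),
    lambda x: x.local and name == x.name.lower(),
    lambda x: name == x.name.lower(),
)
for t in tests:
    found = [p for p in installed if t(p)]
    if found and not found[0].is_scan:
        print('picked the user install:', found[0].user)  # True
        break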
def cli(ctx, organism_id, common_name, directory, blatdb="", species="", genus="", public=False): """Update an organism Output: a dictionary with information about the new organism """ return ctx.gi.organisms.update_organism(organism_id, common_name, directory, blatdb=blatdb, species=species, genus=genus, public=public)
def function[cli, parameter[ctx, organism_id, common_name, directory, blatdb, species, genus, public]]: constant[Update an organism Output: a dictionary with information about the new organism ] return[call[name[ctx].gi.organisms.update_organism, parameter[name[organism_id], name[common_name], name[directory]]]]
keyword[def] identifier[cli] ( identifier[ctx] , identifier[organism_id] , identifier[common_name] , identifier[directory] , identifier[blatdb] = literal[string] , identifier[species] = literal[string] , identifier[genus] = literal[string] , identifier[public] = keyword[False] ): literal[string] keyword[return] identifier[ctx] . identifier[gi] . identifier[organisms] . identifier[update_organism] ( identifier[organism_id] , identifier[common_name] , identifier[directory] , identifier[blatdb] = identifier[blatdb] , identifier[species] = identifier[species] , identifier[genus] = identifier[genus] , identifier[public] = identifier[public] )
def cli(ctx, organism_id, common_name, directory, blatdb='', species='', genus='', public=False): """Update an organism Output: a dictionary with information about the new organism """ return ctx.gi.organisms.update_organism(organism_id, common_name, directory, blatdb=blatdb, species=species, genus=genus, public=public)
def initialize(self,num_reals=1,enforce_bounds="reset",
               parensemble=None,obsensemble=None,restart_obsensemble=None):
    """Initialize.  Depending on arguments, draws or loads
    initial parameter observations ensembles and runs the initial parameter
    ensemble

    Parameters
    ----------
    num_reals : int
        the number of realizations to draw.  Ignored if parensemble/obsensemble
        are not None
    enforce_bounds : str
        how to enfore parameter bound transgression.  options are
        reset, drop, or None
    parensemble : pyemu.ParameterEnsemble or str
        a parameter ensemble or filename to use as the initial
        parameter ensemble.  If not None, then obsenemble must not be None
    obsensemble : pyemu.ObservationEnsemble or str
        an observation ensemble or filename to use as the initial
        observation ensemble.  If not None, then parensemble must not be None
    restart_obsensemble : pyemu.ObservationEnsemble or str
        an observation ensemble or filename to use as an evaluated
        observation ensemble.  If not None, this will skip the initial
        parameter ensemble evaluation - user beware!

    """
    build_empirical_prior = False

    # initialize the phi report csv
    self.enforce_bounds = enforce_bounds
    self.total_runs = 0
    # this matrix gets used a lot, so only calc once and store
    self.obscov_inv_sqrt = self.obscov.get(self.pst.nnz_obs_names).inv.sqrt
    self.logger.log("forming inverse sqrt parcov matrix")
    self.parcov_inv_sqrt = self.parcov.inv.sqrt
    self.logger.log("forming inverse sqrt parcov matrix")

    if parensemble is not None and obsensemble is not None:
        self.logger.log("initializing with existing ensembles")
        if isinstance(parensemble,str):
            self.logger.log("loading parensemble from file")
            if not os.path.exists(obsensemble):
                self.logger.lraise("can not find parensemble file: {0}".\
                                   format(parensemble))
            df = pd.read_csv(parensemble,index_col=0)
            df.columns = df.columns.str.lower()
            #df.index = [str(i) for i in df.index]
            self.parensemble_0 = pyemu.ParameterEnsemble.from_dataframe(df=df,pst=self.pst)
            self.logger.log("loading parensemble from file")
        elif isinstance(parensemble,ParameterEnsemble):
            self.parensemble_0 = parensemble.copy()
        else:
            raise Exception("unrecognized arg type for parensemble, " +\
                            "should be filename or ParameterEnsemble" +\
                            ", not {0}".format(type(parensemble)))
        self.parensemble = self.parensemble_0.copy()
        if isinstance(obsensemble,str):
            self.logger.log("loading obsensemble from file")
            if not os.path.exists(obsensemble):
                self.logger.lraise("can not find obsensemble file: {0}".\
                                   format(obsensemble))
            df = pd.read_csv(obsensemble,index_col=0)
            df.columns = df.columns.str.lower()
            df = df.loc[:,self.pst.nnz_obs_names]
            #df.index = [str(i) for i in df.index]
            self.obsensemble_0 = pyemu.ObservationEnsemble.from_dataframe(df=df,pst=self.pst)
            self.logger.log("loading obsensemble from file")
        elif isinstance(obsensemble,ObservationEnsemble):
            self.obsensemble_0 = obsensemble.copy()
        else:
            raise Exception("unrecognized arg type for obsensemble, " +\
                            "should be filename or ObservationEnsemble" +\
                            ", not {0}".format(type(obsensemble)))

        assert self.parensemble_0.shape[0] == self.obsensemble_0.shape[0]
        #self.num_reals = self.parensemble_0.shape[0]
        num_reals = self.parensemble.shape[0]
        self.logger.log("initializing with existing ensembles")

        if build_empirical_prior:
            self.reset_parcov(self.parensemble.covariance_matrix())
            if self.save_mats:
                self.parcov.to_binary(self.pst.filename+".empcov.jcb")

    else:
        if build_empirical_prior:
            self.logger.lraise("can't use build_emprirical_prior without parensemble...")
        self.logger.log("initializing with {0} realizations".format(num_reals))
        self.logger.log("initializing parensemble")
        self.parensemble_0 = pyemu.ParameterEnsemble.from_gaussian_draw(self.pst,
                                 self.parcov,num_reals=num_reals)
        self.parensemble_0.enforce(enforce_bounds=enforce_bounds)
        self.logger.log("initializing parensemble")
        self.parensemble = self.parensemble_0.copy()
        self.parensemble_0.to_csv(self.pst.filename +\
                                  self.paren_prefix.format(0))
        self.logger.log("initializing parensemble")
        self.logger.log("initializing obsensemble")
        self.obsensemble_0 = pyemu.ObservationEnsemble.from_id_gaussian_draw(self.pst,
                                 num_reals=num_reals)
        #self.obsensemble = self.obsensemble_0.copy()
        # save the base obsensemble
        self.obsensemble_0.to_csv(self.pst.filename +\
                                  self.obsen_prefix.format(-1))
        self.logger.log("initializing obsensemble")
        self.logger.log("initializing with {0} realizations".format(num_reals))

    self.enforce_bounds = enforce_bounds

    if restart_obsensemble is not None:
        self.logger.log("loading restart_obsensemble {0}".format(restart_obsensemble))
        #failed_runs,self.obsensemble = self._load_obs_ensemble(restart_obsensemble)
        df = pd.read_csv(restart_obsensemble, index_col=0)
        df.columns = df.columns.str.lower()
        #df = df.loc[:, self.pst.nnz_obs_names]
        # df.index = [str(i) for i in df.index]
        self.obsensemble = pyemu.ObservationEnsemble.from_dataframe(df=df, pst=self.pst)
        assert self.obsensemble.shape[0] == self.obsensemble_0.shape[0]
        assert list(self.obsensemble.columns) == list(self.obsensemble_0.columns)
        self.logger.log("loading restart_obsensemble {0}".format(restart_obsensemble))

    else:
        # run the initial parameter ensemble
        self.logger.log("evaluating initial ensembles")
        self.obsensemble = self.forecast()
        self.logger.log("evaluating initial ensembles")

    #if not self.parensemble.istransformed:
    self.parensemble._transform(inplace=True)
    #if not self.parensemble_0.istransformed:
    self.parensemble_0._transform(inplace=True)

    self._initialized = True
def function[initialize, parameter[self, num_reals, enforce_bounds, parensemble, obsensemble, restart_obsensemble]]: constant[Initialize. Depending on arguments, draws or loads initial parameter observations ensembles and runs the initial parameter ensemble Parameters ---------- num_reals : int the number of realizations to draw. Ignored if parensemble/obsensemble are not None enforce_bounds : str how to enfore parameter bound transgression. options are reset, drop, or None parensemble : pyemu.ParameterEnsemble or str a parameter ensemble or filename to use as the initial parameter ensemble. If not None, then obsenemble must not be None obsensemble : pyemu.ObservationEnsemble or str an observation ensemble or filename to use as the initial observation ensemble. If not None, then parensemble must not be None restart_obsensemble : pyemu.ObservationEnsemble or str an observation ensemble or filename to use as an evaluated observation ensemble. If not None, this will skip the initial parameter ensemble evaluation - user beware! ] variable[build_empirical_prior] assign[=] constant[False] name[self].enforce_bounds assign[=] name[enforce_bounds] name[self].total_runs assign[=] constant[0] name[self].obscov_inv_sqrt assign[=] call[name[self].obscov.get, parameter[name[self].pst.nnz_obs_names]].inv.sqrt call[name[self].logger.log, parameter[constant[forming inverse sqrt parcov matrix]]] name[self].parcov_inv_sqrt assign[=] name[self].parcov.inv.sqrt call[name[self].logger.log, parameter[constant[forming inverse sqrt parcov matrix]]] if <ast.BoolOp object at 0x7da1b23c6200> begin[:] call[name[self].logger.log, parameter[constant[initializing with existing ensembles]]] if call[name[isinstance], parameter[name[parensemble], name[str]]] begin[:] call[name[self].logger.log, parameter[constant[loading parensemble from file]]] if <ast.UnaryOp object at 0x7da1b23c5cc0> begin[:] call[name[self].logger.lraise, parameter[call[constant[can not find parensemble file: {0}].format, parameter[name[parensemble]]]]] variable[df] assign[=] call[name[pd].read_csv, parameter[name[parensemble]]] name[df].columns assign[=] call[name[df].columns.str.lower, parameter[]] name[self].parensemble_0 assign[=] call[name[pyemu].ParameterEnsemble.from_dataframe, parameter[]] call[name[self].logger.log, parameter[constant[loading parensemble from file]]] name[self].parensemble assign[=] call[name[self].parensemble_0.copy, parameter[]] if call[name[isinstance], parameter[name[obsensemble], name[str]]] begin[:] call[name[self].logger.log, parameter[constant[loading obsensemble from file]]] if <ast.UnaryOp object at 0x7da1b23c4af0> begin[:] call[name[self].logger.lraise, parameter[call[constant[can not find obsensemble file: {0}].format, parameter[name[obsensemble]]]]] variable[df] assign[=] call[name[pd].read_csv, parameter[name[obsensemble]]] name[df].columns assign[=] call[name[df].columns.str.lower, parameter[]] variable[df] assign[=] call[name[df].loc][tuple[[<ast.Slice object at 0x7da1b23c43d0>, <ast.Attribute object at 0x7da1b23c43a0>]]] name[self].obsensemble_0 assign[=] call[name[pyemu].ObservationEnsemble.from_dataframe, parameter[]] call[name[self].logger.log, parameter[constant[loading obsensemble from file]]] assert[compare[call[name[self].parensemble_0.shape][constant[0]] equal[==] call[name[self].obsensemble_0.shape][constant[0]]]] variable[num_reals] assign[=] call[name[self].parensemble.shape][constant[0]] call[name[self].logger.log, parameter[constant[initializing with existing ensembles]]] if 
name[build_empirical_prior] begin[:] call[name[self].reset_parcov, parameter[call[name[self].parensemble.covariance_matrix, parameter[]]]] if name[self].save_mats begin[:] call[name[self].parcov.to_binary, parameter[binary_operation[name[self].pst.filename + constant[.empcov.jcb]]]] name[self].enforce_bounds assign[=] name[enforce_bounds] if compare[name[restart_obsensemble] is_not constant[None]] begin[:] call[name[self].logger.log, parameter[call[constant[loading restart_obsensemble {0}].format, parameter[name[restart_obsensemble]]]]] variable[df] assign[=] call[name[pd].read_csv, parameter[name[restart_obsensemble]]] name[df].columns assign[=] call[name[df].columns.str.lower, parameter[]] name[self].obsensemble assign[=] call[name[pyemu].ObservationEnsemble.from_dataframe, parameter[]] assert[compare[call[name[self].obsensemble.shape][constant[0]] equal[==] call[name[self].obsensemble_0.shape][constant[0]]]] assert[compare[call[name[list], parameter[name[self].obsensemble.columns]] equal[==] call[name[list], parameter[name[self].obsensemble_0.columns]]]] call[name[self].logger.log, parameter[call[constant[loading restart_obsensemble {0}].format, parameter[name[restart_obsensemble]]]]] call[name[self].parensemble._transform, parameter[]] call[name[self].parensemble_0._transform, parameter[]] name[self]._initialized assign[=] constant[True]
keyword[def] identifier[initialize] ( identifier[self] , identifier[num_reals] = literal[int] , identifier[enforce_bounds] = literal[string] , identifier[parensemble] = keyword[None] , identifier[obsensemble] = keyword[None] , identifier[restart_obsensemble] = keyword[None] ): literal[string] identifier[build_empirical_prior] = keyword[False] identifier[self] . identifier[enforce_bounds] = identifier[enforce_bounds] identifier[self] . identifier[total_runs] = literal[int] identifier[self] . identifier[obscov_inv_sqrt] = identifier[self] . identifier[obscov] . identifier[get] ( identifier[self] . identifier[pst] . identifier[nnz_obs_names] ). identifier[inv] . identifier[sqrt] identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[parcov_inv_sqrt] = identifier[self] . identifier[parcov] . identifier[inv] . identifier[sqrt] identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) keyword[if] identifier[parensemble] keyword[is] keyword[not] keyword[None] keyword[and] identifier[obsensemble] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[parensemble] , identifier[str] ): identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[obsensemble] ): identifier[self] . identifier[logger] . identifier[lraise] ( literal[string] . identifier[format] ( identifier[parensemble] )) identifier[df] = identifier[pd] . identifier[read_csv] ( identifier[parensemble] , identifier[index_col] = literal[int] ) identifier[df] . identifier[columns] = identifier[df] . identifier[columns] . identifier[str] . identifier[lower] () identifier[self] . identifier[parensemble_0] = identifier[pyemu] . identifier[ParameterEnsemble] . identifier[from_dataframe] ( identifier[df] = identifier[df] , identifier[pst] = identifier[self] . identifier[pst] ) identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[parensemble] , identifier[ParameterEnsemble] ): identifier[self] . identifier[parensemble_0] = identifier[parensemble] . identifier[copy] () keyword[else] : keyword[raise] identifier[Exception] ( literal[string] + literal[string] + literal[string] . identifier[format] ( identifier[type] ( identifier[parensemble] ))) identifier[self] . identifier[parensemble] = identifier[self] . identifier[parensemble_0] . identifier[copy] () keyword[if] identifier[isinstance] ( identifier[obsensemble] , identifier[str] ): identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[obsensemble] ): identifier[self] . identifier[logger] . identifier[lraise] ( literal[string] . identifier[format] ( identifier[obsensemble] )) identifier[df] = identifier[pd] . identifier[read_csv] ( identifier[obsensemble] , identifier[index_col] = literal[int] ) identifier[df] . identifier[columns] = identifier[df] . identifier[columns] . identifier[str] . identifier[lower] () identifier[df] = identifier[df] . identifier[loc] [:, identifier[self] . identifier[pst] . identifier[nnz_obs_names] ] identifier[self] . identifier[obsensemble_0] = identifier[pyemu] . identifier[ObservationEnsemble] . identifier[from_dataframe] ( identifier[df] = identifier[df] , identifier[pst] = identifier[self] . 
identifier[pst] ) identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[obsensemble] , identifier[ObservationEnsemble] ): identifier[self] . identifier[obsensemble_0] = identifier[obsensemble] . identifier[copy] () keyword[else] : keyword[raise] identifier[Exception] ( literal[string] + literal[string] + literal[string] . identifier[format] ( identifier[type] ( identifier[obsensemble] ))) keyword[assert] identifier[self] . identifier[parensemble_0] . identifier[shape] [ literal[int] ]== identifier[self] . identifier[obsensemble_0] . identifier[shape] [ literal[int] ] identifier[num_reals] = identifier[self] . identifier[parensemble] . identifier[shape] [ literal[int] ] identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) keyword[if] identifier[build_empirical_prior] : identifier[self] . identifier[reset_parcov] ( identifier[self] . identifier[parensemble] . identifier[covariance_matrix] ()) keyword[if] identifier[self] . identifier[save_mats] : identifier[self] . identifier[parcov] . identifier[to_binary] ( identifier[self] . identifier[pst] . identifier[filename] + literal[string] ) keyword[else] : keyword[if] identifier[build_empirical_prior] : identifier[self] . identifier[logger] . identifier[lraise] ( literal[string] ) identifier[self] . identifier[logger] . identifier[log] ( literal[string] . identifier[format] ( identifier[num_reals] )) identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[parensemble_0] = identifier[pyemu] . identifier[ParameterEnsemble] . identifier[from_gaussian_draw] ( identifier[self] . identifier[pst] , identifier[self] . identifier[parcov] , identifier[num_reals] = identifier[num_reals] ) identifier[self] . identifier[parensemble_0] . identifier[enforce] ( identifier[enforce_bounds] = identifier[enforce_bounds] ) identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[parensemble] = identifier[self] . identifier[parensemble_0] . identifier[copy] () identifier[self] . identifier[parensemble_0] . identifier[to_csv] ( identifier[self] . identifier[pst] . identifier[filename] + identifier[self] . identifier[paren_prefix] . identifier[format] ( literal[int] )) identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[obsensemble_0] = identifier[pyemu] . identifier[ObservationEnsemble] . identifier[from_id_gaussian_draw] ( identifier[self] . identifier[pst] , identifier[num_reals] = identifier[num_reals] ) identifier[self] . identifier[obsensemble_0] . identifier[to_csv] ( identifier[self] . identifier[pst] . identifier[filename] + identifier[self] . identifier[obsen_prefix] . identifier[format] (- literal[int] )) identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[logger] . identifier[log] ( literal[string] . identifier[format] ( identifier[num_reals] )) identifier[self] . identifier[enforce_bounds] = identifier[enforce_bounds] keyword[if] identifier[restart_obsensemble] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[logger] . identifier[log] ( literal[string] . identifier[format] ( identifier[restart_obsensemble] )) identifier[df] = identifier[pd] . identifier[read_csv] ( identifier[restart_obsensemble] , identifier[index_col] = literal[int] ) identifier[df] . 
identifier[columns] = identifier[df] . identifier[columns] . identifier[str] . identifier[lower] () identifier[self] . identifier[obsensemble] = identifier[pyemu] . identifier[ObservationEnsemble] . identifier[from_dataframe] ( identifier[df] = identifier[df] , identifier[pst] = identifier[self] . identifier[pst] ) keyword[assert] identifier[self] . identifier[obsensemble] . identifier[shape] [ literal[int] ]== identifier[self] . identifier[obsensemble_0] . identifier[shape] [ literal[int] ] keyword[assert] identifier[list] ( identifier[self] . identifier[obsensemble] . identifier[columns] )== identifier[list] ( identifier[self] . identifier[obsensemble_0] . identifier[columns] ) identifier[self] . identifier[logger] . identifier[log] ( literal[string] . identifier[format] ( identifier[restart_obsensemble] )) keyword[else] : identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[obsensemble] = identifier[self] . identifier[forecast] () identifier[self] . identifier[logger] . identifier[log] ( literal[string] ) identifier[self] . identifier[parensemble] . identifier[_transform] ( identifier[inplace] = keyword[True] ) identifier[self] . identifier[parensemble_0] . identifier[_transform] ( identifier[inplace] = keyword[True] ) identifier[self] . identifier[_initialized] = keyword[True]
def initialize(self, num_reals=1, enforce_bounds='reset', parensemble=None, obsensemble=None, restart_obsensemble=None): """Initialize. Depending on arguments, draws or loads initial parameter observations ensembles and runs the initial parameter ensemble Parameters ---------- num_reals : int the number of realizations to draw. Ignored if parensemble/obsensemble are not None enforce_bounds : str how to enfore parameter bound transgression. options are reset, drop, or None parensemble : pyemu.ParameterEnsemble or str a parameter ensemble or filename to use as the initial parameter ensemble. If not None, then obsenemble must not be None obsensemble : pyemu.ObservationEnsemble or str an observation ensemble or filename to use as the initial observation ensemble. If not None, then parensemble must not be None restart_obsensemble : pyemu.ObservationEnsemble or str an observation ensemble or filename to use as an evaluated observation ensemble. If not None, this will skip the initial parameter ensemble evaluation - user beware! """ build_empirical_prior = False # initialize the phi report csv self.enforce_bounds = enforce_bounds self.total_runs = 0 # this matrix gets used a lot, so only calc once and store self.obscov_inv_sqrt = self.obscov.get(self.pst.nnz_obs_names).inv.sqrt self.logger.log('forming inverse sqrt parcov matrix') self.parcov_inv_sqrt = self.parcov.inv.sqrt self.logger.log('forming inverse sqrt parcov matrix') if parensemble is not None and obsensemble is not None: self.logger.log('initializing with existing ensembles') if isinstance(parensemble, str): self.logger.log('loading parensemble from file') if not os.path.exists(obsensemble): self.logger.lraise('can not find parensemble file: {0}'.format(parensemble)) # depends on [control=['if'], data=[]] df = pd.read_csv(parensemble, index_col=0) df.columns = df.columns.str.lower() #df.index = [str(i) for i in df.index] self.parensemble_0 = pyemu.ParameterEnsemble.from_dataframe(df=df, pst=self.pst) self.logger.log('loading parensemble from file') # depends on [control=['if'], data=[]] elif isinstance(parensemble, ParameterEnsemble): self.parensemble_0 = parensemble.copy() # depends on [control=['if'], data=[]] else: raise Exception('unrecognized arg type for parensemble, ' + 'should be filename or ParameterEnsemble' + ', not {0}'.format(type(parensemble))) self.parensemble = self.parensemble_0.copy() if isinstance(obsensemble, str): self.logger.log('loading obsensemble from file') if not os.path.exists(obsensemble): self.logger.lraise('can not find obsensemble file: {0}'.format(obsensemble)) # depends on [control=['if'], data=[]] df = pd.read_csv(obsensemble, index_col=0) df.columns = df.columns.str.lower() df = df.loc[:, self.pst.nnz_obs_names] #df.index = [str(i) for i in df.index] self.obsensemble_0 = pyemu.ObservationEnsemble.from_dataframe(df=df, pst=self.pst) self.logger.log('loading obsensemble from file') # depends on [control=['if'], data=[]] elif isinstance(obsensemble, ObservationEnsemble): self.obsensemble_0 = obsensemble.copy() # depends on [control=['if'], data=[]] else: raise Exception('unrecognized arg type for obsensemble, ' + 'should be filename or ObservationEnsemble' + ', not {0}'.format(type(obsensemble))) assert self.parensemble_0.shape[0] == self.obsensemble_0.shape[0] #self.num_reals = self.parensemble_0.shape[0] num_reals = self.parensemble.shape[0] self.logger.log('initializing with existing ensembles') if build_empirical_prior: self.reset_parcov(self.parensemble.covariance_matrix()) if self.save_mats: 
self.parcov.to_binary(self.pst.filename + '.empcov.jcb') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: if build_empirical_prior: self.logger.lraise("can't use build_emprirical_prior without parensemble...") # depends on [control=['if'], data=[]] self.logger.log('initializing with {0} realizations'.format(num_reals)) self.logger.log('initializing parensemble') self.parensemble_0 = pyemu.ParameterEnsemble.from_gaussian_draw(self.pst, self.parcov, num_reals=num_reals) self.parensemble_0.enforce(enforce_bounds=enforce_bounds) self.logger.log('initializing parensemble') self.parensemble = self.parensemble_0.copy() self.parensemble_0.to_csv(self.pst.filename + self.paren_prefix.format(0)) self.logger.log('initializing parensemble') self.logger.log('initializing obsensemble') self.obsensemble_0 = pyemu.ObservationEnsemble.from_id_gaussian_draw(self.pst, num_reals=num_reals) #self.obsensemble = self.obsensemble_0.copy() # save the base obsensemble self.obsensemble_0.to_csv(self.pst.filename + self.obsen_prefix.format(-1)) self.logger.log('initializing obsensemble') self.logger.log('initializing with {0} realizations'.format(num_reals)) self.enforce_bounds = enforce_bounds if restart_obsensemble is not None: self.logger.log('loading restart_obsensemble {0}'.format(restart_obsensemble)) #failed_runs,self.obsensemble = self._load_obs_ensemble(restart_obsensemble) df = pd.read_csv(restart_obsensemble, index_col=0) df.columns = df.columns.str.lower() #df = df.loc[:, self.pst.nnz_obs_names] # df.index = [str(i) for i in df.index] self.obsensemble = pyemu.ObservationEnsemble.from_dataframe(df=df, pst=self.pst) assert self.obsensemble.shape[0] == self.obsensemble_0.shape[0] assert list(self.obsensemble.columns) == list(self.obsensemble_0.columns) self.logger.log('loading restart_obsensemble {0}'.format(restart_obsensemble)) # depends on [control=['if'], data=['restart_obsensemble']] else: # run the initial parameter ensemble self.logger.log('evaluating initial ensembles') self.obsensemble = self.forecast() self.logger.log('evaluating initial ensembles') #if not self.parensemble.istransformed: self.parensemble._transform(inplace=True) #if not self.parensemble_0.istransformed: self.parensemble_0._transform(inplace=True) self._initialized = True
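A hedged driver sketch for initialize: it assumes a pyemu-style EnsembleSmoother that owns the pst, parcov, obscov, and logger attributes used above and whose constructor accepts a control file; the CSV names are placeholders, not the exact paren_prefix/obsen_prefix output names.

# Sketch only; the smoother's constructor arguments are assumptions.
import pyemu

es = pyemu.EnsembleSmoother(pyemu.Pst("model.pst"))

# Draw 50 fresh realizations and evaluate them...
es.initialize(num_reals=50, enforce_bounds="reset")

# ...or restart from previously saved ensembles (both must be supplied,
# and the two ensembles must have the same number of rows):
es.initialize(parensemble="model.parensemble.csv",
              obsensemble="model.obsensemble.csv")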
def _convert_xml_to_service_properties(xml):
    '''
    <?xml version="1.0" encoding="utf-8"?>
    <StorageServiceProperties>
        <Logging>
            <Version>version-number</Version>
            <Delete>true|false</Delete>
            <Read>true|false</Read>
            <Write>true|false</Write>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </Logging>
        <HourMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </HourMetrics>
        <MinuteMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </MinuteMetrics>
        <Cors>
            <CorsRule>
                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
                <AllowedMethods>comma-separated-list-of-HTTP-verbs</AllowedMethods>
                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
            </CorsRule>
        </Cors>
    </StorageServiceProperties>
    '''
    service_properties_element = ETree.fromstring(xml)
    service_properties = ServiceProperties()

    # Logging
    logging = service_properties_element.find('Logging')
    if logging is not None:
        service_properties.logging = Logging()
        service_properties.logging.version = logging.find('Version').text
        service_properties.logging.delete = _bool(logging.find('Delete').text)
        service_properties.logging.read = _bool(logging.find('Read').text)
        service_properties.logging.write = _bool(logging.find('Write').text)
        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
                                         service_properties.logging.retention_policy)

    # HourMetrics
    hour_metrics_element = service_properties_element.find('HourMetrics')
    if hour_metrics_element is not None:
        service_properties.hour_metrics = Metrics()
        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)

    # MinuteMetrics
    minute_metrics_element = service_properties_element.find('MinuteMetrics')
    if minute_metrics_element is not None:
        service_properties.minute_metrics = Metrics()
        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)

    # CORS
    cors = service_properties_element.find('Cors')
    if cors is not None:
        service_properties.cors = list()
        for rule in cors.findall('CorsRule'):
            allowed_origins = rule.find('AllowedOrigins').text.split(',')
            allowed_methods = rule.find('AllowedMethods').text.split(',')
            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)

            exposed_headers = rule.find('ExposedHeaders').text
            if exposed_headers is not None:
                cors_rule.exposed_headers = exposed_headers.split(',')

            allowed_headers = rule.find('AllowedHeaders').text
            if allowed_headers is not None:
                cors_rule.allowed_headers = allowed_headers.split(',')

            service_properties.cors.append(cors_rule)

    # Target version
    target_version = service_properties_element.find('DefaultServiceVersion')
    if target_version is not None:
        service_properties.target_version = target_version.text

    return service_properties
def function[_convert_xml_to_service_properties, parameter[xml]]: constant[ <?xml version="1.0" encoding="utf-8"?> <StorageServiceProperties> <Logging> <Version>version-number</Version> <Delete>true|false</Delete> <Read>true|false</Read> <Write>true|false</Write> <RetentionPolicy> <Enabled>true|false</Enabled> <Days>number-of-days</Days> </RetentionPolicy> </Logging> <HourMetrics> <Version>version-number</Version> <Enabled>true|false</Enabled> <IncludeAPIs>true|false</IncludeAPIs> <RetentionPolicy> <Enabled>true|false</Enabled> <Days>number-of-days</Days> </RetentionPolicy> </HourMetrics> <MinuteMetrics> <Version>version-number</Version> <Enabled>true|false</Enabled> <IncludeAPIs>true|false</IncludeAPIs> <RetentionPolicy> <Enabled>true|false</Enabled> <Days>number-of-days</Days> </RetentionPolicy> </MinuteMetrics> <Cors> <CorsRule> <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins> <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods> <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds> <ExposedHeaders>comma-seperated-list-of-response-headers</ExposedHeaders> <AllowedHeaders>comma-seperated-list-of-request-headers</AllowedHeaders> </CorsRule> </Cors> </StorageServiceProperties> ] variable[service_properties_element] assign[=] call[name[ETree].fromstring, parameter[name[xml]]] variable[service_properties] assign[=] call[name[ServiceProperties], parameter[]] variable[logging] assign[=] call[name[service_properties_element].find, parameter[constant[Logging]]] if compare[name[logging] is_not constant[None]] begin[:] name[service_properties].logging assign[=] call[name[Logging], parameter[]] name[service_properties].logging.version assign[=] call[name[logging].find, parameter[constant[Version]]].text name[service_properties].logging.delete assign[=] call[name[_bool], parameter[call[name[logging].find, parameter[constant[Delete]]].text]] name[service_properties].logging.read assign[=] call[name[_bool], parameter[call[name[logging].find, parameter[constant[Read]]].text]] name[service_properties].logging.write assign[=] call[name[_bool], parameter[call[name[logging].find, parameter[constant[Write]]].text]] call[name[_convert_xml_to_retention_policy], parameter[call[name[logging].find, parameter[constant[RetentionPolicy]]], name[service_properties].logging.retention_policy]] variable[hour_metrics_element] assign[=] call[name[service_properties_element].find, parameter[constant[HourMetrics]]] if compare[name[hour_metrics_element] is_not constant[None]] begin[:] name[service_properties].hour_metrics assign[=] call[name[Metrics], parameter[]] call[name[_convert_xml_to_metrics], parameter[name[hour_metrics_element], name[service_properties].hour_metrics]] variable[minute_metrics_element] assign[=] call[name[service_properties_element].find, parameter[constant[MinuteMetrics]]] if compare[name[minute_metrics_element] is_not constant[None]] begin[:] name[service_properties].minute_metrics assign[=] call[name[Metrics], parameter[]] call[name[_convert_xml_to_metrics], parameter[name[minute_metrics_element], name[service_properties].minute_metrics]] variable[cors] assign[=] call[name[service_properties_element].find, parameter[constant[Cors]]] if compare[name[cors] is_not constant[None]] begin[:] name[service_properties].cors assign[=] call[name[list], parameter[]] for taget[name[rule]] in starred[call[name[cors].findall, parameter[constant[CorsRule]]]] begin[:] variable[allowed_origins] assign[=] call[call[name[rule].find, 
parameter[constant[AllowedOrigins]]].text.split, parameter[constant[,]]] variable[allowed_methods] assign[=] call[call[name[rule].find, parameter[constant[AllowedMethods]]].text.split, parameter[constant[,]]] variable[max_age_in_seconds] assign[=] call[name[int], parameter[call[name[rule].find, parameter[constant[MaxAgeInSeconds]]].text]] variable[cors_rule] assign[=] call[name[CorsRule], parameter[name[allowed_origins], name[allowed_methods], name[max_age_in_seconds]]] variable[exposed_headers] assign[=] call[name[rule].find, parameter[constant[ExposedHeaders]]].text if compare[name[exposed_headers] is_not constant[None]] begin[:] name[cors_rule].exposed_headers assign[=] call[name[exposed_headers].split, parameter[constant[,]]] variable[allowed_headers] assign[=] call[name[rule].find, parameter[constant[AllowedHeaders]]].text if compare[name[allowed_headers] is_not constant[None]] begin[:] name[cors_rule].allowed_headers assign[=] call[name[allowed_headers].split, parameter[constant[,]]] call[name[service_properties].cors.append, parameter[name[cors_rule]]] variable[target_version] assign[=] call[name[service_properties_element].find, parameter[constant[DefaultServiceVersion]]] if compare[name[target_version] is_not constant[None]] begin[:] name[service_properties].target_version assign[=] name[target_version].text return[name[service_properties]]
keyword[def] identifier[_convert_xml_to_service_properties] ( identifier[xml] ): literal[string] identifier[service_properties_element] = identifier[ETree] . identifier[fromstring] ( identifier[xml] ) identifier[service_properties] = identifier[ServiceProperties] () identifier[logging] = identifier[service_properties_element] . identifier[find] ( literal[string] ) keyword[if] identifier[logging] keyword[is] keyword[not] keyword[None] : identifier[service_properties] . identifier[logging] = identifier[Logging] () identifier[service_properties] . identifier[logging] . identifier[version] = identifier[logging] . identifier[find] ( literal[string] ). identifier[text] identifier[service_properties] . identifier[logging] . identifier[delete] = identifier[_bool] ( identifier[logging] . identifier[find] ( literal[string] ). identifier[text] ) identifier[service_properties] . identifier[logging] . identifier[read] = identifier[_bool] ( identifier[logging] . identifier[find] ( literal[string] ). identifier[text] ) identifier[service_properties] . identifier[logging] . identifier[write] = identifier[_bool] ( identifier[logging] . identifier[find] ( literal[string] ). identifier[text] ) identifier[_convert_xml_to_retention_policy] ( identifier[logging] . identifier[find] ( literal[string] ), identifier[service_properties] . identifier[logging] . identifier[retention_policy] ) identifier[hour_metrics_element] = identifier[service_properties_element] . identifier[find] ( literal[string] ) keyword[if] identifier[hour_metrics_element] keyword[is] keyword[not] keyword[None] : identifier[service_properties] . identifier[hour_metrics] = identifier[Metrics] () identifier[_convert_xml_to_metrics] ( identifier[hour_metrics_element] , identifier[service_properties] . identifier[hour_metrics] ) identifier[minute_metrics_element] = identifier[service_properties_element] . identifier[find] ( literal[string] ) keyword[if] identifier[minute_metrics_element] keyword[is] keyword[not] keyword[None] : identifier[service_properties] . identifier[minute_metrics] = identifier[Metrics] () identifier[_convert_xml_to_metrics] ( identifier[minute_metrics_element] , identifier[service_properties] . identifier[minute_metrics] ) identifier[cors] = identifier[service_properties_element] . identifier[find] ( literal[string] ) keyword[if] identifier[cors] keyword[is] keyword[not] keyword[None] : identifier[service_properties] . identifier[cors] = identifier[list] () keyword[for] identifier[rule] keyword[in] identifier[cors] . identifier[findall] ( literal[string] ): identifier[allowed_origins] = identifier[rule] . identifier[find] ( literal[string] ). identifier[text] . identifier[split] ( literal[string] ) identifier[allowed_methods] = identifier[rule] . identifier[find] ( literal[string] ). identifier[text] . identifier[split] ( literal[string] ) identifier[max_age_in_seconds] = identifier[int] ( identifier[rule] . identifier[find] ( literal[string] ). identifier[text] ) identifier[cors_rule] = identifier[CorsRule] ( identifier[allowed_origins] , identifier[allowed_methods] , identifier[max_age_in_seconds] ) identifier[exposed_headers] = identifier[rule] . identifier[find] ( literal[string] ). identifier[text] keyword[if] identifier[exposed_headers] keyword[is] keyword[not] keyword[None] : identifier[cors_rule] . identifier[exposed_headers] = identifier[exposed_headers] . identifier[split] ( literal[string] ) identifier[allowed_headers] = identifier[rule] . identifier[find] ( literal[string] ). 
identifier[text] keyword[if] identifier[allowed_headers] keyword[is] keyword[not] keyword[None] : identifier[cors_rule] . identifier[allowed_headers] = identifier[allowed_headers] . identifier[split] ( literal[string] ) identifier[service_properties] . identifier[cors] . identifier[append] ( identifier[cors_rule] ) identifier[target_version] = identifier[service_properties_element] . identifier[find] ( literal[string] ) keyword[if] identifier[target_version] keyword[is] keyword[not] keyword[None] : identifier[service_properties] . identifier[target_version] = identifier[target_version] . identifier[text] keyword[return] identifier[service_properties]
def _convert_xml_to_service_properties(xml): """ <?xml version="1.0" encoding="utf-8"?> <StorageServiceProperties> <Logging> <Version>version-number</Version> <Delete>true|false</Delete> <Read>true|false</Read> <Write>true|false</Write> <RetentionPolicy> <Enabled>true|false</Enabled> <Days>number-of-days</Days> </RetentionPolicy> </Logging> <HourMetrics> <Version>version-number</Version> <Enabled>true|false</Enabled> <IncludeAPIs>true|false</IncludeAPIs> <RetentionPolicy> <Enabled>true|false</Enabled> <Days>number-of-days</Days> </RetentionPolicy> </HourMetrics> <MinuteMetrics> <Version>version-number</Version> <Enabled>true|false</Enabled> <IncludeAPIs>true|false</IncludeAPIs> <RetentionPolicy> <Enabled>true|false</Enabled> <Days>number-of-days</Days> </RetentionPolicy> </MinuteMetrics> <Cors> <CorsRule> <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins> <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods> <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds> <ExposedHeaders>comma-seperated-list-of-response-headers</ExposedHeaders> <AllowedHeaders>comma-seperated-list-of-request-headers</AllowedHeaders> </CorsRule> </Cors> </StorageServiceProperties> """ service_properties_element = ETree.fromstring(xml) service_properties = ServiceProperties() # Logging logging = service_properties_element.find('Logging') if logging is not None: service_properties.logging = Logging() service_properties.logging.version = logging.find('Version').text service_properties.logging.delete = _bool(logging.find('Delete').text) service_properties.logging.read = _bool(logging.find('Read').text) service_properties.logging.write = _bool(logging.find('Write').text) _convert_xml_to_retention_policy(logging.find('RetentionPolicy'), service_properties.logging.retention_policy) # depends on [control=['if'], data=['logging']] # HourMetrics hour_metrics_element = service_properties_element.find('HourMetrics') if hour_metrics_element is not None: service_properties.hour_metrics = Metrics() _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics) # depends on [control=['if'], data=['hour_metrics_element']] # MinuteMetrics minute_metrics_element = service_properties_element.find('MinuteMetrics') if minute_metrics_element is not None: service_properties.minute_metrics = Metrics() _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics) # depends on [control=['if'], data=['minute_metrics_element']] # CORS cors = service_properties_element.find('Cors') if cors is not None: service_properties.cors = list() for rule in cors.findall('CorsRule'): allowed_origins = rule.find('AllowedOrigins').text.split(',') allowed_methods = rule.find('AllowedMethods').text.split(',') max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text) cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds) exposed_headers = rule.find('ExposedHeaders').text if exposed_headers is not None: cors_rule.exposed_headers = exposed_headers.split(',') # depends on [control=['if'], data=['exposed_headers']] allowed_headers = rule.find('AllowedHeaders').text if allowed_headers is not None: cors_rule.allowed_headers = allowed_headers.split(',') # depends on [control=['if'], data=['allowed_headers']] service_properties.cors.append(cors_rule) # depends on [control=['for'], data=['rule']] # depends on [control=['if'], data=['cors']] # Target version target_version = service_properties_element.find('DefaultServiceVersion') if target_version is not None: 
service_properties.target_version = target_version.text # depends on [control=['if'], data=['target_version']] return service_properties
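A small smoke test for the parser above; it needs only the names the function already references (ETree, ServiceProperties, and friends), and the version string is illustrative.

# Minimal document: only DefaultServiceVersion is present, so the optional
# Logging/Metrics/Cors sections are simply skipped by the parser.
xml = (b'<?xml version="1.0" encoding="utf-8"?>'
       b'<StorageServiceProperties>'
       b'<DefaultServiceVersion>2015-04-05</DefaultServiceVersion>'
       b'</StorageServiceProperties>')

props = _convert_xml_to_service_properties(xml)
assert props.target_version == '2015-04-05'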
def _consolidate_repo_sources(sources): ''' Consolidate APT sources. ''' if not isinstance(sources, sourceslist.SourcesList): raise TypeError( '\'{0}\' not a \'{1}\''.format( type(sources), sourceslist.SourcesList ) ) consolidated = {} delete_files = set() base_file = sourceslist.SourceEntry('').file repos = [s for s in sources.list if not s.invalid] for repo in repos: repo.uri = repo.uri.rstrip('/') # future lint: disable=blacklisted-function key = str((getattr(repo, 'architectures', []), repo.disabled, repo.type, repo.uri, repo.dist)) # future lint: enable=blacklisted-function if key in consolidated: combined = consolidated[key] combined_comps = set(repo.comps).union(set(combined.comps)) consolidated[key].comps = list(combined_comps) else: consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line)) if repo.file != base_file: delete_files.add(repo.file) sources.list = list(consolidated.values()) sources.save() for file_ in delete_files: try: os.remove(file_) except OSError: pass return sources
def function[_consolidate_repo_sources, parameter[sources]]: constant[ Consolidate APT sources. ] if <ast.UnaryOp object at 0x7da1b20468f0> begin[:] <ast.Raise object at 0x7da1b2045d50> variable[consolidated] assign[=] dictionary[[], []] variable[delete_files] assign[=] call[name[set], parameter[]] variable[base_file] assign[=] call[name[sourceslist].SourceEntry, parameter[constant[]]].file variable[repos] assign[=] <ast.ListComp object at 0x7da1b21128f0> for taget[name[repo]] in starred[name[repos]] begin[:] name[repo].uri assign[=] call[name[repo].uri.rstrip, parameter[constant[/]]] variable[key] assign[=] call[name[str], parameter[tuple[[<ast.Call object at 0x7da1b2110b20>, <ast.Attribute object at 0x7da1b2110070>, <ast.Attribute object at 0x7da1b2113b20>, <ast.Attribute object at 0x7da1b1cccfa0>, <ast.Attribute object at 0x7da1b1cccd90>]]]] if compare[name[key] in name[consolidated]] begin[:] variable[combined] assign[=] call[name[consolidated]][name[key]] variable[combined_comps] assign[=] call[call[name[set], parameter[name[repo].comps]].union, parameter[call[name[set], parameter[name[combined].comps]]]] call[name[consolidated]][name[key]].comps assign[=] call[name[list], parameter[name[combined_comps]]] if compare[name[repo].file not_equal[!=] name[base_file]] begin[:] call[name[delete_files].add, parameter[name[repo].file]] name[sources].list assign[=] call[name[list], parameter[call[name[consolidated].values, parameter[]]]] call[name[sources].save, parameter[]] for taget[name[file_]] in starred[name[delete_files]] begin[:] <ast.Try object at 0x7da1b1ccdc30> return[name[sources]]
keyword[def] identifier[_consolidate_repo_sources] ( identifier[sources] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[sources] , identifier[sourceslist] . identifier[SourcesList] ): keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[sources] ), identifier[sourceslist] . identifier[SourcesList] ) ) identifier[consolidated] ={} identifier[delete_files] = identifier[set] () identifier[base_file] = identifier[sourceslist] . identifier[SourceEntry] ( literal[string] ). identifier[file] identifier[repos] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[sources] . identifier[list] keyword[if] keyword[not] identifier[s] . identifier[invalid] ] keyword[for] identifier[repo] keyword[in] identifier[repos] : identifier[repo] . identifier[uri] = identifier[repo] . identifier[uri] . identifier[rstrip] ( literal[string] ) identifier[key] = identifier[str] (( identifier[getattr] ( identifier[repo] , literal[string] ,[]), identifier[repo] . identifier[disabled] , identifier[repo] . identifier[type] , identifier[repo] . identifier[uri] , identifier[repo] . identifier[dist] )) keyword[if] identifier[key] keyword[in] identifier[consolidated] : identifier[combined] = identifier[consolidated] [ identifier[key] ] identifier[combined_comps] = identifier[set] ( identifier[repo] . identifier[comps] ). identifier[union] ( identifier[set] ( identifier[combined] . identifier[comps] )) identifier[consolidated] [ identifier[key] ]. identifier[comps] = identifier[list] ( identifier[combined_comps] ) keyword[else] : identifier[consolidated] [ identifier[key] ]= identifier[sourceslist] . identifier[SourceEntry] ( identifier[salt] . identifier[utils] . identifier[pkg] . identifier[deb] . identifier[strip_uri] ( identifier[repo] . identifier[line] )) keyword[if] identifier[repo] . identifier[file] != identifier[base_file] : identifier[delete_files] . identifier[add] ( identifier[repo] . identifier[file] ) identifier[sources] . identifier[list] = identifier[list] ( identifier[consolidated] . identifier[values] ()) identifier[sources] . identifier[save] () keyword[for] identifier[file_] keyword[in] identifier[delete_files] : keyword[try] : identifier[os] . identifier[remove] ( identifier[file_] ) keyword[except] identifier[OSError] : keyword[pass] keyword[return] identifier[sources]
def _consolidate_repo_sources(sources): """ Consolidate APT sources. """ if not isinstance(sources, sourceslist.SourcesList): raise TypeError("'{0}' not a '{1}'".format(type(sources), sourceslist.SourcesList)) # depends on [control=['if'], data=[]] consolidated = {} delete_files = set() base_file = sourceslist.SourceEntry('').file repos = [s for s in sources.list if not s.invalid] for repo in repos: repo.uri = repo.uri.rstrip('/') # future lint: disable=blacklisted-function key = str((getattr(repo, 'architectures', []), repo.disabled, repo.type, repo.uri, repo.dist)) # future lint: enable=blacklisted-function if key in consolidated: combined = consolidated[key] combined_comps = set(repo.comps).union(set(combined.comps)) consolidated[key].comps = list(combined_comps) # depends on [control=['if'], data=['key', 'consolidated']] else: consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line)) if repo.file != base_file: delete_files.add(repo.file) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['repo']] sources.list = list(consolidated.values()) sources.save() for file_ in delete_files: try: os.remove(file_) # depends on [control=['try'], data=[]] except OSError: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['file_']] return sources
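An illustrative call for the Salt helper above; it assumes python-apt's aptsources module is importable and, since the helper saves the consolidated list and deletes leftover files, it should be read as a sketch rather than something to run on a live system.

from aptsources import sourceslist

sources = sourceslist.SourcesList()          # reads sources.list and sources.list.d/*
merged = _consolidate_repo_sources(sources)  # merges duplicate repos, saves, cleans up
for entry in merged.list:
    if not entry.invalid:
        print(entry.line.strip())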
def create_header(self, depth): """Create and return a widget that will be used as a header for the given depth Override this method if you want to have header widgets. The default implementation returns None. You can return None if you do not want a header for the given depth :param depth: the depth level :type depth: int :returns: a Widget that is used for the header or None :rtype: QtGui.QWidget|None :raises: None """ if not (depth >= 0 and depth < len(self._headertexts)): return txt = self._headertexts[depth] if txt is None: return lbl = QtGui.QLabel(txt, self) return lbl
def function[create_header, parameter[self, depth]]: constant[Create and return a widget that will be used as a header for the given depth Override this method if you want to have header widgets. The default implementation returns None. You can return None if you do not want a header for the given depth :param depth: the depth level :type depth: int :returns: a Widget that is used for the header or None :rtype: QtGui.QWidget|None :raises: None ] if <ast.UnaryOp object at 0x7da20c9931f0> begin[:] return[None] variable[txt] assign[=] call[name[self]._headertexts][name[depth]] if compare[name[txt] is constant[None]] begin[:] return[None] variable[lbl] assign[=] call[name[QtGui].QLabel, parameter[name[txt], name[self]]] return[name[lbl]]
keyword[def] identifier[create_header] ( identifier[self] , identifier[depth] ): literal[string] keyword[if] keyword[not] ( identifier[depth] >= literal[int] keyword[and] identifier[depth] < identifier[len] ( identifier[self] . identifier[_headertexts] )): keyword[return] identifier[txt] = identifier[self] . identifier[_headertexts] [ identifier[depth] ] keyword[if] identifier[txt] keyword[is] keyword[None] : keyword[return] identifier[lbl] = identifier[QtGui] . identifier[QLabel] ( identifier[txt] , identifier[self] ) keyword[return] identifier[lbl]
def create_header(self, depth): """Create and return a widget that will be used as a header for the given depth Override this method if you want to have header widgets. The default implementation returns None. You can return None if you do not want a header for the given depth :param depth: the depth level :type depth: int :returns: a Widget that is used for the header or None :rtype: QtGui.QWidget|None :raises: None """ if not (depth >= 0 and depth < len(self._headertexts)): return # depends on [control=['if'], data=[]] txt = self._headertexts[depth] if txt is None: return # depends on [control=['if'], data=[]] lbl = QtGui.QLabel(txt, self) return lbl
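A behavior sketch for the hook above; `view` stands for an instance of the surrounding class, and a QApplication must exist before any widget can be constructed.

app = QtGui.QApplication([])                    # required before creating widgets
view._headertexts = ["Project", None, "Shot"]   # one entry per depth level

view.create_header(0)   # returns QLabel("Project")
view.create_header(1)   # returns None: header explicitly suppressed at this depth
view.create_header(5)   # returns None: depth outside _headertexts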
def wait_for_completion(self, timeout):
    """Waits until the task is done (including all sub-operations)
        with a given timeout in milliseconds; specify -1 for an indefinite wait.

        Note that the VirtualBox/XPCOM/COM/native event queues of the calling
        thread are not processed while waiting. Neglecting event queues may
        have dire consequences (degraded performance, resource hogs,
        deadlocks, etc.); this is especially so for the main thread on
        platforms using XPCOM. Callers are advised to wait for short periods
        and service their event queues between calls, or to create a worker
        thread to do the waiting.

    in timeout of type int
        Maximum time in milliseconds to wait or -1 to wait indefinitely.

    raises :class:`VBoxErrorIprtError`
        Failed to wait for task completion.

    """
    if not isinstance(timeout, baseinteger):
        raise TypeError("timeout can only be an instance of type baseinteger")
    self._call("waitForCompletion",
               in_p=[timeout])
def function[wait_for_completion, parameter[self, timeout]]: constant[Waits until the task is done (including all sub-operations) with a given timeout in milliseconds; specify -1 for an indefinite wait. Note that the VirtualBox/XPCOM/COM/native event queues of the calling thread are not processed while waiting. Neglecting event queues may have dire consequences (degrade performance, resource hogs, deadlocks, etc.), this is specially so for the main thread on platforms using XPCOM. Callers are advised wait for short periods and service their event queues between calls, or to create a worker thread to do the waiting. in timeout of type int Maximum time in milliseconds to wait or -1 to wait indefinitely. raises :class:`VBoxErrorIprtError` Failed to wait for task completion. ] if <ast.UnaryOp object at 0x7da18eb56c20> begin[:] <ast.Raise object at 0x7da18eb54f70> call[name[self]._call, parameter[constant[waitForCompletion]]]
keyword[def] identifier[wait_for_completion] ( identifier[self] , identifier[timeout] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[timeout] , identifier[baseinteger] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[timeout] ])
def wait_for_completion(self, timeout): """Waits until the task is done (including all sub-operations) with a given timeout in milliseconds; specify -1 for an indefinite wait. Note that the VirtualBox/XPCOM/COM/native event queues of the calling thread are not processed while waiting. Neglecting event queues may have dire consequences (degrade performance, resource hogs, deadlocks, etc.), this is specially so for the main thread on platforms using XPCOM. Callers are advised wait for short periods and service their event queues between calls, or to create a worker thread to do the waiting. in timeout of type int Maximum time in milliseconds to wait or -1 to wait indefinitely. raises :class:`VBoxErrorIprtError` Failed to wait for task completion. """ if not isinstance(timeout, baseinteger): raise TypeError('timeout can only be an instance of type baseinteger') # depends on [control=['if'], data=[]] self._call('waitForCompletion', in_p=[timeout])
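Typical calls against the wrapper above, with `progress` standing in for the surrounding pyvbox-style object; the `completed` attribute is assumed from the same IProgress interface.

progress.wait_for_completion(60000)   # block for at most 60 seconds
progress.wait_for_completion(-1)      # block until the task finishes

# Waiting in short slices lets the caller service its event queue between
# calls, as the docstring recommends:
while not progress.completed:
    progress.wait_for_completion(100)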
def get_conf_file(self):
    """
    Get the config from a local config file: first try the cached copy,
    then fall back to the default file.
    """
    for conf_file in [self.collection_rules_file, self.fallback_file]:
        logger.debug("trying to read conf from: " + conf_file)
        conf = self.try_disk(conf_file, self.gpg)

        if not conf:
            continue

        version = conf.get('version', None)
        if version is None:
            raise ValueError("ERROR: Could not find version in json")

        conf['file'] = conf_file
        logger.debug("Success reading config")
        logger.debug(json.dumps(conf))
        return conf

    raise ValueError("ERROR: Unable to download conf or read it from disk!")
def function[get_conf_file, parameter[self]]: constant[ Get config from local config file, first try cache, then fallback. ] for taget[name[conf_file]] in starred[list[[<ast.Attribute object at 0x7da18dc98b50>, <ast.Attribute object at 0x7da18dc996f0>]]] begin[:] call[name[logger].debug, parameter[binary_operation[constant[trying to read conf from: ] + name[conf_file]]]] variable[conf] assign[=] call[name[self].try_disk, parameter[name[conf_file], name[self].gpg]] if <ast.UnaryOp object at 0x7da18dc9a230> begin[:] continue variable[version] assign[=] call[name[conf].get, parameter[constant[version], constant[None]]] if compare[name[version] is constant[None]] begin[:] <ast.Raise object at 0x7da18dc98670> call[name[conf]][constant[file]] assign[=] name[conf_file] call[name[logger].debug, parameter[constant[Success reading config]]] call[name[logger].debug, parameter[call[name[json].dumps, parameter[name[conf]]]]] return[name[conf]] <ast.Raise object at 0x7da18dc9b5e0>
keyword[def] identifier[get_conf_file] ( identifier[self] ): literal[string] keyword[for] identifier[conf_file] keyword[in] [ identifier[self] . identifier[collection_rules_file] , identifier[self] . identifier[fallback_file] ]: identifier[logger] . identifier[debug] ( literal[string] + identifier[conf_file] ) identifier[conf] = identifier[self] . identifier[try_disk] ( identifier[conf_file] , identifier[self] . identifier[gpg] ) keyword[if] keyword[not] identifier[conf] : keyword[continue] identifier[version] = identifier[conf] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[version] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[conf] [ literal[string] ]= identifier[conf_file] identifier[logger] . identifier[debug] ( literal[string] ) identifier[logger] . identifier[debug] ( identifier[json] . identifier[dumps] ( identifier[conf] )) keyword[return] identifier[conf] keyword[raise] identifier[ValueError] ( literal[string] )
def get_conf_file(self): """ Get config from local config file, first try cache, then fallback. """ for conf_file in [self.collection_rules_file, self.fallback_file]: logger.debug('trying to read conf from: ' + conf_file) conf = self.try_disk(conf_file, self.gpg) if not conf: continue # depends on [control=['if'], data=[]] version = conf.get('version', None) if version is None: raise ValueError('ERROR: Could not find version in json') # depends on [control=['if'], data=[]] conf['file'] = conf_file logger.debug('Success reading config') logger.debug(json.dumps(conf)) return conf # depends on [control=['for'], data=['conf_file']] raise ValueError('ERROR: Unable to download conf or read it from disk!')
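A hedged illustration of the lookup order; the attributes belong to the surrounding collector class and the paths are placeholders.

collector.collection_rules_file = "/etc/insights-client/.cache.json"   # tried first
collector.fallback_file = "/etc/insights-client/.fallback.json"        # tried second

conf = collector.get_conf_file()   # first file try_disk() can read that has a version
print(conf["version"], "loaded from", conf["file"])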
def cancel_order(self, order_param): """Cancel an open order. Parameters ---------- order_param : str or Order The order_id or order object to cancel. """ order_id = order_param if isinstance(order_param, zipline.protocol.Order): order_id = order_param.id self.blotter.cancel(order_id)
def function[cancel_order, parameter[self, order_param]]: constant[Cancel an open order. Parameters ---------- order_param : str or Order The order_id or order object to cancel. ] variable[order_id] assign[=] name[order_param] if call[name[isinstance], parameter[name[order_param], name[zipline].protocol.Order]] begin[:] variable[order_id] assign[=] name[order_param].id call[name[self].blotter.cancel, parameter[name[order_id]]]
keyword[def] identifier[cancel_order] ( identifier[self] , identifier[order_param] ): literal[string] identifier[order_id] = identifier[order_param] keyword[if] identifier[isinstance] ( identifier[order_param] , identifier[zipline] . identifier[protocol] . identifier[Order] ): identifier[order_id] = identifier[order_param] . identifier[id] identifier[self] . identifier[blotter] . identifier[cancel] ( identifier[order_id] )
def cancel_order(self, order_param): """Cancel an open order. Parameters ---------- order_param : str or Order The order_id or order object to cancel. """ order_id = order_param if isinstance(order_param, zipline.protocol.Order): order_id = order_param.id # depends on [control=['if'], data=[]] self.blotter.cancel(order_id)
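Inside a zipline algorithm the method is normally reached through zipline.api; a sketch showing both accepted argument types.

from zipline.api import cancel_order, get_open_orders, order, symbol

def handle_data(context, data):
    oid = order(symbol('AAPL'), 10)   # order() returns the new order's id
    cancel_order(oid)                 # cancel by id...

    for open_order in get_open_orders().get(symbol('AAPL'), []):
        cancel_order(open_order)      # ...or by Order object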
def _cas_1(self):
    '''1 - The desired structure is entirely contained within one image.'''
    lonc = self._format_lon(self.lonm)
    latc = self._format_lat(self.latm)
    img = self._format_name_map(lonc, latc)
    img_map = BinaryTable(img, self.path_pdsfiles)
    return img_map.extract_grid(self.lonm, self.lonM, self.latm, self.latM)
def function[_cas_1, parameter[self]]: constant[1 - The desired structure is entirely contained into one image.] variable[lonc] assign[=] call[name[self]._format_lon, parameter[name[self].lonm]] variable[latc] assign[=] call[name[self]._format_lat, parameter[name[self].latm]] variable[img] assign[=] call[name[self]._format_name_map, parameter[name[lonc], name[latc]]] variable[img_map] assign[=] call[name[BinaryTable], parameter[name[img], name[self].path_pdsfiles]] return[call[name[img_map].extract_grid, parameter[name[self].lonm, name[self].lonM, name[self].latm, name[self].latM]]]
keyword[def] identifier[_cas_1] ( identifier[self] ): literal[string] identifier[lonc] = identifier[self] . identifier[_format_lon] ( identifier[self] . identifier[lonm] ) identifier[latc] = identifier[self] . identifier[_format_lat] ( identifier[self] . identifier[latm] ) identifier[img] = identifier[self] . identifier[_format_name_map] ( identifier[lonc] , identifier[latc] ) identifier[img_map] = identifier[BinaryTable] ( identifier[img] , identifier[self] . identifier[path_pdsfiles] ) keyword[return] identifier[img_map] . identifier[extract_grid] ( identifier[self] . identifier[lonm] , identifier[self] . identifier[lonM] , identifier[self] . identifier[latm] , identifier[self] . identifier[latM] )
def _cas_1(self): """1 - The desired structure is entirely contained into one image.""" lonc = self._format_lon(self.lonm) latc = self._format_lat(self.latm) img = self._format_name_map(lonc, latc) img_map = BinaryTable(img, self.path_pdsfiles) return img_map.extract_grid(self.lonm, self.lonM, self.latm, self.latM)
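A comment sketch of the flow for a bounding box that fits in a single map tile; the coordinate values and label formats are illustrative, not taken from the library.

# Assumed flow for, say, lonm=10.2, lonM=10.8, latm=-20.9, latM=-20.1:
#   self._format_lon(10.2)   -> longitude label used in the map file name
#   self._format_lat(-20.9)  -> matching latitude label
#   self._format_name_map()  -> name of the single image covering the box
#   BinaryTable(...).extract_grid(...) -> the grid clipped to the request
grid = mapper._cas_1()   # `mapper` is an instance of the surrounding class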
def export_to_xml(self, block, xmlfile):
    """
    Export the block to XML, writing the XML to `xmlfile`.
    """
    root = etree.Element("unknown_root", nsmap=XML_NAMESPACES)
    tree = etree.ElementTree(root)
    block.add_xml_to_node(root)
    # write asides as children of the root node
    for aside in self.get_asides(block):
        if aside.needs_serialization():
            aside_node = etree.Element("unknown_root", nsmap=XML_NAMESPACES)
            aside.add_xml_to_node(aside_node)
            root.append(aside_node)
    tree.write(xmlfile, xml_declaration=True, pretty_print=True, encoding='utf-8')
def function[export_to_xml, parameter[self, block, xmlfile]]: constant[ Export the block to XML, writing the XML to `xmlfile`. ] variable[root] assign[=] call[name[etree].Element, parameter[constant[unknown_root]]] variable[tree] assign[=] call[name[etree].ElementTree, parameter[name[root]]] call[name[block].add_xml_to_node, parameter[name[root]]] for taget[name[aside]] in starred[call[name[self].get_asides, parameter[name[block]]]] begin[:] if call[name[aside].needs_serialization, parameter[]] begin[:] variable[aside_node] assign[=] call[name[etree].Element, parameter[constant[unknown_root]]] call[name[aside].add_xml_to_node, parameter[name[aside_node]]] call[name[block].append, parameter[name[aside_node]]] call[name[tree].write, parameter[name[xmlfile]]]
keyword[def] identifier[export_to_xml] ( identifier[self] , identifier[block] , identifier[xmlfile] ): literal[string] identifier[root] = identifier[etree] . identifier[Element] ( literal[string] , identifier[nsmap] = identifier[XML_NAMESPACES] ) identifier[tree] = identifier[etree] . identifier[ElementTree] ( identifier[root] ) identifier[block] . identifier[add_xml_to_node] ( identifier[root] ) keyword[for] identifier[aside] keyword[in] identifier[self] . identifier[get_asides] ( identifier[block] ): keyword[if] identifier[aside] . identifier[needs_serialization] (): identifier[aside_node] = identifier[etree] . identifier[Element] ( literal[string] , identifier[nsmap] = identifier[XML_NAMESPACES] ) identifier[aside] . identifier[add_xml_to_node] ( identifier[aside_node] ) identifier[block] . identifier[append] ( identifier[aside_node] ) identifier[tree] . identifier[write] ( identifier[xmlfile] , identifier[xml_declaration] = keyword[True] , identifier[pretty_print] = keyword[True] , identifier[encoding] = literal[string] )
def export_to_xml(self, block, xmlfile): """ Export the block to XML, writing the XML to `xmlfile`. """ root = etree.Element('unknown_root', nsmap=XML_NAMESPACES) tree = etree.ElementTree(root) block.add_xml_to_node(root) # write asides as children for aside in self.get_asides(block): if aside.needs_serialization(): aside_node = etree.Element('unknown_root', nsmap=XML_NAMESPACES) aside.add_xml_to_node(aside_node) block.append(aside_node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['aside']] tree.write(xmlfile, xml_declaration=True, pretty_print=True, encoding='utf-8')
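A self-contained sketch of the lxml serialization pattern used above, with an illustrative namespace map standing in for XML_NAMESPACES:

from io import BytesIO
from lxml import etree

XML_NAMESPACES = {'xblock': 'http://example.com/xblock'}  # illustrative nsmap
root = etree.Element('unknown_root', nsmap=XML_NAMESPACES)
etree.SubElement(root, 'child').text = 'payload'
buf = BytesIO()
etree.ElementTree(root).write(buf, xml_declaration=True, pretty_print=True, encoding='utf-8')
print(buf.getvalue().decode('utf-8'))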
def getInstitutions(self, tags = None, seperator = ";", _getTag = False): """Returns a list with the names of the institution. The optional arguments are ignored # Returns `list [str]` > A list with 1 entry the name of the institution """ if tags is None: tags = [] elif isinstance(tags, str): tags = [tags] for k in self.keys(): if 'institution' in k.lower() and k not in tags: tags.append(k) return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag)
def function[getInstitutions, parameter[self, tags, seperator, _getTag]]: constant[Returns a list with the names of the institution. The optional arguments are ignored # Returns `list [str]` > A list with 1 entry the name of the institution ] if compare[name[tags] is constant[None]] begin[:] variable[tags] assign[=] list[[]] for taget[name[k]] in starred[call[name[self].keys, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b0ef43a0> begin[:] call[name[tags].append, parameter[name[k]]] return[call[call[name[super], parameter[]].getInvestigators, parameter[]]]
keyword[def] identifier[getInstitutions] ( identifier[self] , identifier[tags] = keyword[None] , identifier[seperator] = literal[string] , identifier[_getTag] = keyword[False] ): literal[string] keyword[if] identifier[tags] keyword[is] keyword[None] : identifier[tags] =[] keyword[elif] identifier[isinstance] ( identifier[tags] , identifier[str] ): identifier[tags] =[ identifier[tags] ] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[keys] (): keyword[if] literal[string] keyword[in] identifier[k] . identifier[lower] () keyword[and] identifier[k] keyword[not] keyword[in] identifier[tags] : identifier[tags] . identifier[append] ( identifier[k] ) keyword[return] identifier[super] (). identifier[getInvestigators] ( identifier[tags] = identifier[tags] , identifier[seperator] = identifier[seperator] , identifier[_getTag] = identifier[_getTag] )
def getInstitutions(self, tags=None, seperator=';', _getTag=False): """Returns a list with the names of the institution. The optional arguments are ignored # Returns `list [str]` > A list with 1 entry the name of the institution """ if tags is None: tags = [] # depends on [control=['if'], data=['tags']] elif isinstance(tags, str): tags = [tags] # depends on [control=['if'], data=[]] for k in self.keys(): if 'institution' in k.lower() and k not in tags: tags.append(k) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] return super().getInvestigators(tags=tags, seperator=seperator, _getTag=_getTag)
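The tag collection above boils down to a case-insensitive scan of the record's keys; a stand-alone sketch of that step with made-up field names:

record = {'Lead Institution': 'MIT', 'Partner institution': 'ETH', 'title': 'survey'}
tags = [k for k in record if 'institution' in k.lower()]
print(tags)  # ['Lead Institution', 'Partner institution']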
def return_hdr(self): """Return the header for further use. Returns ------- subj_id : str subject identification code start_time : datetime start time of the dataset s_freq : float sampling frequency chan_name : list of str list of all the channels n_samples : int number of samples in the dataset orig : dict additional information taken directly from the header Notes ----- It only reads hdf5 matlab files and the VARiable needs to be called 'data' h5py is necessary for this function """ # fieldtrip does not have this information orig = dict() subj_id = str() start_time = datetime.fromordinal(1) # fake try: ft_data = loadmat(self.filename, struct_as_record=True, squeeze_me=True) if VAR not in ft_data: raise KeyError('Save the FieldTrip variable as ''{}''' ''.format(VAR)) ft_data = ft_data[VAR] s_freq = ft_data['fsample'].astype('float64').item() n_samples = ft_data['trial'].item().shape[1] chan_name = list(ft_data['label'].item()) except NotImplementedError: with File(self.filename) as f: if VAR not in f.keys(): raise KeyError('Save the FieldTrip variable as ''{}''' ''.format(VAR)) s_freq = int(f[VAR]['fsample'].value.squeeze()) chan_name = read_hdf5_chan_name(f, f[VAR]['label']) n_samples = int(around(f[f[VAR]['trial'][0].item()].shape[0])) return subj_id, start_time, s_freq, chan_name, n_samples, orig
def function[return_hdr, parameter[self]]: constant[Return the header for further use. Returns ------- subj_id : str subject identification code start_time : datetime start time of the dataset s_freq : float sampling frequency chan_name : list of str list of all the channels n_samples : int number of samples in the dataset orig : dict additional information taken directly from the header Notes ----- It only reads hdf5 matlab files and the VARiable needs to be called 'data' h5py is necessary for this function ] variable[orig] assign[=] call[name[dict], parameter[]] variable[subj_id] assign[=] call[name[str], parameter[]] variable[start_time] assign[=] call[name[datetime].fromordinal, parameter[constant[1]]] <ast.Try object at 0x7da1b0e71870> return[tuple[[<ast.Name object at 0x7da1b0e8e6b0>, <ast.Name object at 0x7da1b0e8ea40>, <ast.Name object at 0x7da1b0e8c730>, <ast.Name object at 0x7da1b0e8f820>, <ast.Name object at 0x7da1b0e8e7a0>, <ast.Name object at 0x7da1b0e8f1c0>]]]
keyword[def] identifier[return_hdr] ( identifier[self] ): literal[string] identifier[orig] = identifier[dict] () identifier[subj_id] = identifier[str] () identifier[start_time] = identifier[datetime] . identifier[fromordinal] ( literal[int] ) keyword[try] : identifier[ft_data] = identifier[loadmat] ( identifier[self] . identifier[filename] , identifier[struct_as_record] = keyword[True] , identifier[squeeze_me] = keyword[True] ) keyword[if] identifier[VAR] keyword[not] keyword[in] identifier[ft_data] : keyword[raise] identifier[KeyError] ( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[VAR] )) identifier[ft_data] = identifier[ft_data] [ identifier[VAR] ] identifier[s_freq] = identifier[ft_data] [ literal[string] ]. identifier[astype] ( literal[string] ). identifier[item] () identifier[n_samples] = identifier[ft_data] [ literal[string] ]. identifier[item] (). identifier[shape] [ literal[int] ] identifier[chan_name] = identifier[list] ( identifier[ft_data] [ literal[string] ]. identifier[item] ()) keyword[except] identifier[NotImplementedError] : keyword[with] identifier[File] ( identifier[self] . identifier[filename] ) keyword[as] identifier[f] : keyword[if] identifier[VAR] keyword[not] keyword[in] identifier[f] . identifier[keys] (): keyword[raise] identifier[KeyError] ( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[VAR] )) identifier[s_freq] = identifier[int] ( identifier[f] [ identifier[VAR] ][ literal[string] ]. identifier[value] . identifier[squeeze] ()) identifier[chan_name] = identifier[read_hdf5_chan_name] ( identifier[f] , identifier[f] [ identifier[VAR] ][ literal[string] ]) identifier[n_samples] = identifier[int] ( identifier[around] ( identifier[f] [ identifier[f] [ identifier[VAR] ][ literal[string] ][ literal[int] ]. identifier[item] ()]. identifier[shape] [ literal[int] ])) keyword[return] identifier[subj_id] , identifier[start_time] , identifier[s_freq] , identifier[chan_name] , identifier[n_samples] , identifier[orig]
def return_hdr(self): """Return the header for further use. Returns ------- subj_id : str subject identification code start_time : datetime start time of the dataset s_freq : float sampling frequency chan_name : list of str list of all the channels n_samples : int number of samples in the dataset orig : dict additional information taken directly from the header Notes ----- It only reads hdf5 matlab files and the VARiable needs to be called 'data' h5py is necessary for this function """ # fieldtrip does not have this information orig = dict() subj_id = str() start_time = datetime.fromordinal(1) # fake try: ft_data = loadmat(self.filename, struct_as_record=True, squeeze_me=True) if VAR not in ft_data: raise KeyError('Save the FieldTrip variable as {}'.format(VAR)) # depends on [control=['if'], data=['VAR']] ft_data = ft_data[VAR] s_freq = ft_data['fsample'].astype('float64').item() n_samples = ft_data['trial'].item().shape[1] chan_name = list(ft_data['label'].item()) # depends on [control=['try'], data=[]] except NotImplementedError: with File(self.filename) as f: if VAR not in f.keys(): raise KeyError('Save the FieldTrip variable as {}'.format(VAR)) # depends on [control=['if'], data=['VAR']] s_freq = int(f[VAR]['fsample'].value.squeeze()) chan_name = read_hdf5_chan_name(f, f[VAR]['label']) n_samples = int(around(f[f[VAR]['trial'][0].item()].shape[0])) # depends on [control=['with'], data=['f']] # depends on [control=['except'], data=[]] return (subj_id, start_time, s_freq, chan_name, n_samples, orig)
def _calc_sfnr(volume, mask, ): """ Calculate the SFNR of a volume Calculates the Signal to Fluctuation Noise Ratio, the mean divided by the detrended standard deviation of each brain voxel. Based on Friedman and Glover, 2006 Parameters ---------- volume : 4d array, float Take a volume time series mask : 3d array, binary A binary mask the same size as the volume Returns ------- snr : float The SFNR of the volume """ # Make a matrix of brain voxels by time brain_voxels = volume[mask > 0] # Take the means of each voxel over time mean_voxels = np.nanmean(brain_voxels, 1) # Detrend (second order polynomial) the voxels over time and then # calculate the standard deviation. order = 2 seq = np.linspace(1, brain_voxels.shape[1], brain_voxels.shape[1]) detrend_poly = np.polyfit(seq, brain_voxels.transpose(), order) # Detrend for each voxel detrend_voxels = np.zeros(brain_voxels.shape) for voxel in range(brain_voxels.shape[0]): trend = detrend_poly[0, voxel] * seq ** 2 + detrend_poly[1, voxel] * seq + detrend_poly[2, voxel] detrend_voxels[voxel, :] = brain_voxels[voxel, :] - trend std_voxels = np.nanstd(detrend_voxels, 1) # Calculate the sfnr of all voxels across the brain sfnr_voxels = mean_voxels / std_voxels # Return the average sfnr return np.mean(sfnr_voxels)
def function[_calc_sfnr, parameter[volume, mask]]: constant[ Calculate the SFNR of a volume Calculates the Signal to Fluctuation Noise Ratio, the mean divided by the detrended standard deviation of each brain voxel. Based on Friedman and Glover, 2006 Parameters ---------- volume : 4d array, float Take a volume time series mask : 3d array, binary A binary mask the same size as the volume Returns ------- snr : float The SFNR of the volume ] variable[brain_voxels] assign[=] call[name[volume]][compare[name[mask] greater[>] constant[0]]] variable[mean_voxels] assign[=] call[name[np].nanmean, parameter[name[brain_voxels], constant[1]]] variable[order] assign[=] constant[2] variable[seq] assign[=] call[name[np].linspace, parameter[constant[1], call[name[brain_voxels].shape][constant[1]], call[name[brain_voxels].shape][constant[1]]]] variable[detrend_poly] assign[=] call[name[np].polyfit, parameter[name[seq], call[name[brain_voxels].transpose, parameter[]], name[order]]] variable[detrend_voxels] assign[=] call[name[np].zeros, parameter[name[brain_voxels].shape]] for taget[name[voxel]] in starred[call[name[range], parameter[call[name[brain_voxels].shape][constant[0]]]]] begin[:] variable[trend] assign[=] binary_operation[binary_operation[binary_operation[call[name[detrend_poly]][tuple[[<ast.Constant object at 0x7da1b0731840>, <ast.Name object at 0x7da1b0731870>]]] * binary_operation[name[seq] ** constant[2]]] + binary_operation[call[name[detrend_poly]][tuple[[<ast.Constant object at 0x7da1b0733e20>, <ast.Name object at 0x7da1b0733df0>]]] * name[seq]]] + call[name[detrend_poly]][tuple[[<ast.Constant object at 0x7da1b0733d00>, <ast.Name object at 0x7da1b0733cd0>]]]] call[name[detrend_voxels]][tuple[[<ast.Name object at 0x7da1b0733be0>, <ast.Slice object at 0x7da1b0733bb0>]]] assign[=] binary_operation[call[name[brain_voxels]][tuple[[<ast.Name object at 0x7da1b0733ac0>, <ast.Slice object at 0x7da1b0733a90>]]] - name[trend]] variable[std_voxels] assign[=] call[name[np].nanstd, parameter[name[detrend_voxels], constant[1]]] variable[sfnr_voxels] assign[=] binary_operation[name[mean_voxels] / name[std_voxels]] return[call[name[np].mean, parameter[name[sfnr_voxels]]]]
keyword[def] identifier[_calc_sfnr] ( identifier[volume] , identifier[mask] , ): literal[string] identifier[brain_voxels] = identifier[volume] [ identifier[mask] > literal[int] ] identifier[mean_voxels] = identifier[np] . identifier[nanmean] ( identifier[brain_voxels] , literal[int] ) identifier[order] = literal[int] identifier[seq] = identifier[np] . identifier[linspace] ( literal[int] , identifier[brain_voxels] . identifier[shape] [ literal[int] ], identifier[brain_voxels] . identifier[shape] [ literal[int] ]) identifier[detrend_poly] = identifier[np] . identifier[polyfit] ( identifier[seq] , identifier[brain_voxels] . identifier[transpose] (), identifier[order] ) identifier[detrend_voxels] = identifier[np] . identifier[zeros] ( identifier[brain_voxels] . identifier[shape] ) keyword[for] identifier[voxel] keyword[in] identifier[range] ( identifier[brain_voxels] . identifier[shape] [ literal[int] ]): identifier[trend] = identifier[detrend_poly] [ literal[int] , identifier[voxel] ]* identifier[seq] ** literal[int] + identifier[detrend_poly] [ literal[int] , identifier[voxel] ]* identifier[seq] + identifier[detrend_poly] [ literal[int] , identifier[voxel] ] identifier[detrend_voxels] [ identifier[voxel] ,:]= identifier[brain_voxels] [ identifier[voxel] ,:]- identifier[trend] identifier[std_voxels] = identifier[np] . identifier[nanstd] ( identifier[detrend_voxels] , literal[int] ) identifier[sfnr_voxels] = identifier[mean_voxels] / identifier[std_voxels] keyword[return] identifier[np] . identifier[mean] ( identifier[sfnr_voxels] )
def _calc_sfnr(volume, mask): """ Calculate the SFNR of a volume Calculates the Signal to Fluctuation Noise Ratio, the mean divided by the detrended standard deviation of each brain voxel. Based on Friedman and Glover, 2006 Parameters ---------- volume : 4d array, float Take a volume time series mask : 3d array, binary A binary mask the same size as the volume Returns ------- snr : float The SFNR of the volume """ brain_voxels = volume[mask > 0] # Make a matrix of brain voxels by time mean_voxels = np.nanmean(brain_voxels, 1) # Take the means of each voxel over time # Detrend (second order polynomial) the voxels over time and then order = 2 # calculate the standard deviation. seq = np.linspace(1, brain_voxels.shape[1], brain_voxels.shape[1]) detrend_poly = np.polyfit(seq, brain_voxels.transpose(), order) detrend_voxels = np.zeros(brain_voxels.shape) # Detrend for each voxel for voxel in range(brain_voxels.shape[0]): trend = detrend_poly[0, voxel] * seq ** 2 + detrend_poly[1, voxel] * seq + detrend_poly[2, voxel] detrend_voxels[voxel, :] = brain_voxels[voxel, :] - trend # depends on [control=['for'], data=['voxel']] std_voxels = np.nanstd(detrend_voxels, 1) sfnr_voxels = mean_voxels / std_voxels # Calculate the sfnr of all voxels across the brain return np.mean(sfnr_voxels) # Return the average sfnr
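A quick sanity check of _calc_sfnr on synthetic data, with shapes following the docstring (x, y, z, time) and values chosen so the expected SFNR is roughly mean/std = 100:

import numpy as np

rng = np.random.default_rng(0)
volume = rng.normal(loc=100.0, scale=1.0, size=(4, 4, 4, 200))
mask = np.ones((4, 4, 4), dtype=int)
print(_calc_sfnr(volume, mask))  # close to 100 once the polynomial trend is removed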
def cull_portals(self, stat, threshold=0.5, comparator=ge): """Delete portals whose stat >= ``threshold`` (default 0.5). Optional argument ``comparator`` will replace >= as the test for whether to cull. You can use the name of a stored function. """ comparator = self._lookup_comparator(comparator) dead = [] for u in self.portal: for v in self.portal[u]: if stat in self.portal[u][v] and comparator( self.portal[u][v][stat], threshold ): dead.append((u, v)) self.remove_edges_from(dead) return self
def function[cull_portals, parameter[self, stat, threshold, comparator]]: constant[Delete portals whose stat >= ``threshold`` (default 0.5). Optional argument ``comparator`` will replace >= as the test for whether to cull. You can use the name of a stored function. ] variable[comparator] assign[=] call[name[self]._lookup_comparator, parameter[name[comparator]]] variable[dead] assign[=] list[[]] for taget[name[u]] in starred[name[self].portal] begin[:] for taget[name[v]] in starred[call[name[self].portal][name[u]]] begin[:] if <ast.BoolOp object at 0x7da2047e98a0> begin[:] call[name[dead].append, parameter[tuple[[<ast.Name object at 0x7da20c7cb910>, <ast.Name object at 0x7da20c7cbee0>]]]] call[name[self].remove_edges_from, parameter[name[dead]]] return[name[self]]
keyword[def] identifier[cull_portals] ( identifier[self] , identifier[stat] , identifier[threshold] = literal[int] , identifier[comparator] = identifier[ge] ): literal[string] identifier[comparator] = identifier[self] . identifier[_lookup_comparator] ( identifier[comparator] ) identifier[dead] =[] keyword[for] identifier[u] keyword[in] identifier[self] . identifier[portal] : keyword[for] identifier[v] keyword[in] identifier[self] . identifier[portal] [ identifier[u] ]: keyword[if] identifier[stat] keyword[in] identifier[self] . identifier[portal] [ identifier[u] ][ identifier[v] ] keyword[and] identifier[comparator] ( identifier[self] . identifier[portal] [ identifier[u] ][ identifier[v] ][ identifier[stat] ], identifier[threshold] ): identifier[dead] . identifier[append] (( identifier[u] , identifier[v] )) identifier[self] . identifier[remove_edges_from] ( identifier[dead] ) keyword[return] identifier[self]
def cull_portals(self, stat, threshold=0.5, comparator=ge): """Delete portals whose stat >= ``threshold`` (default 0.5). Optional argument ``comparator`` will replace >= as the test for whether to cull. You can use the name of a stored function. """ comparator = self._lookup_comparator(comparator) dead = [] for u in self.portal: for v in self.portal[u]: if stat in self.portal[u][v] and comparator(self.portal[u][v][stat], threshold): dead.append((u, v)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']] # depends on [control=['for'], data=['u']] self.remove_edges_from(dead) return self
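The stat test reduces to comparator(value, threshold) over every edge; a stand-alone sketch of that filtering step, with a plain nested dict standing in for the portal mapping and a made-up 'hazard' stat:

from operator import ge

portal = {'a': {'b': {'hazard': 0.7}, 'c': {}}, 'b': {'a': {'hazard': 0.2}}}
dead = [(u, v) for u in portal for v in portal[u]
        if 'hazard' in portal[u][v] and ge(portal[u][v]['hazard'], 0.5)]
print(dead)  # [('a', 'b')]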
def apply_train(self, df:DataFrame): "Transform `self.cat_names` columns to categorical." self.categories = {} for n in self.cat_names: df.loc[:,n] = df.loc[:,n].astype('category').cat.as_ordered() self.categories[n] = df[n].cat.categories
def function[apply_train, parameter[self, df]]: constant[Transform `self.cat_names` columns to categorical.] name[self].categories assign[=] dictionary[[], []] for taget[name[n]] in starred[name[self].cat_names] begin[:] call[name[df].loc][tuple[[<ast.Slice object at 0x7da1b1ddb910>, <ast.Name object at 0x7da1b1dda0b0>]]] assign[=] call[call[call[name[df].loc][tuple[[<ast.Slice object at 0x7da1b1dda920>, <ast.Name object at 0x7da1b1dd8d00>]]].astype, parameter[constant[category]]].cat.as_ordered, parameter[]] call[name[self].categories][name[n]] assign[=] call[name[df]][name[n]].cat.categories
keyword[def] identifier[apply_train] ( identifier[self] , identifier[df] : identifier[DataFrame] ): literal[string] identifier[self] . identifier[categories] ={} keyword[for] identifier[n] keyword[in] identifier[self] . identifier[cat_names] : identifier[df] . identifier[loc] [:, identifier[n] ]= identifier[df] . identifier[loc] [:, identifier[n] ]. identifier[astype] ( literal[string] ). identifier[cat] . identifier[as_ordered] () identifier[self] . identifier[categories] [ identifier[n] ]= identifier[df] [ identifier[n] ]. identifier[cat] . identifier[categories]
def apply_train(self, df: DataFrame): """Transform `self.cat_names` columns to categorical.""" self.categories = {} for n in self.cat_names: df.loc[:, n] = df.loc[:, n].astype('category').cat.as_ordered() self.categories[n] = df[n].cat.categories # depends on [control=['for'], data=['n']]
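The same transform on a plain DataFrame (column names are illustrative; plain df[n] assignment is used here for brevity):

import pandas as pd

df = pd.DataFrame({'color': ['red', 'blue', 'red'], 'size': ['S', 'M', 'L']})
categories = {}
for n in ['color', 'size']:
    df[n] = df[n].astype('category').cat.as_ordered()
    categories[n] = df[n].cat.categories
print(categories['color'])  # Index(['blue', 'red'], dtype='object')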
def _register_jobs(self): """ This method extracts only the "ConcreteJob" class from modules that were collected by ConfigReader._get_modules(). And this method calls Subject.notify() to append "ConcreteJob" classes to JobObserver.jobs. """ # job_name is hard-coded job_name = 'ConcreteJob' modules = self._get_modules() for section, options in self.config.items(): if section == 'global': continue try: name = options['module'] except KeyError: raise ConfigMissingValue(section, 'module') try: job = getattr(modules[name], job_name) self.notify(name, job) except KeyError: raise NotSupportedError(name)
def function[_register_jobs, parameter[self]]: constant[ This method extracts only the "ConcreteJob" class from modules that were collected by ConfigReader._get_modules(). And this method calls Subject.notify() to append "ConcreteJob" classes to JobObserver.jobs. ] variable[job_name] assign[=] constant[ConcreteJob] variable[modules] assign[=] call[name[self]._get_modules, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b0a60d90>, <ast.Name object at 0x7da1b0a60730>]]] in starred[call[name[self].config.items, parameter[]]] begin[:] if compare[name[section] equal[==] constant[global]] begin[:] continue <ast.Try object at 0x7da1b0a606a0> <ast.Try object at 0x7da1b0a61c60>
keyword[def] identifier[_register_jobs] ( identifier[self] ): literal[string] identifier[job_name] = literal[string] identifier[modules] = identifier[self] . identifier[_get_modules] () keyword[for] identifier[section] , identifier[options] keyword[in] identifier[self] . identifier[config] . identifier[items] (): keyword[if] identifier[section] == literal[string] : keyword[continue] keyword[try] : identifier[name] = identifier[options] [ literal[string] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[ConfigMissingValue] ( identifier[section] , literal[string] ) keyword[try] : identifier[job] = identifier[getattr] ( identifier[modules] [ identifier[name] ], identifier[job_name] ) identifier[self] . identifier[notify] ( identifier[name] , identifier[job] ) keyword[except] identifier[KeyError] : keyword[raise] identifier[NotSupportedError] ( identifier[name] )
def _register_jobs(self): """ This method extracts only the "ConcreteJob" class from modules that were collected by ConfigReader._get_modules(). And this method calls Subject.notify() to append "ConcreteJob" classes to JobObserver.jobs. """ # job_name is hard-coded job_name = 'ConcreteJob' modules = self._get_modules() for (section, options) in self.config.items(): if section == 'global': continue # depends on [control=['if'], data=[]] try: name = options['module'] # depends on [control=['try'], data=[]] except KeyError: raise ConfigMissingValue(section, 'module') # depends on [control=['except'], data=[]] try: job = getattr(modules[name], job_name) self.notify(name, job) # depends on [control=['try'], data=[]] except KeyError: raise NotSupportedError(name) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
def _enum_from_op_string(op_string): """Convert a string representation of a binary operator to an enum. These enums come from the protobuf message definition ``StructuredQuery.FieldFilter.Operator``. Args: op_string (str): A comparison operation in the form of a string. Acceptable values are ``<``, ``<=``, ``==``, ``>=`` and ``>``. Returns: int: The enum corresponding to ``op_string``. Raises: ValueError: If ``op_string`` is not a valid operator. """ try: return _COMPARISON_OPERATORS[op_string] except KeyError: choices = ", ".join(sorted(_COMPARISON_OPERATORS.keys())) msg = _BAD_OP_STRING.format(op_string, choices) raise ValueError(msg)
def function[_enum_from_op_string, parameter[op_string]]: constant[Convert a string representation of a binary operator to an enum. These enums come from the protobuf message definition ``StructuredQuery.FieldFilter.Operator``. Args: op_string (str): A comparison operation in the form of a string. Acceptable values are ``<``, ``<=``, ``==``, ``>=`` and ``>``. Returns: int: The enum corresponding to ``op_string``. Raises: ValueError: If ``op_string`` is not a valid operator. ] <ast.Try object at 0x7da20e954a00>
keyword[def] identifier[_enum_from_op_string] ( identifier[op_string] ): literal[string] keyword[try] : keyword[return] identifier[_COMPARISON_OPERATORS] [ identifier[op_string] ] keyword[except] identifier[KeyError] : identifier[choices] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[_COMPARISON_OPERATORS] . identifier[keys] ())) identifier[msg] = identifier[_BAD_OP_STRING] . identifier[format] ( identifier[op_string] , identifier[choices] ) keyword[raise] identifier[ValueError] ( identifier[msg] )
def _enum_from_op_string(op_string): """Convert a string representation of a binary operator to an enum. These enums come from the protobuf message definition ``StructuredQuery.FieldFilter.Operator``. Args: op_string (str): A comparison operation in the form of a string. Acceptable values are ``<``, ``<=``, ``==``, ``>=`` and ``>``. Returns: int: The enum corresponding to ``op_string``. Raises: ValueError: If ``op_string`` is not a valid operator. """ try: return _COMPARISON_OPERATORS[op_string] # depends on [control=['try'], data=[]] except KeyError: choices = ', '.join(sorted(_COMPARISON_OPERATORS.keys())) msg = _BAD_OP_STRING.format(op_string, choices) raise ValueError(msg) # depends on [control=['except'], data=[]]
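A runnable sketch with illustrative stand-ins for the module-level lookup table and error template (the real enum values come from the protobuf definition):

_COMPARISON_OPERATORS = {'<': 1, '<=': 2, '==': 3, '>=': 4, '>': 5}  # illustrative values
_BAD_OP_STRING = 'Operator string {!r} is invalid. Valid choices are: {}.'

print(_enum_from_op_string('<='))  # 2
_enum_from_op_string('!=')         # raises ValueError listing the valid choices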
def full_path(path): """Get the real path, expanding links and bashisms""" return os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
def function[full_path, parameter[path]]: constant[Get the real path, expanding links and bashisms] return[call[name[os].path.realpath, parameter[call[name[os].path.expanduser, parameter[call[name[os].path.expandvars, parameter[name[path]]]]]]]]
keyword[def] identifier[full_path] ( identifier[path] ): literal[string] keyword[return] identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[os] . identifier[path] . identifier[expandvars] ( identifier[path] )))
def full_path(path): """Get the real path, expanding links and bashisms""" return os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
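For example, on a POSIX box where $HOME is /home/me and nothing along the path is a symlink (expandvars runs first, then expanduser, then realpath):

import os

os.environ['PROJ'] = 'projects'
print(full_path('~/$PROJ/./demo'))  # /home/me/projects/demo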
def issubset(list1, list2): """ Examples: >>> issubset([], [65, 66, 67]) True >>> issubset([65], [65, 66, 67]) True >>> issubset([65, 66], [65, 66, 67]) True >>> issubset([65, 67], [65, 66, 67]) False """ n = len(list1) for startpos in range(len(list2) - n + 1): if list2[startpos:startpos+n] == list1: return True return False
def function[issubset, parameter[list1, list2]]: constant[ Examples: >>> issubset([], [65, 66, 67]) True >>> issubset([65], [65, 66, 67]) True >>> issubset([65, 66], [65, 66, 67]) True >>> issubset([65, 67], [65, 66, 67]) False ] variable[n] assign[=] call[name[len], parameter[name[list1]]] for taget[name[startpos]] in starred[call[name[range], parameter[binary_operation[binary_operation[call[name[len], parameter[name[list2]]] - name[n]] + constant[1]]]]] begin[:] if compare[call[name[list2]][<ast.Slice object at 0x7da18dc07bb0>] equal[==] name[list1]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[issubset] ( identifier[list1] , identifier[list2] ): literal[string] identifier[n] = identifier[len] ( identifier[list1] ) keyword[for] identifier[startpos] keyword[in] identifier[range] ( identifier[len] ( identifier[list2] )- identifier[n] + literal[int] ): keyword[if] identifier[list2] [ identifier[startpos] : identifier[startpos] + identifier[n] ]== identifier[list1] : keyword[return] keyword[True] keyword[return] keyword[False]
def issubset(list1, list2): """ Examples: >>> issubset([], [65, 66, 67]) True >>> issubset([65], [65, 66, 67]) True >>> issubset([65, 66], [65, 66, 67]) True >>> issubset([65, 67], [65, 66, 67]) False """ n = len(list1) for startpos in range(len(list2) - n + 1): if list2[startpos:startpos + n] == list1: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['startpos']] return False
def context(self, *notes): """ A context manager that appends ``note`` to every diagnostic processed by this engine. """ self._appended_notes += notes yield del self._appended_notes[-len(notes):]
def function[context, parameter[self]]: constant[ A context manager that appends ``note`` to every diagnostic processed by this engine. ] <ast.AugAssign object at 0x7da18dc98910> <ast.Yield object at 0x7da18bccba30> <ast.Delete object at 0x7da18bcc9120>
keyword[def] identifier[context] ( identifier[self] ,* identifier[notes] ): literal[string] identifier[self] . identifier[_appended_notes] += identifier[notes] keyword[yield] keyword[del] identifier[self] . identifier[_appended_notes] [- identifier[len] ( identifier[notes] ):]
def context(self, *notes): """ A context manager that appends ``note`` to every diagnostic processed by this engine. """ self._appended_notes += notes yield del self._appended_notes[-len(notes):]
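Presumably wrapped with contextlib.contextmanager where it is defined; a usage sketch with illustrative names (engine, process, diagnostic are hypothetical). One caveat worth noting: calling it with zero notes would clear the whole list, since del lst[-0:] is del lst[0:].

with engine.context('while expanding macro FOO'):
    engine.process(diagnostic)  # every diagnostic emitted here carries the extra note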
def from_response(self, response): """ Populates a given object's attributes from a parsed JSON API response. This helper handles all necessary type coercions as it assigns attribute values. """ for name in self.PROPERTIES: attr = '_{0}'.format(name) transform = self.PROPERTIES[name].get('transform', None) value = response.get(name, None) if transform and transform == TRANSFORM.TIME and value: setattr(self, attr, dateutil.parser.parse(value)) if isinstance(value, int) and value == 0: continue # skip attribute else: setattr(self, attr, value) return self
def function[from_response, parameter[self, response]]: constant[ Populates a given object's attributes from a parsed JSON API response. This helper handles all necessary type coercions as it assigns attribute values. ] for taget[name[name]] in starred[name[self].PROPERTIES] begin[:] variable[attr] assign[=] call[constant[_{0}].format, parameter[name[name]]] variable[transform] assign[=] call[call[name[self].PROPERTIES][name[name]].get, parameter[constant[transform], constant[None]]] variable[value] assign[=] call[name[response].get, parameter[name[name], constant[None]]] if <ast.BoolOp object at 0x7da1b06547c0> begin[:] call[name[setattr], parameter[name[self], name[attr], call[name[dateutil].parser.parse, parameter[name[value]]]]] if <ast.BoolOp object at 0x7da1b07bb970> begin[:] continue return[name[self]]
keyword[def] identifier[from_response] ( identifier[self] , identifier[response] ): literal[string] keyword[for] identifier[name] keyword[in] identifier[self] . identifier[PROPERTIES] : identifier[attr] = literal[string] . identifier[format] ( identifier[name] ) identifier[transform] = identifier[self] . identifier[PROPERTIES] [ identifier[name] ]. identifier[get] ( literal[string] , keyword[None] ) identifier[value] = identifier[response] . identifier[get] ( identifier[name] , keyword[None] ) keyword[if] identifier[transform] keyword[and] identifier[transform] == identifier[TRANSFORM] . identifier[TIME] keyword[and] identifier[value] : identifier[setattr] ( identifier[self] , identifier[attr] , identifier[dateutil] . identifier[parser] . identifier[parse] ( identifier[value] )) keyword[if] identifier[isinstance] ( identifier[value] , identifier[int] ) keyword[and] identifier[value] == literal[int] : keyword[continue] keyword[else] : identifier[setattr] ( identifier[self] , identifier[attr] , identifier[value] ) keyword[return] identifier[self]
def from_response(self, response): """ Populates a given object's attributes from a parsed JSON API response. This helper handles all necessary type coercions as it assigns attribute values. """ for name in self.PROPERTIES: attr = '_{0}'.format(name) transform = self.PROPERTIES[name].get('transform', None) value = response.get(name, None) if transform and transform == TRANSFORM.TIME and value: setattr(self, attr, dateutil.parser.parse(value)) # depends on [control=['if'], data=[]] if isinstance(value, int) and value == 0: continue # skip attribute # depends on [control=['if'], data=[]] else: setattr(self, attr, value) # depends on [control=['for'], data=['name']] return self
def setup_dirs(self, storage_dir): '''Calculates all the storage and build dirs, and makes sure the directories exist where necessary.''' self.storage_dir = expanduser(storage_dir) if ' ' in self.storage_dir: raise ValueError('storage dir path cannot contain spaces, please ' 'specify a path with --storage-dir') self.build_dir = join(self.storage_dir, 'build') self.dist_dir = join(self.storage_dir, 'dists')
def function[setup_dirs, parameter[self, storage_dir]]: constant[Calculates all the storage and build dirs, and makes sure the directories exist where necessary.] name[self].storage_dir assign[=] call[name[expanduser], parameter[name[storage_dir]]] if compare[constant[ ] in name[self].storage_dir] begin[:] <ast.Raise object at 0x7da1b1c8a5c0> name[self].build_dir assign[=] call[name[join], parameter[name[self].storage_dir, constant[build]]] name[self].dist_dir assign[=] call[name[join], parameter[name[self].storage_dir, constant[dists]]]
keyword[def] identifier[setup_dirs] ( identifier[self] , identifier[storage_dir] ): literal[string] identifier[self] . identifier[storage_dir] = identifier[expanduser] ( identifier[storage_dir] ) keyword[if] literal[string] keyword[in] identifier[self] . identifier[storage_dir] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[self] . identifier[build_dir] = identifier[join] ( identifier[self] . identifier[storage_dir] , literal[string] ) identifier[self] . identifier[dist_dir] = identifier[join] ( identifier[self] . identifier[storage_dir] , literal[string] )
def setup_dirs(self, storage_dir): """Calculates all the storage and build dirs, and makes sure the directories exist where necessary.""" self.storage_dir = expanduser(storage_dir) if ' ' in self.storage_dir: raise ValueError('storage dir path cannot contain spaces, please specify a path with --storage-dir') # depends on [control=['if'], data=[]] self.build_dir = join(self.storage_dir, 'build') self.dist_dir = join(self.storage_dir, 'dists')
def bbin(obj: Union[str, Element]) -> str: """ Boldify built-in types @param obj: object name or id @return: """ return obj.name if isinstance(obj, Element ) else f'**{obj}**' if obj in builtin_names else obj
def function[bbin, parameter[obj]]: constant[ Boldify built-in types @param obj: object name or id @return: ] return[<ast.IfExp object at 0x7da1b0627af0>]
keyword[def] identifier[bbin] ( identifier[obj] : identifier[Union] [ identifier[str] , identifier[Element] ])-> identifier[str] : literal[string] keyword[return] identifier[obj] . identifier[name] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Element] ) keyword[else] literal[string] keyword[if] identifier[obj] keyword[in] identifier[builtin_names] keyword[else] identifier[obj]
def bbin(obj: Union[str, Element]) -> str: """ Boldify built-in types @param obj: object name or id @return: """ return obj.name if isinstance(obj, Element) else f'**{obj}**' if obj in builtin_names else obj
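With an illustrative builtin_names set (the real one is module-level state; Element and Union are assumed imported alongside the function), builtins get wrapped in Markdown bold and everything else passes through:

builtin_names = {'int', 'str', 'float'}
print(bbin('int'))     # **int**
print(bbin('Person'))  # Person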
def transform_log_prob_fn(log_prob_fn: PotentialFn, bijector: BijectorNest, init_state: State = None ) -> Union[PotentialFn, Tuple[PotentialFn, State]]: """Transforms a log-prob function using a bijector. This takes a log-prob function and creates a new log-prob function that now takes state in the domain of the bijector, forward transforms that state and calls the original log-prob function. It then returns the log-probability that correctly accounts for this transformation. The forward-transformed state is pre-pended to the original log-prob function's extra returns and returned as the new extra return. For convenience you can also pass the initial state (in the original space), and this function will return the inverse transformed as the 2nd return value. You'd use this to initialize MCMC operators that operate in the transformed space. Args: log_prob_fn: Log prob fn. bijector: Bijector(s), must be of the same structure as the `log_prob_fn` inputs. init_state: Initial state, in the original space. Returns: transformed_log_prob_fn: Transformed log prob fn. transformed_init_state: If `init_state` is provided. Initial state in the transformed space. """ def wrapper(*args): """Transformed wrapper.""" bijector_ = bijector args = tf.nest.map_structure(lambda x: 0. + x, args) if len(args) == 1: args = args[0] elif isinstance(bijector_, list): bijector_ = tuple(bijector_) original_space_args = tf.nest.map_structure(lambda b, x: b.forward(x), bijector_, args) original_space_args = original_space_args # type: Tuple[Any] original_space_log_prob, extra = call_fn(log_prob_fn, original_space_args) event_ndims = tf.nest.map_structure( lambda x: tf.rank(x) - tf.rank(original_space_log_prob), args) return original_space_log_prob + sum( tf.nest.flatten( tf.nest.map_structure( lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e), bijector_, args, event_ndims))), [original_space_args, extra] if init_state is None: return wrapper else: return wrapper, tf.nest.map_structure(lambda b, s: b.inverse(s), bijector, init_state)
def function[transform_log_prob_fn, parameter[log_prob_fn, bijector, init_state]]: constant[Transforms a log-prob function using a bijector. This takes a log-prob function and creates a new log-prob function that now takes state in the domain of the bijector, forward transforms that state and calls the original log-prob function. It then returns the log-probability that correctly accounts for this transformation. The forward-transformed state is pre-pended to the original log-prob function's extra returns and returned as the new extra return. For convenience you can also pass the initial state (in the original space), and this function will return the inverse transformed as the 2nd return value. You'd use this to initialize MCMC operators that operate in the transformed space. Args: log_prob_fn: Log prob fn. bijector: Bijector(s), must be of the same structure as the `log_prob_fn` inputs. init_state: Initial state, in the original space. Returns: transformed_log_prob_fn: Transformed log prob fn. transformed_init_state: If `init_state` is provided. Initial state in the transformed space. ] def function[wrapper, parameter[]]: constant[Transformed wrapper.] variable[bijector_] assign[=] name[bijector] variable[args] assign[=] call[name[tf].nest.map_structure, parameter[<ast.Lambda object at 0x7da1b03a1f30>, name[args]]] if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:] variable[args] assign[=] call[name[args]][constant[0]] variable[original_space_args] assign[=] call[name[tf].nest.map_structure, parameter[<ast.Lambda object at 0x7da1b03a2b90>, name[bijector_], name[args]]] variable[original_space_args] assign[=] name[original_space_args] <ast.Tuple object at 0x7da1b03a2ce0> assign[=] call[name[call_fn], parameter[name[log_prob_fn], name[original_space_args]]] variable[event_ndims] assign[=] call[name[tf].nest.map_structure, parameter[<ast.Lambda object at 0x7da1b03482e0>, name[args]]] return[tuple[[<ast.BinOp object at 0x7da1b0348400>, <ast.List object at 0x7da1b0348670>]]] if compare[name[init_state] is constant[None]] begin[:] return[name[wrapper]]
keyword[def] identifier[transform_log_prob_fn] ( identifier[log_prob_fn] : identifier[PotentialFn] , identifier[bijector] : identifier[BijectorNest] , identifier[init_state] : identifier[State] = keyword[None] )-> identifier[Union] [ identifier[PotentialFn] , identifier[Tuple] [ identifier[PotentialFn] , identifier[State] ]]: literal[string] keyword[def] identifier[wrapper] (* identifier[args] ): literal[string] identifier[bijector_] = identifier[bijector] identifier[args] = identifier[tf] . identifier[nest] . identifier[map_structure] ( keyword[lambda] identifier[x] : literal[int] + identifier[x] , identifier[args] ) keyword[if] identifier[len] ( identifier[args] )== literal[int] : identifier[args] = identifier[args] [ literal[int] ] keyword[elif] identifier[isinstance] ( identifier[bijector_] , identifier[list] ): identifier[bijector_] = identifier[tuple] ( identifier[bijector_] ) identifier[original_space_args] = identifier[tf] . identifier[nest] . identifier[map_structure] ( keyword[lambda] identifier[b] , identifier[x] : identifier[b] . identifier[forward] ( identifier[x] ), identifier[bijector_] , identifier[args] ) identifier[original_space_args] = identifier[original_space_args] identifier[original_space_log_prob] , identifier[extra] = identifier[call_fn] ( identifier[log_prob_fn] , identifier[original_space_args] ) identifier[event_ndims] = identifier[tf] . identifier[nest] . identifier[map_structure] ( keyword[lambda] identifier[x] : identifier[tf] . identifier[rank] ( identifier[x] )- identifier[tf] . identifier[rank] ( identifier[original_space_log_prob] ), identifier[args] ) keyword[return] identifier[original_space_log_prob] + identifier[sum] ( identifier[tf] . identifier[nest] . identifier[flatten] ( identifier[tf] . identifier[nest] . identifier[map_structure] ( keyword[lambda] identifier[b] , identifier[x] , identifier[e] : identifier[b] . identifier[forward_log_det_jacobian] ( identifier[x] , identifier[event_ndims] = identifier[e] ), identifier[bijector_] , identifier[args] , identifier[event_ndims] ))),[ identifier[original_space_args] , identifier[extra] ] keyword[if] identifier[init_state] keyword[is] keyword[None] : keyword[return] identifier[wrapper] keyword[else] : keyword[return] identifier[wrapper] , identifier[tf] . identifier[nest] . identifier[map_structure] ( keyword[lambda] identifier[b] , identifier[s] : identifier[b] . identifier[inverse] ( identifier[s] ), identifier[bijector] , identifier[init_state] )
def transform_log_prob_fn(log_prob_fn: PotentialFn, bijector: BijectorNest, init_state: State=None) -> Union[PotentialFn, Tuple[PotentialFn, State]]: """Transforms a log-prob function using a bijector. This takes a log-prob function and creates a new log-prob function that now takes state in the domain of the bijector, forward transforms that state and calls the original log-prob function. It then returns the log-probability that correctly accounts for this transformation. The forward-transformed state is pre-pended to the original log-prob function's extra returns and returned as the new extra return. For convenience you can also pass the initial state (in the original space), and this function will return the inverse transformed as the 2nd return value. You'd use this to initialize MCMC operators that operate in the transformed space. Args: log_prob_fn: Log prob fn. bijector: Bijector(s), must be of the same structure as the `log_prob_fn` inputs. init_state: Initial state, in the original space. Returns: transformed_log_prob_fn: Transformed log prob fn. transformed_init_state: If `init_state` is provided. Initial state in the transformed space. """ def wrapper(*args): """Transformed wrapper.""" bijector_ = bijector args = tf.nest.map_structure(lambda x: 0.0 + x, args) if len(args) == 1: args = args[0] # depends on [control=['if'], data=[]] elif isinstance(bijector_, list): bijector_ = tuple(bijector_) # depends on [control=['if'], data=[]] original_space_args = tf.nest.map_structure(lambda b, x: b.forward(x), bijector_, args) original_space_args = original_space_args # type: Tuple[Any] (original_space_log_prob, extra) = call_fn(log_prob_fn, original_space_args) event_ndims = tf.nest.map_structure(lambda x: tf.rank(x) - tf.rank(original_space_log_prob), args) return (original_space_log_prob + sum(tf.nest.flatten(tf.nest.map_structure(lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e), bijector_, args, event_ndims))), [original_space_args, extra]) if init_state is None: return wrapper # depends on [control=['if'], data=[]] else: return (wrapper, tf.nest.map_structure(lambda b, s: b.inverse(s), bijector, init_state))
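The correction term is the standard change-of-variables adjustment: for state z in the transformed space and bijector f, the wrapper returns \log \tilde{p}(z) = \log p(f(z)) + \sum_i \log \lvert \det \partial f_i / \partial z_i \rvert, with one Jacobian term per state part, which is exactly what the forward_log_det_jacobian sum computes.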
def setdiff(left, *rights, **kwargs): """ Exclude data from a collection, like `except` clause in SQL. All collections involved should have same schema. :param left: collection to drop data from :param rights: collection or list of collections :param distinct: whether to preserve duplicate entries :return: collection :Examples: >>> import pandas as pd >>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]})) >>> df2 = DataFrame(pd.DataFrame({'a': [1, 3], 'b': [1, 3]})) >>> df1.setdiff(df2) a b 0 2 2 1 3 3 2 3 3 >>> df1.setdiff(df2, distinct=True) a b 0 2 2 """ import time from ..utils import output distinct = kwargs.get('distinct', False) if isinstance(rights[0], list): rights = rights[0] cols = [n for n in left.schema.names] types = [n for n in left.schema.types] counter_col_name = 'exc_counter_%d' % int(time.time()) left = left[left, Scalar(1).rename(counter_col_name)] rights = [r[r, Scalar(-1).rename(counter_col_name)] for r in rights] unioned = left for r in rights: unioned = unioned.union(r) if distinct: aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].min()}) return aggregated.filter(aggregated[counter_col_name] == 1).select(*cols) else: aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].sum()}) @output(cols, types) def exploder(row): import sys irange = xrange if sys.version_info[0] < 3 else range for _ in irange(getattr(row, counter_col_name)): yield row[:-1] return aggregated.map_reduce(mapper=exploder).select(*cols)
def function[setdiff, parameter[left]]: constant[ Exclude data from a collection, like `except` clause in SQL. All collections involved should have same schema. :param left: collection to drop data from :param rights: collection or list of collections :param distinct: whether to preserve duplicate entries :return: collection :Examples: >>> import pandas as pd >>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]})) >>> df2 = DataFrame(pd.DataFrame({'a': [1, 3], 'b': [1, 3]})) >>> df1.setdiff(df2) a b 0 2 2 1 3 3 2 3 3 >>> df1.setdiff(df2, distinct=True) a b 0 2 2 ] import module[time] from relative_module[utils] import module[output] variable[distinct] assign[=] call[name[kwargs].get, parameter[constant[distinct], constant[False]]] if call[name[isinstance], parameter[call[name[rights]][constant[0]], name[list]]] begin[:] variable[rights] assign[=] call[name[rights]][constant[0]] variable[cols] assign[=] <ast.ListComp object at 0x7da1b2347610> variable[types] assign[=] <ast.ListComp object at 0x7da1b2345b40> variable[counter_col_name] assign[=] binary_operation[constant[exc_counter_%d] <ast.Mod object at 0x7da2590d6920> call[name[int], parameter[call[name[time].time, parameter[]]]]] variable[left] assign[=] call[name[left]][tuple[[<ast.Name object at 0x7da1b2344130>, <ast.Call object at 0x7da1b23448b0>]]] variable[rights] assign[=] <ast.ListComp object at 0x7da1b2344040> variable[unioned] assign[=] name[left] for taget[name[r]] in starred[name[rights]] begin[:] variable[unioned] assign[=] call[name[unioned].union, parameter[name[r]]] if name[distinct] begin[:] variable[aggregated] assign[=] call[call[name[unioned].groupby, parameter[<ast.Starred object at 0x7da1b2347970>]].agg, parameter[]] return[call[call[name[aggregated].filter, parameter[compare[call[name[aggregated]][name[counter_col_name]] equal[==] constant[1]]]].select, parameter[<ast.Starred object at 0x7da1b2347f10>]]]
keyword[def] identifier[setdiff] ( identifier[left] ,* identifier[rights] ,** identifier[kwargs] ): literal[string] keyword[import] identifier[time] keyword[from] .. identifier[utils] keyword[import] identifier[output] identifier[distinct] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ) keyword[if] identifier[isinstance] ( identifier[rights] [ literal[int] ], identifier[list] ): identifier[rights] = identifier[rights] [ literal[int] ] identifier[cols] =[ identifier[n] keyword[for] identifier[n] keyword[in] identifier[left] . identifier[schema] . identifier[names] ] identifier[types] =[ identifier[n] keyword[for] identifier[n] keyword[in] identifier[left] . identifier[schema] . identifier[types] ] identifier[counter_col_name] = literal[string] % identifier[int] ( identifier[time] . identifier[time] ()) identifier[left] = identifier[left] [ identifier[left] , identifier[Scalar] ( literal[int] ). identifier[rename] ( identifier[counter_col_name] )] identifier[rights] =[ identifier[r] [ identifier[r] , identifier[Scalar] (- literal[int] ). identifier[rename] ( identifier[counter_col_name] )] keyword[for] identifier[r] keyword[in] identifier[rights] ] identifier[unioned] = identifier[left] keyword[for] identifier[r] keyword[in] identifier[rights] : identifier[unioned] = identifier[unioned] . identifier[union] ( identifier[r] ) keyword[if] identifier[distinct] : identifier[aggregated] = identifier[unioned] . identifier[groupby] (* identifier[cols] ). identifier[agg] (**{ identifier[counter_col_name] : identifier[unioned] [ identifier[counter_col_name] ]. identifier[min] ()}) keyword[return] identifier[aggregated] . identifier[filter] ( identifier[aggregated] [ identifier[counter_col_name] ]== literal[int] ). identifier[select] (* identifier[cols] ) keyword[else] : identifier[aggregated] = identifier[unioned] . identifier[groupby] (* identifier[cols] ). identifier[agg] (**{ identifier[counter_col_name] : identifier[unioned] [ identifier[counter_col_name] ]. identifier[sum] ()}) @ identifier[output] ( identifier[cols] , identifier[types] ) keyword[def] identifier[exploder] ( identifier[row] ): keyword[import] identifier[sys] identifier[irange] = identifier[xrange] keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]< literal[int] keyword[else] identifier[range] keyword[for] identifier[_] keyword[in] identifier[irange] ( identifier[getattr] ( identifier[row] , identifier[counter_col_name] )): keyword[yield] identifier[row] [:- literal[int] ] keyword[return] identifier[aggregated] . identifier[map_reduce] ( identifier[mapper] = identifier[exploder] ). identifier[select] (* identifier[cols] )
def setdiff(left, *rights, **kwargs): """ Exclude data from a collection, like `except` clause in SQL. All collections involved should have same schema. :param left: collection to drop data from :param rights: collection or list of collections :param distinct: whether to preserve duplicate entries :return: collection :Examples: >>> import pandas as pd >>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]})) >>> df2 = DataFrame(pd.DataFrame({'a': [1, 3], 'b': [1, 3]})) >>> df1.setdiff(df2) a b 0 2 2 1 3 3 2 3 3 >>> df1.setdiff(df2, distinct=True) a b 0 2 2 """ import time from ..utils import output distinct = kwargs.get('distinct', False) if isinstance(rights[0], list): rights = rights[0] # depends on [control=['if'], data=[]] cols = [n for n in left.schema.names] types = [n for n in left.schema.types] counter_col_name = 'exc_counter_%d' % int(time.time()) left = left[left, Scalar(1).rename(counter_col_name)] rights = [r[r, Scalar(-1).rename(counter_col_name)] for r in rights] unioned = left for r in rights: unioned = unioned.union(r) # depends on [control=['for'], data=['r']] if distinct: aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].min()}) return aggregated.filter(aggregated[counter_col_name] == 1).select(*cols) # depends on [control=['if'], data=[]] else: aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].sum()}) @output(cols, types) def exploder(row): import sys irange = xrange if sys.version_info[0] < 3 else range for _ in irange(getattr(row, counter_col_name)): yield row[:-1] # depends on [control=['for'], data=[]] return aggregated.map_reduce(mapper=exploder).select(*cols)
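The +1/-1 counter trick is easy to see on plain pandas, mirroring the non-distinct branch and the docstring data (a sketch, not the actual ODPS execution path):

import pandas as pd

left = pd.DataFrame({'a': [1, 2, 3, 3, 3]}).assign(cnt=1)
right = pd.DataFrame({'a': [1, 3]}).assign(cnt=-1)
counts = pd.concat([left, right]).groupby('a')['cnt'].sum()
survivors = counts[counts > 0]                  # 2 -> 1 copy, 3 -> 2 copies
print(list(survivors.index.repeat(survivors)))  # [2, 3, 3], matching the docstring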
def index_service(self, service_id): """ Index a service in search engine. """ from hypermap.aggregator.models import Service service = Service.objects.get(id=service_id) if not service.is_valid: LOGGER.debug('Not indexing service with id %s in search engine as it is not valid' % service.id) return LOGGER.debug('Indexing service %s' % service.id) layer_to_process = service.layer_set.all() for layer in layer_to_process: if not settings.REGISTRY_SKIP_CELERY: index_layer(layer.id, use_cache=True) else: index_layer(layer.id)
def function[index_service, parameter[self, service_id]]: constant[ Index a service in search engine. ] from relative_module[hypermap.aggregator.models] import module[Service] variable[service] assign[=] call[name[Service].objects.get, parameter[]] if <ast.UnaryOp object at 0x7da18c4cf130> begin[:] call[name[LOGGER].debug, parameter[binary_operation[constant[Not indexing service with id %s in search engine as it is not valid] <ast.Mod object at 0x7da2590d6920> name[service].id]]] return[None] call[name[LOGGER].debug, parameter[binary_operation[constant[Indexing service %s] <ast.Mod object at 0x7da2590d6920> name[service].id]]] variable[layer_to_process] assign[=] call[name[service].layer_set.all, parameter[]] for taget[name[layer]] in starred[name[layer_to_process]] begin[:] if <ast.UnaryOp object at 0x7da18c4cdb70> begin[:] call[name[index_layer], parameter[name[layer].id]]
keyword[def] identifier[index_service] ( identifier[self] , identifier[service_id] ): literal[string] keyword[from] identifier[hypermap] . identifier[aggregator] . identifier[models] keyword[import] identifier[Service] identifier[service] = identifier[Service] . identifier[objects] . identifier[get] ( identifier[id] = identifier[service_id] ) keyword[if] keyword[not] identifier[service] . identifier[is_valid] : identifier[LOGGER] . identifier[debug] ( literal[string] % identifier[service] . identifier[id] ) keyword[return] identifier[LOGGER] . identifier[debug] ( literal[string] % identifier[service] . identifier[id] ) identifier[layer_to_process] = identifier[service] . identifier[layer_set] . identifier[all] () keyword[for] identifier[layer] keyword[in] identifier[layer_to_process] : keyword[if] keyword[not] identifier[settings] . identifier[REGISTRY_SKIP_CELERY] : identifier[index_layer] ( identifier[layer] . identifier[id] , identifier[use_cache] = keyword[True] ) keyword[else] : identifier[index_layer] ( identifier[layer] . identifier[id] )
def index_service(self, service_id): """ Index a service in search engine. """ from hypermap.aggregator.models import Service service = Service.objects.get(id=service_id) if not service.is_valid: LOGGER.debug('Not indexing service with id %s in search engine as it is not valid' % service.id) return # depends on [control=['if'], data=[]] LOGGER.debug('Indexing service %s' % service.id) layer_to_process = service.layer_set.all() for layer in layer_to_process: if not settings.REGISTRY_SKIP_CELERY: index_layer(layer.id, use_cache=True) # depends on [control=['if'], data=[]] else: index_layer(layer.id) # depends on [control=['for'], data=['layer']]
def dfa_dot_importer(input_file: str) -> dict: """ Imports a DFA from a DOT file. The following attributes of DOT files are recognized: • nodeX shape=doublecircle -> accepting node; • nodeX root=true -> initial node; • edgeX label="a" -> action in alphabet; • fake [style=invisible] -> dummy invisible node pointing to initial state (they will be skipped); • fake-> S [style=bold] -> dummy transition to draw the arrow pointing to initial state (it will be skipped). Forbidden names: • 'fake' used for graphical purpose to draw the arrow of the initial state; • 'sink' used as additional state when completing a DFA; • 'None' used when no initial state is present. Forbidden characters: • " • ' • ( • ) • spaces :param str input_file: path to the DOT file; :return: *(dict)* representing a DFA. """ # pyDot Object g = pydot.graph_from_dot_file(input_file)[0] states = set() initial_state = None accepting_states = set() replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''} for node in g.get_nodes(): if node.get_name() == 'fake' or node.get_name() == 'None' or node.get_name() == 'graph' or node.get_name() == 'node': continue if 'style' in node.get_attributes() and node.get_attributes()['style'] == 'invisible': continue node_reference = __replace_all(replacements, node.get_name()).split(',') if len(node_reference) > 1: node_reference = tuple(node_reference) else: node_reference = node_reference[0] states.add(node_reference) for attribute in node.get_attributes(): if attribute == 'root': initial_state = node_reference if attribute == 'shape' and node.get_attributes()['shape'] == 'doublecircle': accepting_states.add(node_reference) alphabet = set() transitions = {} for edge in g.get_edges(): if edge.get_source() == 'fake': continue label = __replace_all(replacements, edge.get_label()) alphabet.add(label) source = __replace_all(replacements, edge.get_source()).split(',') if len(source) > 1: source = tuple(source) else: source = source[0] destination = __replace_all(replacements, edge.get_destination()).split(',') if len(destination) > 1: destination = tuple(destination) else: destination = destination[0] transitions[source, label] = destination dfa = { 'alphabet': alphabet, 'states': states, 'initial_state': initial_state, 'accepting_states': accepting_states, 'transitions': transitions} return dfa
def function[dfa_dot_importer, parameter[input_file]]: constant[ Imports a DFA from a DOT file. Of DOT files are recognized the following attributes: • nodeX shape=doublecircle -> accepting node; • nodeX root=true -> initial node; • edgeX label="a" -> action in alphabet; • fake [style=invisible] -> dummy invisible node pointing to initial state (they will be skipped); • fake-> S [style=bold] -> dummy transition to draw the arrow pointing to initial state (it will be skipped). Forbidden names: • 'fake' used for graphical purpose to drawn the arrow of the initial state; • 'sink' used as additional state when completing a DFA; • 'None' used when no initial state is present. Forbidden characters: • " • ' • ( • ) • spaces :param str input_file: path to the DOT file; :return: *(dict)* representing a DFA. ] variable[g] assign[=] call[call[name[pydot].graph_from_dot_file, parameter[name[input_file]]]][constant[0]] variable[states] assign[=] call[name[set], parameter[]] variable[initial_state] assign[=] constant[None] variable[accepting_states] assign[=] call[name[set], parameter[]] variable[replacements] assign[=] dictionary[[<ast.Constant object at 0x7da1b268f820>, <ast.Constant object at 0x7da1b268c9d0>, <ast.Constant object at 0x7da1b268d1b0>, <ast.Constant object at 0x7da1b268e1a0>, <ast.Constant object at 0x7da1b268c9a0>], [<ast.Constant object at 0x7da1b268cd60>, <ast.Constant object at 0x7da1b268c1f0>, <ast.Constant object at 0x7da1b268cdf0>, <ast.Constant object at 0x7da1b268c8e0>, <ast.Constant object at 0x7da1b268d3f0>]] for taget[name[node]] in starred[call[name[g].get_nodes, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b2544970> begin[:] continue if <ast.BoolOp object at 0x7da1b2544f40> begin[:] continue variable[node_reference] assign[=] call[call[name[__replace_all], parameter[name[replacements], call[name[node].get_name, parameter[]]]].split, parameter[constant[,]]] if compare[call[name[len], parameter[name[node_reference]]] greater[>] constant[1]] begin[:] variable[node_reference] assign[=] call[name[tuple], parameter[name[node_reference]]] call[name[states].add, parameter[name[node_reference]]] for taget[name[attribute]] in starred[call[name[node].get_attributes, parameter[]]] begin[:] if compare[name[attribute] equal[==] constant[root]] begin[:] variable[initial_state] assign[=] name[node_reference] if <ast.BoolOp object at 0x7da1b2545120> begin[:] call[name[accepting_states].add, parameter[name[node_reference]]] variable[alphabet] assign[=] call[name[set], parameter[]] variable[transitions] assign[=] dictionary[[], []] for taget[name[edge]] in starred[call[name[g].get_edges, parameter[]]] begin[:] if compare[call[name[edge].get_source, parameter[]] equal[==] constant[fake]] begin[:] continue variable[label] assign[=] call[name[__replace_all], parameter[name[replacements], call[name[edge].get_label, parameter[]]]] call[name[alphabet].add, parameter[name[label]]] variable[source] assign[=] call[call[name[__replace_all], parameter[name[replacements], call[name[edge].get_source, parameter[]]]].split, parameter[constant[,]]] if compare[call[name[len], parameter[name[source]]] greater[>] constant[1]] begin[:] variable[source] assign[=] call[name[tuple], parameter[name[source]]] variable[destination] assign[=] call[call[name[__replace_all], parameter[name[replacements], call[name[edge].get_destination, parameter[]]]].split, parameter[constant[,]]] if compare[call[name[len], parameter[name[destination]]] greater[>] constant[1]] begin[:] variable[destination] assign[=] 
call[name[tuple], parameter[name[destination]]] call[name[transitions]][tuple[[<ast.Name object at 0x7da1b2546830>, <ast.Name object at 0x7da1b2545060>]]] assign[=] name[destination] variable[dfa] assign[=] dictionary[[<ast.Constant object at 0x7da1b25451e0>, <ast.Constant object at 0x7da1b2545f90>, <ast.Constant object at 0x7da1b2545c90>, <ast.Constant object at 0x7da1b25459c0>, <ast.Constant object at 0x7da1b25474f0>], [<ast.Name object at 0x7da1b25458d0>, <ast.Name object at 0x7da1b2547910>, <ast.Name object at 0x7da1b25449a0>, <ast.Name object at 0x7da1b2545750>, <ast.Name object at 0x7da1b2547fd0>]] return[name[dfa]]
keyword[def] identifier[dfa_dot_importer] ( identifier[input_file] : identifier[str] )-> identifier[dict] : literal[string] identifier[g] = identifier[pydot] . identifier[graph_from_dot_file] ( identifier[input_file] )[ literal[int] ] identifier[states] = identifier[set] () identifier[initial_state] = keyword[None] identifier[accepting_states] = identifier[set] () identifier[replacements] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } keyword[for] identifier[node] keyword[in] identifier[g] . identifier[get_nodes] (): keyword[if] identifier[node] . identifier[get_name] ()== literal[string] keyword[or] identifier[node] . identifier[get_name] ()== literal[string] keyword[or] identifier[node] . identifier[get_name] ()== literal[string] keyword[or] identifier[node] . identifier[get_name] ()== literal[string] : keyword[continue] keyword[if] literal[string] keyword[in] identifier[node] . identifier[get_attributes] () keyword[and] identifier[node] . identifier[get_attributes] ()[ literal[string] ]== literal[string] : keyword[continue] identifier[node_reference] = identifier[__replace_all] ( identifier[replacements] , identifier[node] . identifier[get_name] ()). identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[node_reference] )> literal[int] : identifier[node_reference] = identifier[tuple] ( identifier[node_reference] ) keyword[else] : identifier[node_reference] = identifier[node_reference] [ literal[int] ] identifier[states] . identifier[add] ( identifier[node_reference] ) keyword[for] identifier[attribute] keyword[in] identifier[node] . identifier[get_attributes] (): keyword[if] identifier[attribute] == literal[string] : identifier[initial_state] = identifier[node_reference] keyword[if] identifier[attribute] == literal[string] keyword[and] identifier[node] . identifier[get_attributes] ()[ literal[string] ]== literal[string] : identifier[accepting_states] . identifier[add] ( identifier[node_reference] ) identifier[alphabet] = identifier[set] () identifier[transitions] ={} keyword[for] identifier[edge] keyword[in] identifier[g] . identifier[get_edges] (): keyword[if] identifier[edge] . identifier[get_source] ()== literal[string] : keyword[continue] identifier[label] = identifier[__replace_all] ( identifier[replacements] , identifier[edge] . identifier[get_label] ()) identifier[alphabet] . identifier[add] ( identifier[label] ) identifier[source] = identifier[__replace_all] ( identifier[replacements] , identifier[edge] . identifier[get_source] ()). identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[source] )> literal[int] : identifier[source] = identifier[tuple] ( identifier[source] ) keyword[else] : identifier[source] = identifier[source] [ literal[int] ] identifier[destination] = identifier[__replace_all] ( identifier[replacements] , identifier[edge] . identifier[get_destination] ()). 
identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[destination] )> literal[int] : identifier[destination] = identifier[tuple] ( identifier[destination] ) keyword[else] : identifier[destination] = identifier[destination] [ literal[int] ] identifier[transitions] [ identifier[source] , identifier[label] ]= identifier[destination] identifier[dfa] ={ literal[string] : identifier[alphabet] , literal[string] : identifier[states] , literal[string] : identifier[initial_state] , literal[string] : identifier[accepting_states] , literal[string] : identifier[transitions] } keyword[return] identifier[dfa]
def dfa_dot_importer(input_file: str) -> dict:
    """ Imports a DFA from a DOT file.

    The following DOT file attributes are recognized:

    • nodeX shape=doublecircle -> accepting node;
    • nodeX root=true -> initial node;
    • edgeX label="a" -> action in alphabet;
    • fake [style=invisible] -> dummy invisible node pointing to initial state (it will be skipped);
    • fake -> S [style=bold] -> dummy transition to draw the arrow pointing to initial state (it will be skipped).

    Forbidden names:

    • 'fake' used for graphical purposes to draw the arrow of the initial state;
    • 'sink' used as an additional state when completing a DFA;
    • 'None' used when no initial state is present.

    Forbidden characters:

    • "
    • '
    • (
    • )
    • spaces

    :param str input_file: path to the DOT file;
    :return: *(dict)* representing a DFA.
    """
    # pyDot Object
    g = pydot.graph_from_dot_file(input_file)[0]
    states = set()
    initial_state = None
    accepting_states = set()
    replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''}
    for node in g.get_nodes():
        if node.get_name() == 'fake' or node.get_name() == 'None' or node.get_name() == 'graph' or (node.get_name() == 'node'):
            continue # depends on [control=['if'], data=[]]
        if 'style' in node.get_attributes() and node.get_attributes()['style'] == 'invisible':
            continue # depends on [control=['if'], data=[]]
        node_reference = __replace_all(replacements, node.get_name()).split(',')
        if len(node_reference) > 1:
            node_reference = tuple(node_reference) # depends on [control=['if'], data=[]]
        else:
            node_reference = node_reference[0]
        states.add(node_reference)
        for attribute in node.get_attributes():
            if attribute == 'root':
                initial_state = node_reference # depends on [control=['if'], data=[]]
            if attribute == 'shape' and node.get_attributes()['shape'] == 'doublecircle':
                accepting_states.add(node_reference) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attribute']] # depends on [control=['for'], data=['node']]
    alphabet = set()
    transitions = {}
    for edge in g.get_edges():
        if edge.get_source() == 'fake':
            continue # depends on [control=['if'], data=[]]
        label = __replace_all(replacements, edge.get_label())
        alphabet.add(label)
        source = __replace_all(replacements, edge.get_source()).split(',')
        if len(source) > 1:
            source = tuple(source) # depends on [control=['if'], data=[]]
        else:
            source = source[0]
        destination = __replace_all(replacements, edge.get_destination()).split(',')
        if len(destination) > 1:
            destination = tuple(destination) # depends on [control=['if'], data=[]]
        else:
            destination = destination[0]
        transitions[source, label] = destination # depends on [control=['for'], data=['edge']]
    dfa = {'alphabet': alphabet, 'states': states, 'initial_state': initial_state, 'accepting_states': accepting_states, 'transitions': transitions}
    return dfa
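A quick usage sketch for dfa_dot_importer (my illustration, not part of the source): it writes a two-state automaton to a temporary DOT file and imports it. It assumes pydot is installed and that dfa_dot_importer and its private __replace_all helper are in scope.

import os
import tempfile

dot_source = '''digraph {
fake [style=invisible]
s0 [root=true, shape=doublecircle]
s1
fake -> s0 [style=bold]
s0 -> s1 [label="a"]
s1 -> s0 [label="a"]
}'''

with tempfile.NamedTemporaryFile('w', suffix='.dot', delete=False) as f:
    f.write(dot_source)
    path = f.name

dfa = dfa_dot_importer(path)
print(dfa['initial_state'])             # 's0'
print(dfa['transitions'][('s0', 'a')])  # 's1'
os.remove(path)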
def _cmp_fstruct(self, s1, s2, frac_tol, mask):
        """
        Returns true if a matching exists between s1 and s2 under frac_tol.
        s2 should be a subset of s1
        """
        if len(s2) > len(s1):
            raise ValueError("s1 must be larger than s2")
        if mask.shape != (len(s2), len(s1)):
            raise ValueError("mask has incorrect shape")

        return is_coord_subset_pbc(s2, s1, frac_tol, mask)
def function[_cmp_fstruct, parameter[self, s1, s2, frac_tol, mask]]: constant[ Returns true if a matching exists between s2 and s2 under frac_tol. s2 should be a subset of s1 ] if compare[call[name[len], parameter[name[s2]]] greater[>] call[name[len], parameter[name[s1]]]] begin[:] <ast.Raise object at 0x7da1b1bd3dc0> if compare[name[mask].shape not_equal[!=] tuple[[<ast.Call object at 0x7da1b1bd3400>, <ast.Call object at 0x7da1b1bd3280>]]] begin[:] <ast.Raise object at 0x7da1b1bd3eb0> return[call[name[is_coord_subset_pbc], parameter[name[s2], name[s1], name[frac_tol], name[mask]]]]
keyword[def] identifier[_cmp_fstruct] ( identifier[self] , identifier[s1] , identifier[s2] , identifier[frac_tol] , identifier[mask] ): literal[string] keyword[if] identifier[len] ( identifier[s2] )> identifier[len] ( identifier[s1] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[mask] . identifier[shape] !=( identifier[len] ( identifier[s2] ), identifier[len] ( identifier[s1] )): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[is_coord_subset_pbc] ( identifier[s2] , identifier[s1] , identifier[frac_tol] , identifier[mask] )
def _cmp_fstruct(self, s1, s2, frac_tol, mask):
    """
    Returns true if a matching exists between s1 and s2 under frac_tol.
    s2 should be a subset of s1
    """
    if len(s2) > len(s1):
        raise ValueError('s1 must be larger than s2') # depends on [control=['if'], data=[]]
    if mask.shape != (len(s2), len(s1)):
        raise ValueError('mask has incorrect shape') # depends on [control=['if'], data=[]]
    return is_coord_subset_pbc(s2, s1, frac_tol, mask)
def emit(self, content, request=None, emitter=None): """ Serialize response. :return response: Instance of django.http.Response """ # Get emitter for request emitter = emitter or self.determine_emitter(request) emitter = emitter(self, request=request, response=content) # Serialize the response content response = emitter.emit() if not isinstance(response, HttpResponse): raise AssertionError("Emitter must return HttpResponse") # Append pagination headers if isinstance(content, Paginator): linked_resources = [] if content.next_page: linked_resources.append('<{0}>; rel="next"'.format( content.next_page)) if content.previous_page: linked_resources.append( '<{0}>; rel="previous"'.format(content.previous_page)) response["Link"] = ", ".join(linked_resources) return response
def function[emit, parameter[self, content, request, emitter]]: constant[ Serialize response. :return response: Instance of django.http.Response ] variable[emitter] assign[=] <ast.BoolOp object at 0x7da204621b10> variable[emitter] assign[=] call[name[emitter], parameter[name[self]]] variable[response] assign[=] call[name[emitter].emit, parameter[]] if <ast.UnaryOp object at 0x7da204623970> begin[:] <ast.Raise object at 0x7da204623610> if call[name[isinstance], parameter[name[content], name[Paginator]]] begin[:] variable[linked_resources] assign[=] list[[]] if name[content].next_page begin[:] call[name[linked_resources].append, parameter[call[constant[<{0}>; rel="next"].format, parameter[name[content].next_page]]]] if name[content].previous_page begin[:] call[name[linked_resources].append, parameter[call[constant[<{0}>; rel="previous"].format, parameter[name[content].previous_page]]]] call[name[response]][constant[Link]] assign[=] call[constant[, ].join, parameter[name[linked_resources]]] return[name[response]]
keyword[def] identifier[emit] ( identifier[self] , identifier[content] , identifier[request] = keyword[None] , identifier[emitter] = keyword[None] ): literal[string] identifier[emitter] = identifier[emitter] keyword[or] identifier[self] . identifier[determine_emitter] ( identifier[request] ) identifier[emitter] = identifier[emitter] ( identifier[self] , identifier[request] = identifier[request] , identifier[response] = identifier[content] ) identifier[response] = identifier[emitter] . identifier[emit] () keyword[if] keyword[not] identifier[isinstance] ( identifier[response] , identifier[HttpResponse] ): keyword[raise] identifier[AssertionError] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[content] , identifier[Paginator] ): identifier[linked_resources] =[] keyword[if] identifier[content] . identifier[next_page] : identifier[linked_resources] . identifier[append] ( literal[string] . identifier[format] ( identifier[content] . identifier[next_page] )) keyword[if] identifier[content] . identifier[previous_page] : identifier[linked_resources] . identifier[append] ( literal[string] . identifier[format] ( identifier[content] . identifier[previous_page] )) identifier[response] [ literal[string] ]= literal[string] . identifier[join] ( identifier[linked_resources] ) keyword[return] identifier[response]
def emit(self, content, request=None, emitter=None): """ Serialize response. :return response: Instance of django.http.Response """ # Get emitter for request emitter = emitter or self.determine_emitter(request) emitter = emitter(self, request=request, response=content) # Serialize the response content response = emitter.emit() if not isinstance(response, HttpResponse): raise AssertionError('Emitter must return HttpResponse') # depends on [control=['if'], data=[]] # Append pagination headers if isinstance(content, Paginator): linked_resources = [] if content.next_page: linked_resources.append('<{0}>; rel="next"'.format(content.next_page)) # depends on [control=['if'], data=[]] if content.previous_page: linked_resources.append('<{0}>; rel="previous"'.format(content.previous_page)) # depends on [control=['if'], data=[]] response['Link'] = ', '.join(linked_resources) # depends on [control=['if'], data=[]] return response
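The pagination branch above builds an RFC 5988-style Link header. A standalone sketch of just that string construction (my illustration, with hypothetical URLs):

next_page = 'https://api.example.com/items?page=3'
previous_page = 'https://api.example.com/items?page=1'

linked_resources = []
if next_page:
    linked_resources.append('<{0}>; rel="next"'.format(next_page))
if previous_page:
    linked_resources.append('<{0}>; rel="previous"'.format(previous_page))

print(", ".join(linked_resources))
# <https://api.example.com/items?page=3>; rel="next", <https://api.example.com/items?page=1>; rel="previous"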
def ckw05(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid, sclkdp, packts, rate, nints, starts): """ Write a type 5 segment to a CK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html :param handle: Handle of an open CK file. :type handle: int :param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above. :type subtype: int :param degree: Degree of interpolating polynomials. :type degree: int :param begtim: The beginning encoded SCLK of the segment. :type begtim: float :param endtim: The ending encoded SCLK of the segment. :type endtim: float :param inst: The NAIF instrument ID code. :type inst: int :param ref: The reference frame of the segment. :type ref: str :param avflag: True if the segment will contain angular velocity. :type avflag: bool :param segid: Segment identifier. :type segid: str :param sclkdp: Encoded SCLK times. :type sclkdp: Array of floats :param packts: Array of packets. :type packts: Some NxM vector of floats :param rate: Nominal SCLK rate in seconds per tick. :type rate: float :param nints: Number of intervals. :type nints: int :param starts: Encoded SCLK interval start times. :type starts: Array of floats """ handle = ctypes.c_int(handle) subtype = ctypes.c_int(subtype) degree = ctypes.c_int(degree) begtim = ctypes.c_double(begtim) endtim = ctypes.c_double(endtim) inst = ctypes.c_int(inst) ref = stypes.stringToCharP(ref) avflag = ctypes.c_int(avflag) segid = stypes.stringToCharP(segid) n = ctypes.c_int(len(packts)) sclkdp = stypes.toDoubleVector(sclkdp) packts = stypes.toDoubleMatrix(packts) rate = ctypes.c_double(rate) nints = ctypes.c_int(nints) starts = stypes.toDoubleVector(starts) libspice.ckw05_c(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid, n, sclkdp, packts, rate, nints, starts)
def function[ckw05, parameter[handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid, sclkdp, packts, rate, nints, starts]]: constant[ Write a type 5 segment to a CK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html :param handle: Handle of an open CK file. :type handle: int :param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above. :type subtype: int :param degree: Degree of interpolating polynomials. :type degree: int :param begtim: The beginning encoded SCLK of the segment. :type begtim: float :param endtim: The ending encoded SCLK of the segment. :type endtim: float :param inst: The NAIF instrument ID code. :type inst: int :param ref: The reference frame of the segment. :type ref: str :param avflag: True if the segment will contain angular velocity. :type avflag: bool :param segid: Segment identifier. :type segid: str :param sclkdp: Encoded SCLK times. :type sclkdp: Array of floats :param packts: Array of packets. :type packts: Some NxM vector of floats :param rate: Nominal SCLK rate in seconds per tick. :type rate: float :param nints: Number of intervals. :type nints: int :param starts: Encoded SCLK interval start times. :type starts: Array of floats ] variable[handle] assign[=] call[name[ctypes].c_int, parameter[name[handle]]] variable[subtype] assign[=] call[name[ctypes].c_int, parameter[name[subtype]]] variable[degree] assign[=] call[name[ctypes].c_int, parameter[name[degree]]] variable[begtim] assign[=] call[name[ctypes].c_double, parameter[name[begtim]]] variable[endtim] assign[=] call[name[ctypes].c_double, parameter[name[endtim]]] variable[inst] assign[=] call[name[ctypes].c_int, parameter[name[inst]]] variable[ref] assign[=] call[name[stypes].stringToCharP, parameter[name[ref]]] variable[avflag] assign[=] call[name[ctypes].c_int, parameter[name[avflag]]] variable[segid] assign[=] call[name[stypes].stringToCharP, parameter[name[segid]]] variable[n] assign[=] call[name[ctypes].c_int, parameter[call[name[len], parameter[name[packts]]]]] variable[sclkdp] assign[=] call[name[stypes].toDoubleVector, parameter[name[sclkdp]]] variable[packts] assign[=] call[name[stypes].toDoubleMatrix, parameter[name[packts]]] variable[rate] assign[=] call[name[ctypes].c_double, parameter[name[rate]]] variable[nints] assign[=] call[name[ctypes].c_int, parameter[name[nints]]] variable[starts] assign[=] call[name[stypes].toDoubleVector, parameter[name[starts]]] call[name[libspice].ckw05_c, parameter[name[handle], name[subtype], name[degree], name[begtim], name[endtim], name[inst], name[ref], name[avflag], name[segid], name[n], name[sclkdp], name[packts], name[rate], name[nints], name[starts]]]
keyword[def] identifier[ckw05] ( identifier[handle] , identifier[subtype] , identifier[degree] , identifier[begtim] , identifier[endtim] , identifier[inst] , identifier[ref] , identifier[avflag] , identifier[segid] , identifier[sclkdp] , identifier[packts] , identifier[rate] , identifier[nints] , identifier[starts] ): literal[string] identifier[handle] = identifier[ctypes] . identifier[c_int] ( identifier[handle] ) identifier[subtype] = identifier[ctypes] . identifier[c_int] ( identifier[subtype] ) identifier[degree] = identifier[ctypes] . identifier[c_int] ( identifier[degree] ) identifier[begtim] = identifier[ctypes] . identifier[c_double] ( identifier[begtim] ) identifier[endtim] = identifier[ctypes] . identifier[c_double] ( identifier[endtim] ) identifier[inst] = identifier[ctypes] . identifier[c_int] ( identifier[inst] ) identifier[ref] = identifier[stypes] . identifier[stringToCharP] ( identifier[ref] ) identifier[avflag] = identifier[ctypes] . identifier[c_int] ( identifier[avflag] ) identifier[segid] = identifier[stypes] . identifier[stringToCharP] ( identifier[segid] ) identifier[n] = identifier[ctypes] . identifier[c_int] ( identifier[len] ( identifier[packts] )) identifier[sclkdp] = identifier[stypes] . identifier[toDoubleVector] ( identifier[sclkdp] ) identifier[packts] = identifier[stypes] . identifier[toDoubleMatrix] ( identifier[packts] ) identifier[rate] = identifier[ctypes] . identifier[c_double] ( identifier[rate] ) identifier[nints] = identifier[ctypes] . identifier[c_int] ( identifier[nints] ) identifier[starts] = identifier[stypes] . identifier[toDoubleVector] ( identifier[starts] ) identifier[libspice] . identifier[ckw05_c] ( identifier[handle] , identifier[subtype] , identifier[degree] , identifier[begtim] , identifier[endtim] , identifier[inst] , identifier[ref] , identifier[avflag] , identifier[segid] , identifier[n] , identifier[sclkdp] , identifier[packts] , identifier[rate] , identifier[nints] , identifier[starts] )
def ckw05(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid, sclkdp, packts, rate, nints, starts): """ Write a type 5 segment to a CK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html :param handle: Handle of an open CK file. :type handle: int :param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above. :type subtype: int :param degree: Degree of interpolating polynomials. :type degree: int :param begtim: The beginning encoded SCLK of the segment. :type begtim: float :param endtim: The ending encoded SCLK of the segment. :type endtim: float :param inst: The NAIF instrument ID code. :type inst: int :param ref: The reference frame of the segment. :type ref: str :param avflag: True if the segment will contain angular velocity. :type avflag: bool :param segid: Segment identifier. :type segid: str :param sclkdp: Encoded SCLK times. :type sclkdp: Array of floats :param packts: Array of packets. :type packts: Some NxM vector of floats :param rate: Nominal SCLK rate in seconds per tick. :type rate: float :param nints: Number of intervals. :type nints: int :param starts: Encoded SCLK interval start times. :type starts: Array of floats """ handle = ctypes.c_int(handle) subtype = ctypes.c_int(subtype) degree = ctypes.c_int(degree) begtim = ctypes.c_double(begtim) endtim = ctypes.c_double(endtim) inst = ctypes.c_int(inst) ref = stypes.stringToCharP(ref) avflag = ctypes.c_int(avflag) segid = stypes.stringToCharP(segid) n = ctypes.c_int(len(packts)) sclkdp = stypes.toDoubleVector(sclkdp) packts = stypes.toDoubleMatrix(packts) rate = ctypes.c_double(rate) nints = ctypes.c_int(nints) starts = stypes.toDoubleVector(starts) libspice.ckw05_c(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid, n, sclkdp, packts, rate, nints, starts)
def combine_heads(self, x): """Combine tensor that has been split. Args: x: A tensor [batch_size, num_heads, length, hidden_size/num_heads] Returns: A tensor with shape [batch_size, length, hidden_size] """ with tf.name_scope("combine_heads"): batch_size = tf.shape(x)[0] length = tf.shape(x)[2] x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth] return tf.reshape(x, [batch_size, length, self.hidden_size])
def function[combine_heads, parameter[self, x]]: constant[Combine tensor that has been split. Args: x: A tensor [batch_size, num_heads, length, hidden_size/num_heads] Returns: A tensor with shape [batch_size, length, hidden_size] ] with call[name[tf].name_scope, parameter[constant[combine_heads]]] begin[:] variable[batch_size] assign[=] call[call[name[tf].shape, parameter[name[x]]]][constant[0]] variable[length] assign[=] call[call[name[tf].shape, parameter[name[x]]]][constant[2]] variable[x] assign[=] call[name[tf].transpose, parameter[name[x], list[[<ast.Constant object at 0x7da18bcc80d0>, <ast.Constant object at 0x7da18bcca7d0>, <ast.Constant object at 0x7da18bcca920>, <ast.Constant object at 0x7da18bccb400>]]]] return[call[name[tf].reshape, parameter[name[x], list[[<ast.Name object at 0x7da18bcc9510>, <ast.Name object at 0x7da18bcc8ca0>, <ast.Attribute object at 0x7da18bcc83a0>]]]]]
keyword[def] identifier[combine_heads] ( identifier[self] , identifier[x] ): literal[string] keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ): identifier[batch_size] = identifier[tf] . identifier[shape] ( identifier[x] )[ literal[int] ] identifier[length] = identifier[tf] . identifier[shape] ( identifier[x] )[ literal[int] ] identifier[x] = identifier[tf] . identifier[transpose] ( identifier[x] ,[ literal[int] , literal[int] , literal[int] , literal[int] ]) keyword[return] identifier[tf] . identifier[reshape] ( identifier[x] ,[ identifier[batch_size] , identifier[length] , identifier[self] . identifier[hidden_size] ])
def combine_heads(self, x): """Combine tensor that has been split. Args: x: A tensor [batch_size, num_heads, length, hidden_size/num_heads] Returns: A tensor with shape [batch_size, length, hidden_size] """ with tf.name_scope('combine_heads'): batch_size = tf.shape(x)[0] length = tf.shape(x)[2] x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth] return tf.reshape(x, [batch_size, length, self.hidden_size]) # depends on [control=['with'], data=[]]
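To make the shape bookkeeping concrete, here is a NumPy mirror of the two tensor ops in combine_heads (my illustration; shapes are arbitrary examples):

import numpy as np

batch, heads, length, depth = 2, 4, 5, 8
x = np.arange(batch * heads * length * depth).reshape(batch, heads, length, depth)

# Mirrors tf.transpose(x, [0, 2, 1, 3]) followed by the reshape above
combined = x.transpose(0, 2, 1, 3).reshape(batch, length, heads * depth)
print(combined.shape)  # (2, 5, 32), i.e. [batch_size, length, hidden_size]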
def template_name(self, path, base): """Find out the name of a JS template""" if not base: path = os.path.basename(path) if path == base: base = os.path.dirname(path) name = re.sub(r"^%s[\/\\]?(.*)%s$" % ( re.escape(base), re.escape(settings.TEMPLATE_EXT) ), r"\1", path) return re.sub(r"[\/\\]", settings.TEMPLATE_SEPARATOR, name)
def function[template_name, parameter[self, path, base]]: constant[Find out the name of a JS template] if <ast.UnaryOp object at 0x7da1b080a800> begin[:] variable[path] assign[=] call[name[os].path.basename, parameter[name[path]]] if compare[name[path] equal[==] name[base]] begin[:] variable[base] assign[=] call[name[os].path.dirname, parameter[name[path]]] variable[name] assign[=] call[name[re].sub, parameter[binary_operation[constant[^%s[\/\\]?(.*)%s$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b080a0b0>, <ast.Call object at 0x7da1b080a410>]]], constant[\1], name[path]]] return[call[name[re].sub, parameter[constant[[\/\\]], name[settings].TEMPLATE_SEPARATOR, name[name]]]]
keyword[def] identifier[template_name] ( identifier[self] , identifier[path] , identifier[base] ): literal[string] keyword[if] keyword[not] identifier[base] : identifier[path] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] ) keyword[if] identifier[path] == identifier[base] : identifier[base] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[path] ) identifier[name] = identifier[re] . identifier[sub] ( literal[string] %( identifier[re] . identifier[escape] ( identifier[base] ), identifier[re] . identifier[escape] ( identifier[settings] . identifier[TEMPLATE_EXT] ) ), literal[string] , identifier[path] ) keyword[return] identifier[re] . identifier[sub] ( literal[string] , identifier[settings] . identifier[TEMPLATE_SEPARATOR] , identifier[name] )
def template_name(self, path, base): """Find out the name of a JS template""" if not base: path = os.path.basename(path) # depends on [control=['if'], data=[]] if path == base: base = os.path.dirname(path) # depends on [control=['if'], data=['path', 'base']] name = re.sub('^%s[\\/\\\\]?(.*)%s$' % (re.escape(base), re.escape(settings.TEMPLATE_EXT)), '\\1', path) return re.sub('[\\/\\\\]', settings.TEMPLATE_SEPARATOR, name)
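A standalone re-creation of the same logic (my sketch), with hypothetical values standing in for the two settings the method reads:

import os
import re

# Hypothetical stand-ins for settings.TEMPLATE_EXT / settings.TEMPLATE_SEPARATOR
TEMPLATE_EXT = '.jst'
TEMPLATE_SEPARATOR = '.'

def template_name(path, base):
    if not base:
        path = os.path.basename(path)
    if path == base:
        base = os.path.dirname(path)
    name = re.sub(r"^%s[\/\\]?(.*)%s$" % (re.escape(base), re.escape(TEMPLATE_EXT)),
                  r"\1", path)
    return re.sub(r"[\/\\]", TEMPLATE_SEPARATOR, name)

print(template_name('app/templates/widgets/button.jst', 'app/templates'))
# -> 'widgets.button'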
def contains_relevant_concept( s: Influence, relevant_concepts: List[str], cutoff=0.7 ) -> bool: """ Returns true if a given Influence statement has a relevant concept, and false otherwise. """ return any( map(lambda c: contains_concept(s, c, cutoff=cutoff), relevant_concepts) )
def function[contains_relevant_concept, parameter[s, relevant_concepts, cutoff]]: constant[ Returns true if a given Influence statement has a relevant concept, and false otherwise. ] return[call[name[any], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20c6a8b80>, name[relevant_concepts]]]]]]
keyword[def] identifier[contains_relevant_concept] ( identifier[s] : identifier[Influence] , identifier[relevant_concepts] : identifier[List] [ identifier[str] ], identifier[cutoff] = literal[int] )-> identifier[bool] : literal[string] keyword[return] identifier[any] ( identifier[map] ( keyword[lambda] identifier[c] : identifier[contains_concept] ( identifier[s] , identifier[c] , identifier[cutoff] = identifier[cutoff] ), identifier[relevant_concepts] ) )
def contains_relevant_concept(s: Influence, relevant_concepts: List[str], cutoff=0.7) -> bool: """ Returns true if a given Influence statement has a relevant concept, and false otherwise. """ return any(map(lambda c: contains_concept(s, c, cutoff=cutoff), relevant_concepts))
def print_brokers(cluster_config, brokers): """Print the list of brokers that will be restarted. :param cluster_config: the cluster configuration :type cluster_config: map :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names """ print("Will restart the following brokers in {0}:".format(cluster_config.name)) for id, host in brokers: print(" {0}: {1}".format(id, host))
def function[print_brokers, parameter[cluster_config, brokers]]: constant[Print the list of brokers that will be restarted. :param cluster_config: the cluster configuration :type cluster_config: map :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names ] call[name[print], parameter[call[constant[Will restart the following brokers in {0}:].format, parameter[name[cluster_config].name]]]] for taget[tuple[[<ast.Name object at 0x7da1b077a020>, <ast.Name object at 0x7da1b077a0b0>]]] in starred[name[brokers]] begin[:] call[name[print], parameter[call[constant[ {0}: {1}].format, parameter[name[id], name[host]]]]]
keyword[def] identifier[print_brokers] ( identifier[cluster_config] , identifier[brokers] ): literal[string] identifier[print] ( literal[string] . identifier[format] ( identifier[cluster_config] . identifier[name] )) keyword[for] identifier[id] , identifier[host] keyword[in] identifier[brokers] : identifier[print] ( literal[string] . identifier[format] ( identifier[id] , identifier[host] ))
def print_brokers(cluster_config, brokers): """Print the list of brokers that will be restarted. :param cluster_config: the cluster configuration :type cluster_config: map :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names """ print('Will restart the following brokers in {0}:'.format(cluster_config.name)) for (id, host) in brokers: print(' {0}: {1}'.format(id, host)) # depends on [control=['for'], data=[]]
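A usage sketch (not from the source), with a hypothetical stand-in for the cluster configuration object; only a .name attribute is needed here:

from collections import namedtuple

ClusterConfig = namedtuple('ClusterConfig', ['name'])  # hypothetical stand-in

cluster = ClusterConfig(name='uswest-prod')
brokers = [(0, 'kafka-0.example.com'), (1, 'kafka-1.example.com')]

print_brokers(cluster, brokers)
# Will restart the following brokers in uswest-prod:
#   0: kafka-0.example.com
#   1: kafka-1.example.com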
def compute_output_shape(self, input_shape): """Computes the output shape of the layer. Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, instead of an integer. Returns: output_shape: A tuple representing the output shape. """ input_shape = tf.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': space = input_shape[1:-1] new_space = [] for i in range(len(space)): new_dim = tf_layers_util.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return tf.TensorShape([input_shape[0]] + new_space + [self.filters]) else: space = input_shape[2:] new_space = [] for i in range(len(space)): new_dim = tf_layers_util.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return tf.TensorShape([input_shape[0], self.filters] + new_space)
def function[compute_output_shape, parameter[self, input_shape]]: constant[Computes the output shape of the layer. Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, instead of an integer. Returns: output_shape: A tuple representing the output shape. ] variable[input_shape] assign[=] call[call[name[tf].TensorShape, parameter[name[input_shape]]].as_list, parameter[]] if compare[name[self].data_format equal[==] constant[channels_last]] begin[:] variable[space] assign[=] call[name[input_shape]][<ast.Slice object at 0x7da1b05be7a0>] variable[new_space] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[space]]]]]] begin[:] variable[new_dim] assign[=] call[name[tf_layers_util].conv_output_length, parameter[call[name[space]][name[i]], call[name[self].kernel_size][name[i]]]] call[name[new_space].append, parameter[name[new_dim]]] return[call[name[tf].TensorShape, parameter[binary_operation[binary_operation[list[[<ast.Subscript object at 0x7da1b0354190>]] + name[new_space]] + list[[<ast.Attribute object at 0x7da1b0354070>]]]]]]
keyword[def] identifier[compute_output_shape] ( identifier[self] , identifier[input_shape] ): literal[string] identifier[input_shape] = identifier[tf] . identifier[TensorShape] ( identifier[input_shape] ). identifier[as_list] () keyword[if] identifier[self] . identifier[data_format] == literal[string] : identifier[space] = identifier[input_shape] [ literal[int] :- literal[int] ] identifier[new_space] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[space] )): identifier[new_dim] = identifier[tf_layers_util] . identifier[conv_output_length] ( identifier[space] [ identifier[i] ], identifier[self] . identifier[kernel_size] [ identifier[i] ], identifier[padding] = identifier[self] . identifier[padding] , identifier[stride] = identifier[self] . identifier[strides] [ identifier[i] ], identifier[dilation] = identifier[self] . identifier[dilation_rate] [ identifier[i] ]) identifier[new_space] . identifier[append] ( identifier[new_dim] ) keyword[return] identifier[tf] . identifier[TensorShape] ([ identifier[input_shape] [ literal[int] ]]+ identifier[new_space] +[ identifier[self] . identifier[filters] ]) keyword[else] : identifier[space] = identifier[input_shape] [ literal[int] :] identifier[new_space] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[space] )): identifier[new_dim] = identifier[tf_layers_util] . identifier[conv_output_length] ( identifier[space] [ identifier[i] ], identifier[self] . identifier[kernel_size] [ identifier[i] ], identifier[padding] = identifier[self] . identifier[padding] , identifier[stride] = identifier[self] . identifier[strides] [ identifier[i] ], identifier[dilation] = identifier[self] . identifier[dilation_rate] [ identifier[i] ]) identifier[new_space] . identifier[append] ( identifier[new_dim] ) keyword[return] identifier[tf] . identifier[TensorShape] ([ identifier[input_shape] [ literal[int] ], identifier[self] . identifier[filters] ]+ identifier[new_space] )
def compute_output_shape(self, input_shape): """Computes the output shape of the layer. Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, instead of an integer. Returns: output_shape: A tuple representing the output shape. """ input_shape = tf.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': space = input_shape[1:-1] new_space = [] for i in range(len(space)): new_dim = tf_layers_util.conv_output_length(space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) # depends on [control=['for'], data=['i']] return tf.TensorShape([input_shape[0]] + new_space + [self.filters]) # depends on [control=['if'], data=[]] else: space = input_shape[2:] new_space = [] for i in range(len(space)): new_dim = tf_layers_util.conv_output_length(space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) # depends on [control=['for'], data=['i']] return tf.TensorShape([input_shape[0], self.filters] + new_space)
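tf_layers_util.conv_output_length is not shown in this row. As an assumption on my part, a Keras-style implementation of the usual output-length formula for the two common paddings looks like this, with two sample computations:

def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
    # Standard conv arithmetic, as used by Keras-style utilities (assumed here)
    if input_length is None:
        return None
    dilated = filter_size + (filter_size - 1) * (dilation - 1)
    if padding == 'same':
        output_length = input_length
    elif padding == 'valid':
        output_length = input_length - dilated + 1
    return (output_length + stride - 1) // stride  # ceil division by stride

print(conv_output_length(32, 3, 'valid', 2))  # 15
print(conv_output_length(32, 3, 'same', 2))   # 16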
def ticket_field_options(self, field_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/ticket_fields#list-ticket-field-options" api_path = "/api/v2/ticket_fields/{field_id}/options.json" api_path = api_path.format(field_id=field_id) return self.call(api_path, **kwargs)
def function[ticket_field_options, parameter[self, field_id]]: constant[https://developer.zendesk.com/rest_api/docs/core/ticket_fields#list-ticket-field-options] variable[api_path] assign[=] constant[/api/v2/ticket_fields/{field_id}/options.json] variable[api_path] assign[=] call[name[api_path].format, parameter[]] return[call[name[self].call, parameter[name[api_path]]]]
keyword[def] identifier[ticket_field_options] ( identifier[self] , identifier[field_id] ,** identifier[kwargs] ): literal[string] identifier[api_path] = literal[string] identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[field_id] = identifier[field_id] ) keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] )
def ticket_field_options(self, field_id, **kwargs): """https://developer.zendesk.com/rest_api/docs/core/ticket_fields#list-ticket-field-options""" api_path = '/api/v2/ticket_fields/{field_id}/options.json' api_path = api_path.format(field_id=field_id) return self.call(api_path, **kwargs)
def all_qubits(self) -> FrozenSet[ops.Qid]: """Returns the qubits acted upon by Operations in this circuit.""" return frozenset(q for m in self._moments for q in m.qubits)
def function[all_qubits, parameter[self]]: constant[Returns the qubits acted upon by Operations in this circuit.] return[call[name[frozenset], parameter[<ast.GeneratorExp object at 0x7da1b1cec220>]]]
keyword[def] identifier[all_qubits] ( identifier[self] )-> identifier[FrozenSet] [ identifier[ops] . identifier[Qid] ]: literal[string] keyword[return] identifier[frozenset] ( identifier[q] keyword[for] identifier[m] keyword[in] identifier[self] . identifier[_moments] keyword[for] identifier[q] keyword[in] identifier[m] . identifier[qubits] )
def all_qubits(self) -> FrozenSet[ops.Qid]: """Returns the qubits acted upon by Operations in this circuit.""" return frozenset((q for m in self._moments for q in m.qubits))
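The ops.Qid annotation and _moments attribute suggest this is a Cirq circuit method (an inference on my part). Assuming cirq is installed, a minimal call looks like:

import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.H(a), cirq.CNOT(a, b)])
print(sorted(circuit.all_qubits()))  # [cirq.LineQubit(0), cirq.LineQubit(1)]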
def process_wait_close(process, timeout=0): """ Pauses script execution until a given process does not exist. :param process: :param timeout: :return: """ ret = AUTO_IT.AU3_ProcessWaitClose(LPCWSTR(process), INT(timeout)) return ret
def function[process_wait_close, parameter[process, timeout]]: constant[ Pauses script execution until a given process does not exist. :param process: :param timeout: :return: ] variable[ret] assign[=] call[name[AUTO_IT].AU3_ProcessWaitClose, parameter[call[name[LPCWSTR], parameter[name[process]]], call[name[INT], parameter[name[timeout]]]]] return[name[ret]]
keyword[def] identifier[process_wait_close] ( identifier[process] , identifier[timeout] = literal[int] ): literal[string] identifier[ret] = identifier[AUTO_IT] . identifier[AU3_ProcessWaitClose] ( identifier[LPCWSTR] ( identifier[process] ), identifier[INT] ( identifier[timeout] )) keyword[return] identifier[ret]
def process_wait_close(process, timeout=0): """ Pauses script execution until a given process does not exist. :param process: :param timeout: :return: """ ret = AUTO_IT.AU3_ProcessWaitClose(LPCWSTR(process), INT(timeout)) return ret
def merge_dicts(a, b, path=None): """ Merge dict :b: into dict :a: Code snippet from http://stackoverflow.com/a/7205107 """ if path is None: path = [] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): merge_dicts(a[key], b[key], path + [str(key)]) elif a[key] == b[key]: pass # same leaf value else: raise Exception( 'Conflict at %s' % '.'.join(path + [str(key)])) else: a[key] = b[key] return a
def function[merge_dicts, parameter[a, b, path]]: constant[ Merge dict :b: into dict :a: Code snippet from http://stackoverflow.com/a/7205107 ] if compare[name[path] is constant[None]] begin[:] variable[path] assign[=] list[[]] for taget[name[key]] in starred[name[b]] begin[:] if compare[name[key] in name[a]] begin[:] if <ast.BoolOp object at 0x7da20e956920> begin[:] call[name[merge_dicts], parameter[call[name[a]][name[key]], call[name[b]][name[key]], binary_operation[name[path] + list[[<ast.Call object at 0x7da20e954d00>]]]]] return[name[a]]
keyword[def] identifier[merge_dicts] ( identifier[a] , identifier[b] , identifier[path] = keyword[None] ): literal[string] keyword[if] identifier[path] keyword[is] keyword[None] : identifier[path] =[] keyword[for] identifier[key] keyword[in] identifier[b] : keyword[if] identifier[key] keyword[in] identifier[a] : keyword[if] identifier[isinstance] ( identifier[a] [ identifier[key] ], identifier[dict] ) keyword[and] identifier[isinstance] ( identifier[b] [ identifier[key] ], identifier[dict] ): identifier[merge_dicts] ( identifier[a] [ identifier[key] ], identifier[b] [ identifier[key] ], identifier[path] +[ identifier[str] ( identifier[key] )]) keyword[elif] identifier[a] [ identifier[key] ]== identifier[b] [ identifier[key] ]: keyword[pass] keyword[else] : keyword[raise] identifier[Exception] ( literal[string] % literal[string] . identifier[join] ( identifier[path] +[ identifier[str] ( identifier[key] )])) keyword[else] : identifier[a] [ identifier[key] ]= identifier[b] [ identifier[key] ] keyword[return] identifier[a]
def merge_dicts(a, b, path=None): """ Merge dict :b: into dict :a: Code snippet from http://stackoverflow.com/a/7205107 """ if path is None: path = [] # depends on [control=['if'], data=['path']] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): merge_dicts(a[key], b[key], path + [str(key)]) # depends on [control=['if'], data=[]] elif a[key] == b[key]: pass # same leaf value # depends on [control=['if'], data=[]] else: raise Exception('Conflict at %s' % '.'.join(path + [str(key)])) # depends on [control=['if'], data=['key', 'a']] else: a[key] = b[key] # depends on [control=['for'], data=['key']] return a
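A usage sketch (not from the source) showing both the recursive merge and the conflict error; note that merge_dicts mutates a in place and returns it:

a = {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
b = {'db': {'user': 'admin', 'port': 5432}}

merged = merge_dicts(a, b)
print(merged['db'])  # {'host': 'localhost', 'port': 5432, 'user': 'admin'}

try:
    merge_dicts({'x': 1}, {'x': 2})
except Exception as e:
    print(e)  # Conflict at x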
def prettify(self, elem):
        """Parse xml elements for pretty printing"""

        from xml.etree import ElementTree
        from re import sub

        rawString = ElementTree.tostring(elem, 'utf-8').decode('utf-8')  # tostring returns bytes on Python 3
        parsedString = sub(r'(?=<[^/].*>)', '\n', rawString)  # Adds a newline before each opening tag

        return parsedString[1:]
def function[prettify, parameter[self, elem]]: constant[Parse xml elements for pretty printing] from relative_module[xml.etree] import module[ElementTree] from relative_module[re] import module[sub] variable[rawString] assign[=] call[name[ElementTree].tostring, parameter[name[elem], constant[utf-8]]] variable[parsedString] assign[=] call[name[sub], parameter[constant[(?=<[^/].*>)], constant[ ], name[rawString]]] return[call[name[parsedString]][<ast.Slice object at 0x7da2054a77f0>]]
keyword[def] identifier[prettify] ( identifier[self] , identifier[elem] ): literal[string] keyword[from] identifier[xml] . identifier[etree] keyword[import] identifier[ElementTree] keyword[from] identifier[re] keyword[import] identifier[sub] identifier[rawString] = identifier[ElementTree] . identifier[tostring] ( identifier[elem] , literal[string] ) identifier[parsedString] = identifier[sub] ( literal[string] , literal[string] , identifier[rawString] ) keyword[return] identifier[parsedString] [ literal[int] :]
def prettify(self, elem):
    """Parse xml elements for pretty printing"""
    from xml.etree import ElementTree
    from re import sub
    rawString = ElementTree.tostring(elem, 'utf-8').decode('utf-8') # tostring returns bytes on Python 3
    parsedString = sub('(?=<[^/].*>)', '\n', rawString) # Adds a newline before each opening tag
    return parsedString[1:]
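A standalone demo of the same regex trick (my construction), using the decode fix noted above so it runs on Python 3:

from re import sub
from xml.etree import ElementTree

root = ElementTree.Element('config')
ElementTree.SubElement(root, 'item', name='alpha')

raw = ElementTree.tostring(root, 'utf-8').decode('utf-8')
print(sub(r'(?=<[^/].*>)', '\n', raw)[1:])
# Each opening tag (including the XML declaration) now starts on its own line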
def pointcut(self, value):
        """Change the pointcut, re-weaving the advices on all targets.
        """

        pointcut = getattr(self, Interceptor.POINTCUT)

        # for all targets
        for target in self.targets:
            # unweave old advices
            unweave(target, pointcut=pointcut, advices=self.intercepts)
            # weave new advices with new pointcut
            weave(target, pointcut=value, advices=self.intercepts)

        # and save new pointcut
        setattr(self, Interceptor._POINTCUT, value)
def function[pointcut, parameter[self, value]]: constant[Change of pointcut. ] variable[pointcut] assign[=] call[name[getattr], parameter[name[self], name[Interceptor].POINTCUT]] for taget[name[target]] in starred[name[self].targets] begin[:] call[name[unweave], parameter[name[target]]] call[name[weave], parameter[name[target]]] call[name[setattr], parameter[name[self], name[Interceptor]._POINTCUT, name[value]]]
keyword[def] identifier[pointcut] ( identifier[self] , identifier[value] ): literal[string] identifier[pointcut] = identifier[getattr] ( identifier[self] , identifier[Interceptor] . identifier[POINTCUT] ) keyword[for] identifier[target] keyword[in] identifier[self] . identifier[targets] : identifier[unweave] ( identifier[target] , identifier[pointcut] = identifier[pointcut] , identifier[advices] = identifier[self] . identifier[intercepts] ) identifier[weave] ( identifier[target] , identifier[pointcut] = identifier[value] , identifier[advices] = identifier[self] . identifier[intercepts] ) identifier[setattr] ( identifier[self] , identifier[Interceptor] . identifier[_POINTCUT] , identifier[value] )
def pointcut(self, value):
    """Change the pointcut, re-weaving the advices on all targets.
    """
    pointcut = getattr(self, Interceptor.POINTCUT)
    # for all targets
    for target in self.targets:
        # unweave old advices
        unweave(target, pointcut=pointcut, advices=self.intercepts)
        # weave new advices with new pointcut
        weave(target, pointcut=value, advices=self.intercepts) # depends on [control=['for'], data=['target']]
    # and save new pointcut
    setattr(self, Interceptor._POINTCUT, value)
def log_exception(exc_info=None, stream=None): """Log the 'exc_info' tuple in the server log.""" exc_info = exc_info or sys.exc_info() stream = stream or sys.stderr try: from traceback import print_exception print_exception(exc_info[0], exc_info[1], exc_info[2], None, stream) stream.flush() finally: exc_info = None
def function[log_exception, parameter[exc_info, stream]]: constant[Log the 'exc_info' tuple in the server log.] variable[exc_info] assign[=] <ast.BoolOp object at 0x7da20c6aace0> variable[stream] assign[=] <ast.BoolOp object at 0x7da20c6a8040> <ast.Try object at 0x7da20c6a9600>
keyword[def] identifier[log_exception] ( identifier[exc_info] = keyword[None] , identifier[stream] = keyword[None] ): literal[string] identifier[exc_info] = identifier[exc_info] keyword[or] identifier[sys] . identifier[exc_info] () identifier[stream] = identifier[stream] keyword[or] identifier[sys] . identifier[stderr] keyword[try] : keyword[from] identifier[traceback] keyword[import] identifier[print_exception] identifier[print_exception] ( identifier[exc_info] [ literal[int] ], identifier[exc_info] [ literal[int] ], identifier[exc_info] [ literal[int] ], keyword[None] , identifier[stream] ) identifier[stream] . identifier[flush] () keyword[finally] : identifier[exc_info] = keyword[None]
def log_exception(exc_info=None, stream=None): """Log the 'exc_info' tuple in the server log.""" exc_info = exc_info or sys.exc_info() stream = stream or sys.stderr try: from traceback import print_exception print_exception(exc_info[0], exc_info[1], exc_info[2], None, stream) stream.flush() # depends on [control=['try'], data=[]] finally: exc_info = None
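A small capture sketch (my illustration), routing the traceback into a StringIO instead of stderr; log_exception must be called while the exception is active so that sys.exc_info() is populated:

import io

buf = io.StringIO()
try:
    1 / 0
except ZeroDivisionError:
    log_exception(stream=buf)  # exc_info defaults to sys.exc_info()

print(buf.getvalue().splitlines()[-1])  # ZeroDivisionError: division by zero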
def parse_response(self, response): """ Parse XMLRPC response """ parser, unmarshaller = self.getparser() parser.feed(response.text.encode('utf-8')) parser.close() return unmarshaller.close()
def function[parse_response, parameter[self, response]]: constant[ Parse XMLRPC response ] <ast.Tuple object at 0x7da1b0dbf490> assign[=] call[name[self].getparser, parameter[]] call[name[parser].feed, parameter[call[name[response].text.encode, parameter[constant[utf-8]]]]] call[name[parser].close, parameter[]] return[call[name[unmarshaller].close, parameter[]]]
keyword[def] identifier[parse_response] ( identifier[self] , identifier[response] ): literal[string] identifier[parser] , identifier[unmarshaller] = identifier[self] . identifier[getparser] () identifier[parser] . identifier[feed] ( identifier[response] . identifier[text] . identifier[encode] ( literal[string] )) identifier[parser] . identifier[close] () keyword[return] identifier[unmarshaller] . identifier[close] ()
def parse_response(self, response): """ Parse XMLRPC response """ (parser, unmarshaller) = self.getparser() parser.feed(response.text.encode('utf-8')) parser.close() return unmarshaller.close()
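The parser/unmarshaller pair here is the same one the standard library's xmlrpc.client.getparser() returns. A self-contained sketch of the feed/close sequence (my construction):

import xmlrpc.client

body = ("<?xml version='1.0'?>"
        "<methodResponse><params>"
        "<param><value><int>42</int></value></param>"
        "</params></methodResponse>")

parser, unmarshaller = xmlrpc.client.getparser()
parser.feed(body.encode('utf-8'))
parser.close()
print(unmarshaller.close())  # (42,)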
def _construct_approximation(self, basis_kwargs, coefs_list): """ Construct a collection of derivatives and functions that approximate the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str: ) coefs_list : list(numpy.ndarray) Returns ------- basis_derivs : list(function) basis_funcs : list(function) """ derivs = self._construct_derivatives(coefs_list, **basis_kwargs) funcs = self._construct_functions(coefs_list, **basis_kwargs) return derivs, funcs
def function[_construct_approximation, parameter[self, basis_kwargs, coefs_list]]: constant[ Construct a collection of derivatives and functions that approximate the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str: ) coefs_list : list(numpy.ndarray) Returns ------- basis_derivs : list(function) basis_funcs : list(function) ] variable[derivs] assign[=] call[name[self]._construct_derivatives, parameter[name[coefs_list]]] variable[funcs] assign[=] call[name[self]._construct_functions, parameter[name[coefs_list]]] return[tuple[[<ast.Name object at 0x7da1b252b0a0>, <ast.Name object at 0x7da1b2529150>]]]
keyword[def] identifier[_construct_approximation] ( identifier[self] , identifier[basis_kwargs] , identifier[coefs_list] ): literal[string] identifier[derivs] = identifier[self] . identifier[_construct_derivatives] ( identifier[coefs_list] ,** identifier[basis_kwargs] ) identifier[funcs] = identifier[self] . identifier[_construct_functions] ( identifier[coefs_list] ,** identifier[basis_kwargs] ) keyword[return] identifier[derivs] , identifier[funcs]
def _construct_approximation(self, basis_kwargs, coefs_list): """ Construct a collection of derivatives and functions that approximate the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str: ) coefs_list : list(numpy.ndarray) Returns ------- basis_derivs : list(function) basis_funcs : list(function) """ derivs = self._construct_derivatives(coefs_list, **basis_kwargs) funcs = self._construct_functions(coefs_list, **basis_kwargs) return (derivs, funcs)
def computeSVD(self, k, computeU=False, rCond=1e-9):
        """
        Computes the singular value decomposition of the RowMatrix.

        The given row matrix A of dimension (m X n) is decomposed into
        U * s * V' where

        * U: (m X k) (left singular vectors) is a RowMatrix whose
             columns are the eigenvectors of (A X A')
        * s: DenseVector consisting of square root of the eigenvalues
             (singular values) in descending order.
        * V: (n X k) (right singular vectors) is a Matrix whose columns
             are the eigenvectors of (A' X A)

        For more specific details on implementation, please refer to the
        Scala documentation.

        :param k: Number of leading singular values to keep (`0 < k <= n`).
                  It might return fewer than k singular values if there are
                  numerically zero singular values or there are not enough
                  Ritz values converged before the maximum number of Arnoldi
                  update iterations is reached (in case that matrix A is
                  ill-conditioned).
        :param computeU: Whether or not to compute U. If set to be
                         True, then U is computed by A * V * s^-1
        :param rCond: Reciprocal condition number. All singular values
                      smaller than rCond * s[0] are treated as zero
                      where s[0] is the largest singular value.
        :returns: :py:class:`SingularValueDecomposition`

        >>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
        >>> rm = RowMatrix(rows)

        >>> svd_model = rm.computeSVD(2, True)
        >>> svd_model.U.rows.collect()
        [DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
        >>> svd_model.s
        DenseVector([3.4641, 3.1623])
        >>> svd_model.V
        DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
        """
        j_model = self._java_matrix_wrapper.call(
            "computeSVD", int(k), bool(computeU), float(rCond))
        return SingularValueDecomposition(j_model)
def function[computeSVD, parameter[self, k, computeU, rCond]]: constant[ Computes the singular value decomposition of the RowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V'T where * U: (m X k) (left singular vectors) is a RowMatrix whose columns are the eigenvectors of (A X A') * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) For more specific details on implementation, please refer the Scala documentation. :param k: Number of leading singular values to keep (`0 < k <= n`). It might return less than k if there are numerically zero singular values or there are not enough Ritz values converged before the maximum number of Arnoldi update iterations is reached (in case that matrix A is ill-conditioned). :param computeU: Whether or not to compute U. If set to be True, then U is computed by A * V * s^-1 :param rCond: Reciprocal condition number. All singular values smaller than rCond * s[0] are treated as zero where s[0] is the largest singular value. :returns: :py:class:`SingularValueDecomposition` >>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]]) >>> rm = RowMatrix(rows) >>> svd_model = rm.computeSVD(2, True) >>> svd_model.U.rows.collect() [DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])] >>> svd_model.s DenseVector([3.4641, 3.1623]) >>> svd_model.V DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0) ] variable[j_model] assign[=] call[name[self]._java_matrix_wrapper.call, parameter[constant[computeSVD], call[name[int], parameter[name[k]]], call[name[bool], parameter[name[computeU]]], call[name[float], parameter[name[rCond]]]]] return[call[name[SingularValueDecomposition], parameter[name[j_model]]]]
keyword[def] identifier[computeSVD] ( identifier[self] , identifier[k] , identifier[computeU] = keyword[False] , identifier[rCond] = literal[int] ): literal[string] identifier[j_model] = identifier[self] . identifier[_java_matrix_wrapper] . identifier[call] ( literal[string] , identifier[int] ( identifier[k] ), identifier[bool] ( identifier[computeU] ), identifier[float] ( identifier[rCond] )) keyword[return] identifier[SingularValueDecomposition] ( identifier[j_model] )
def computeSVD(self, k, computeU=False, rCond=1e-09):
    """
    Computes the singular value decomposition of the RowMatrix.

    The given row matrix A of dimension (m X n) is decomposed into
    U * s * V' where

    * U: (m X k) (left singular vectors) is a RowMatrix whose
         columns are the eigenvectors of (A X A')
    * s: DenseVector consisting of square root of the eigenvalues
         (singular values) in descending order.
    * V: (n X k) (right singular vectors) is a Matrix whose columns
         are the eigenvectors of (A' X A)

    For more specific details on implementation, please refer to the
    Scala documentation.

    :param k: Number of leading singular values to keep (`0 < k <= n`).
              It might return fewer than k singular values if there are
              numerically zero singular values or there are not enough
              Ritz values converged before the maximum number of Arnoldi
              update iterations is reached (in case that matrix A is
              ill-conditioned).
    :param computeU: Whether or not to compute U. If set to be
                     True, then U is computed by A * V * s^-1
    :param rCond: Reciprocal condition number. All singular values
                  smaller than rCond * s[0] are treated as zero
                  where s[0] is the largest singular value.
    :returns: :py:class:`SingularValueDecomposition`

    >>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
    >>> rm = RowMatrix(rows)

    >>> svd_model = rm.computeSVD(2, True)
    >>> svd_model.U.rows.collect()
    [DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
    >>> svd_model.s
    DenseVector([3.4641, 3.1623])
    >>> svd_model.V
    DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
    """
    j_model = self._java_matrix_wrapper.call('computeSVD', int(k), bool(computeU), float(rCond))
    return SingularValueDecomposition(j_model)
def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc):
    """
    Creates the function that calculates the potential given the position (in volts)
    and the radius of the particle.

    Parameters
    ----------
    kBT_Gamma : float
        Value of kB*T/Gamma
    density : float
        density of the nanoparticle
    SpringPotnlFunc : function
        Function which takes the value of position (in volts)
        and returns the spring potential
    
    Returns
    -------
    PotentialFunc : function
        function that calculates the potential given the position (in volts)
        and the radius of the particle.

    """
    def PotentialFunc(xdata, Radius):
        """
        calculates the potential given the position (in volts) 
        and the radius of the particle.

        Parameters
        ----------
        xdata : ndarray
            Position data (in volts)
        Radius : float
            Radius in units of nm

        Returns
        -------
        Potential : ndarray
            Dynamical Spring Potential at positions given by xdata
        """
        mass = ((4/3)*np.pi*((Radius*10**-9)**3))*density
        yfit=(kBT_Gamma/mass)
        Y = yfit*SpringPotnlFunc(xdata)
        return Y
    return PotentialFunc
def function[MakeDynamicPotentialFunc, parameter[kBT_Gamma, density, SpringPotnlFunc]]:
    constant[
    Creates the function that calculates the potential given the position (in volts)
    and the radius of the particle.

    Parameters
    ----------
    kBT_Gamma : float
        Value of kB*T/Gamma
    density : float
        density of the nanoparticle
    SpringPotnlFunc : function
        Function which takes the value of position (in volts)
        and returns the spring potential
    
    Returns
    -------
    PotentialFunc : function
        function that calculates the potential given the position (in volts)
        and the radius of the particle.

    ]
    def function[PotentialFunc, parameter[xdata, Radius]]:
        constant[
        calculates the potential given the position (in volts) 
        and the radius of the particle.

        Parameters
        ----------
        xdata : ndarray
            Position data (in volts)
        Radius : float
            Radius in units of nm

        Returns
        -------
        Potential : ndarray
            Dynamical Spring Potential at positions given by xdata
        ]
        variable[mass] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[4] / constant[3]] * name[np].pi] * binary_operation[binary_operation[name[Radius] * binary_operation[constant[10] ** <ast.UnaryOp object at 0x7da1b2726500>]] ** constant[3]]] * name[density]]
        variable[yfit] assign[=] binary_operation[name[kBT_Gamma] / name[mass]]
        variable[Y] assign[=] binary_operation[name[yfit] * call[name[SpringPotnlFunc], parameter[name[xdata]]]]
        return[name[Y]]
    return[name[PotentialFunc]]
keyword[def] identifier[MakeDynamicPotentialFunc] ( identifier[kBT_Gamma] , identifier[density] , identifier[SpringPotnlFunc] ): literal[string] keyword[def] identifier[PotentialFunc] ( identifier[xdata] , identifier[Radius] ): literal[string] identifier[mass] =(( literal[int] / literal[int] )* identifier[np] . identifier[pi] *(( identifier[Radius] * literal[int] **- literal[int] )** literal[int] ))* identifier[density] identifier[yfit] =( identifier[kBT_Gamma] / identifier[mass] ) identifier[Y] = identifier[yfit] * identifier[SpringPotnlFunc] ( identifier[xdata] ) keyword[return] identifier[Y] keyword[return] identifier[PotentialFunc]
def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc):
    """
    Creates the function that calculates the potential given the position (in volts)
    and the radius of the particle.

    Parameters
    ----------
    kBT_Gamma : float
        Value of kB*T/Gamma
    density : float
        density of the nanoparticle
    SpringPotnlFunc : function
        Function which takes the value of position (in volts)
        and returns the spring potential
    
    Returns
    -------
    PotentialFunc : function
        function that calculates the potential given the position (in volts)
        and the radius of the particle.

    """

    def PotentialFunc(xdata, Radius):
        """
        calculates the potential given the position (in volts) 
        and the radius of the particle.

        Parameters
        ----------
        xdata : ndarray
            Position data (in volts)
        Radius : float
            Radius in units of nm

        Returns
        -------
        Potential : ndarray
            Dynamical Spring Potential at positions given by xdata
        """
        mass = 4 / 3 * np.pi * (Radius * 10 ** (-9)) ** 3 * density
        yfit = kBT_Gamma / mass
        Y = yfit * SpringPotnlFunc(xdata)
        return Y
    return PotentialFunc
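A minimal usage sketch for the row above; the spring function, the kB*T/Gamma value, the density, and the radius are all illustrative numbers, not values from the source.

import numpy as np

spring = lambda x: 0.5 * x ** 2  # toy spring potential in volt units
PotentialFunc = MakeDynamicPotentialFunc(kBT_Gamma=1e-10, density=1800,
                                         SpringPotnlFunc=spring)
xdata = np.linspace(-1, 1, 5)  # positions in volts
print(PotentialFunc(xdata, Radius=50))  # potential for an assumed 50 nm particle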
def patch(self, client=None): """Sends all changed properties in a PATCH request. Updates the ``_properties`` with the response from the backend. If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current object. """ client = self._require_client(client) query_params = self._query_params # Pass '?projection=full' here because 'PATCH' documented not # to work properly w/ 'noAcl'. query_params["projection"] = "full" update_properties = {key: self._properties[key] for key in self._changes} # Make the API call. api_response = client._connection.api_request( method="PATCH", path=self.path, data=update_properties, query_params=query_params, _target_object=self, ) self._set_properties(api_response)
def function[patch, parameter[self, client]]: constant[Sends all changed properties in a PATCH request. Updates the ``_properties`` with the response from the backend. If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current object. ] variable[client] assign[=] call[name[self]._require_client, parameter[name[client]]] variable[query_params] assign[=] name[self]._query_params call[name[query_params]][constant[projection]] assign[=] constant[full] variable[update_properties] assign[=] <ast.DictComp object at 0x7da18ede7e20> variable[api_response] assign[=] call[name[client]._connection.api_request, parameter[]] call[name[self]._set_properties, parameter[name[api_response]]]
keyword[def] identifier[patch] ( identifier[self] , identifier[client] = keyword[None] ): literal[string] identifier[client] = identifier[self] . identifier[_require_client] ( identifier[client] ) identifier[query_params] = identifier[self] . identifier[_query_params] identifier[query_params] [ literal[string] ]= literal[string] identifier[update_properties] ={ identifier[key] : identifier[self] . identifier[_properties] [ identifier[key] ] keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_changes] } identifier[api_response] = identifier[client] . identifier[_connection] . identifier[api_request] ( identifier[method] = literal[string] , identifier[path] = identifier[self] . identifier[path] , identifier[data] = identifier[update_properties] , identifier[query_params] = identifier[query_params] , identifier[_target_object] = identifier[self] , ) identifier[self] . identifier[_set_properties] ( identifier[api_response] )
def patch(self, client=None): """Sends all changed properties in a PATCH request. Updates the ``_properties`` with the response from the backend. If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current object. """ client = self._require_client(client) query_params = self._query_params # Pass '?projection=full' here because 'PATCH' documented not # to work properly w/ 'noAcl'. query_params['projection'] = 'full' update_properties = {key: self._properties[key] for key in self._changes} # Make the API call. api_response = client._connection.api_request(method='PATCH', path=self.path, data=update_properties, query_params=query_params, _target_object=self) self._set_properties(api_response)
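The essential move in patch() is sending only the keys recorded as dirty. Stripped of the google-cloud-storage plumbing, the delta construction reduces to a dict comprehension over the change set (all names below are made up):

properties = {'name': 'bucket-a', 'labels': {'env': 'dev'}, 'storageClass': 'STANDARD'}
changes = {'labels', 'storageClass'}  # keys tracked elsewhere as modified
update_properties = {key: properties[key] for key in changes}
print(update_properties)  # only this delta travels in the PATCH body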
def fleet_ttb(unit_type, quantity, factories, is_techno=False, is_dict=False, stasis_enabled=False): """ Calculate the time taken to construct a given fleet """ unit_weights = { UNIT_SCOUT: 1, UNIT_DESTROYER: 13, UNIT_BOMBER: 10, UNIT_CRUISER: 85, UNIT_STARBASE: 1, } govt_weight = 80 if is_dict else 100 prod_weight = 85 if is_techno else 100 weighted_qty = unit_weights[unit_type] * quantity ttb = (weighted_qty * govt_weight * prod_weight) * (2 * factories) # TTB is 66% longer with stasis enabled return ttb + (ttb * 0.66) if stasis_enabled else ttb
def function[fleet_ttb, parameter[unit_type, quantity, factories, is_techno, is_dict, stasis_enabled]]: constant[ Calculate the time taken to construct a given fleet ] variable[unit_weights] assign[=] dictionary[[<ast.Name object at 0x7da20e9b1780>, <ast.Name object at 0x7da20e9b1d50>, <ast.Name object at 0x7da20e9b14b0>, <ast.Name object at 0x7da20e9b1930>, <ast.Name object at 0x7da20e9b2680>], [<ast.Constant object at 0x7da20e9b34c0>, <ast.Constant object at 0x7da20e9b3f10>, <ast.Constant object at 0x7da20e9b39a0>, <ast.Constant object at 0x7da20e9b0430>, <ast.Constant object at 0x7da20e9b0f10>]] variable[govt_weight] assign[=] <ast.IfExp object at 0x7da20c7cb850> variable[prod_weight] assign[=] <ast.IfExp object at 0x7da20c7cb610> variable[weighted_qty] assign[=] binary_operation[call[name[unit_weights]][name[unit_type]] * name[quantity]] variable[ttb] assign[=] binary_operation[binary_operation[binary_operation[name[weighted_qty] * name[govt_weight]] * name[prod_weight]] * binary_operation[constant[2] * name[factories]]] return[<ast.IfExp object at 0x7da20c7c83a0>]
keyword[def] identifier[fleet_ttb] ( identifier[unit_type] , identifier[quantity] , identifier[factories] , identifier[is_techno] = keyword[False] , identifier[is_dict] = keyword[False] , identifier[stasis_enabled] = keyword[False] ): literal[string] identifier[unit_weights] ={ identifier[UNIT_SCOUT] : literal[int] , identifier[UNIT_DESTROYER] : literal[int] , identifier[UNIT_BOMBER] : literal[int] , identifier[UNIT_CRUISER] : literal[int] , identifier[UNIT_STARBASE] : literal[int] , } identifier[govt_weight] = literal[int] keyword[if] identifier[is_dict] keyword[else] literal[int] identifier[prod_weight] = literal[int] keyword[if] identifier[is_techno] keyword[else] literal[int] identifier[weighted_qty] = identifier[unit_weights] [ identifier[unit_type] ]* identifier[quantity] identifier[ttb] =( identifier[weighted_qty] * identifier[govt_weight] * identifier[prod_weight] )*( literal[int] * identifier[factories] ) keyword[return] identifier[ttb] +( identifier[ttb] * literal[int] ) keyword[if] identifier[stasis_enabled] keyword[else] identifier[ttb]
def fleet_ttb(unit_type, quantity, factories, is_techno=False, is_dict=False, stasis_enabled=False): """ Calculate the time taken to construct a given fleet """ unit_weights = {UNIT_SCOUT: 1, UNIT_DESTROYER: 13, UNIT_BOMBER: 10, UNIT_CRUISER: 85, UNIT_STARBASE: 1} govt_weight = 80 if is_dict else 100 prod_weight = 85 if is_techno else 100 weighted_qty = unit_weights[unit_type] * quantity ttb = weighted_qty * govt_weight * prod_weight * (2 * factories) # TTB is 66% longer with stasis enabled return ttb + ttb * 0.66 if stasis_enabled else ttb
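A worked example of the formula above under assumed inputs: 40 cruisers (weight 85 from the table), 12 factories, technocracy, no dictatorship, stasis off.

weighted_qty = 85 * 40                    # cruiser weight * quantity = 3400
ttb = weighted_qty * 100 * 85 * (2 * 12)  # govt=100, prod=85, 12 factories
print(ttb)                                # 693600000
print(ttb + ttb * 0.66)                   # with stasis: 66% longer, 1151376000.0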
def getClassAllSupers(self, aURI):
    """
    note: requires SPARQL 1.1
    2015-06-04: currently not used, inferred from above
    """
    aURI = aURI
    try:
        qres = self.rdflib_graph.query("""SELECT DISTINCT ?x
               WHERE {
                 { <%s> rdfs:subClassOf+ ?x }
                 FILTER (!isBlank(?x))
               }
               """ % (aURI))
    except:
        printDebug(
            "... warning: the 'getClassAllSupers' query failed (maybe missing SPARQL 1.1 support?)"
        )
        qres = []
    return list(qres)
def function[getClassAllSupers, parameter[self, aURI]]:
    constant[
    note: requires SPARQL 1.1
    2015-06-04: currently not used, inferred from above
    ]
    variable[aURI] assign[=] name[aURI]
    <ast.Try object at 0x7da1b112abf0>
    return[call[name[list], parameter[name[qres]]]]
keyword[def] identifier[getClassAllSupers] ( identifier[self] , identifier[aURI] ): literal[string] identifier[aURI] = identifier[aURI] keyword[try] : identifier[qres] = identifier[self] . identifier[rdflib_graph] . identifier[query] ( literal[string] %( identifier[aURI] )) keyword[except] : identifier[printDebug] ( literal[string] ) identifier[qres] =[] keyword[return] identifier[list] ( identifier[qres] )
def getClassAllSupers(self, aURI):
    """
    note: requires SPARQL 1.1
    2015-06-04: currently not used, inferred from above
    """
    aURI = aURI
    try:
        qres = self.rdflib_graph.query('SELECT DISTINCT ?x\n       WHERE {\n         { <%s> rdfs:subClassOf+ ?x }\n         FILTER (!isBlank(?x))\n       }\n       ' % aURI) # depends on [control=['try'], data=[]]
    except:
        printDebug("... warning: the 'getClassAllSupers' query failed (maybe missing SPARQL 1.1 support?)")
        qres = [] # depends on [control=['except'], data=[]]
    return list(qres)
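The rdfs:subClassOf+ property path is the SPARQL 1.1 feature the docstring warns about. A self-contained rdflib sketch on a toy graph (the namespace and class names are invented):

from rdflib import Graph, Namespace, RDFS

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.Dog, RDFS.subClassOf, EX.Mammal))
g.add((EX.Mammal, RDFS.subClassOf, EX.Animal))
qres = g.query('SELECT DISTINCT ?x WHERE { <http://example.org/Dog> rdfs:subClassOf+ ?x }',
               initNs={'rdfs': RDFS})
print(sorted(str(row.x) for row in qres))  # the transitive supers: Animal, Mammal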
def generate(env): """Add Builders and construction variables for dvips to an Environment.""" global PSAction if PSAction is None: PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR') global DVIPSAction if DVIPSAction is None: DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction) global PSBuilder if PSBuilder is None: PSBuilder = SCons.Builder.Builder(action = PSAction, prefix = '$PSPREFIX', suffix = '$PSSUFFIX', src_suffix = '.dvi', src_builder = 'DVI', single_source=True) env['BUILDERS']['PostScript'] = PSBuilder env['DVIPS'] = 'dvips' env['DVIPSFLAGS'] = SCons.Util.CLVar('') # I'm not quite sure I got the directories and filenames right for variant_dir # We need to be in the correct directory for the sake of latex \includegraphics eps included files. env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}' env['PSPREFIX'] = '' env['PSSUFFIX'] = '.ps'
def function[generate, parameter[env]]: constant[Add Builders and construction variables for dvips to an Environment.] <ast.Global object at 0x7da18f58e890> if compare[name[PSAction] is constant[None]] begin[:] variable[PSAction] assign[=] call[name[SCons].Action.Action, parameter[constant[$PSCOM], constant[$PSCOMSTR]]] <ast.Global object at 0x7da18f58de10> if compare[name[DVIPSAction] is constant[None]] begin[:] variable[DVIPSAction] assign[=] call[name[SCons].Action.Action, parameter[name[DviPsFunction]]] <ast.Global object at 0x7da18f58d810> if compare[name[PSBuilder] is constant[None]] begin[:] variable[PSBuilder] assign[=] call[name[SCons].Builder.Builder, parameter[]] call[call[name[env]][constant[BUILDERS]]][constant[PostScript]] assign[=] name[PSBuilder] call[name[env]][constant[DVIPS]] assign[=] constant[dvips] call[name[env]][constant[DVIPSFLAGS]] assign[=] call[name[SCons].Util.CLVar, parameter[constant[]]] call[name[env]][constant[PSCOM]] assign[=] constant[cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}] call[name[env]][constant[PSPREFIX]] assign[=] constant[] call[name[env]][constant[PSSUFFIX]] assign[=] constant[.ps]
keyword[def] identifier[generate] ( identifier[env] ): literal[string] keyword[global] identifier[PSAction] keyword[if] identifier[PSAction] keyword[is] keyword[None] : identifier[PSAction] = identifier[SCons] . identifier[Action] . identifier[Action] ( literal[string] , literal[string] ) keyword[global] identifier[DVIPSAction] keyword[if] identifier[DVIPSAction] keyword[is] keyword[None] : identifier[DVIPSAction] = identifier[SCons] . identifier[Action] . identifier[Action] ( identifier[DviPsFunction] , identifier[strfunction] = identifier[DviPsStrFunction] ) keyword[global] identifier[PSBuilder] keyword[if] identifier[PSBuilder] keyword[is] keyword[None] : identifier[PSBuilder] = identifier[SCons] . identifier[Builder] . identifier[Builder] ( identifier[action] = identifier[PSAction] , identifier[prefix] = literal[string] , identifier[suffix] = literal[string] , identifier[src_suffix] = literal[string] , identifier[src_builder] = literal[string] , identifier[single_source] = keyword[True] ) identifier[env] [ literal[string] ][ literal[string] ]= identifier[PSBuilder] identifier[env] [ literal[string] ]= literal[string] identifier[env] [ literal[string] ]= identifier[SCons] . identifier[Util] . identifier[CLVar] ( literal[string] ) identifier[env] [ literal[string] ]= literal[string] identifier[env] [ literal[string] ]= literal[string] identifier[env] [ literal[string] ]= literal[string]
def generate(env): """Add Builders and construction variables for dvips to an Environment.""" global PSAction if PSAction is None: PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR') # depends on [control=['if'], data=['PSAction']] global DVIPSAction if DVIPSAction is None: DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction=DviPsStrFunction) # depends on [control=['if'], data=['DVIPSAction']] global PSBuilder if PSBuilder is None: PSBuilder = SCons.Builder.Builder(action=PSAction, prefix='$PSPREFIX', suffix='$PSSUFFIX', src_suffix='.dvi', src_builder='DVI', single_source=True) # depends on [control=['if'], data=['PSBuilder']] env['BUILDERS']['PostScript'] = PSBuilder env['DVIPS'] = 'dvips' env['DVIPSFLAGS'] = SCons.Util.CLVar('') # I'm not quite sure I got the directories and filenames right for variant_dir # We need to be in the correct directory for the sake of latex \includegraphics eps included files. env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}' env['PSPREFIX'] = '' env['PSSUFFIX'] = '.ps'
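A hypothetical SConstruct exercising the builder this tool registers; the file names are placeholders, and paper.dvi would normally come from the DVI builder named in src_builder.

env = Environment(tools=['default', 'dvips'])
env.PostScript(target='paper.ps', source='paper.dvi')  # runs dvips in the target's directory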
def update(connection=None, urls=None, force_download=False, taxids=None, silent=False): """Updates CTD database :param urls: list of urls to download :type urls: iterable :param connection: custom database connection string :type connection: str :param force_download: force method to download :type force_download: bool :param int,list,tuple taxids: int or iterable of NCBI taxonomy identifiers (default is None = load all) """ if isinstance(taxids, int): taxids = (taxids,) db = DbManager(connection) db.db_import_xml(urls, force_download, taxids, silent) db.session.close()
def function[update, parameter[connection, urls, force_download, taxids, silent]]: constant[Updates CTD database :param urls: list of urls to download :type urls: iterable :param connection: custom database connection string :type connection: str :param force_download: force method to download :type force_download: bool :param int,list,tuple taxids: int or iterable of NCBI taxonomy identifiers (default is None = load all) ] if call[name[isinstance], parameter[name[taxids], name[int]]] begin[:] variable[taxids] assign[=] tuple[[<ast.Name object at 0x7da18f723df0>]] variable[db] assign[=] call[name[DbManager], parameter[name[connection]]] call[name[db].db_import_xml, parameter[name[urls], name[force_download], name[taxids], name[silent]]] call[name[db].session.close, parameter[]]
keyword[def] identifier[update] ( identifier[connection] = keyword[None] , identifier[urls] = keyword[None] , identifier[force_download] = keyword[False] , identifier[taxids] = keyword[None] , identifier[silent] = keyword[False] ): literal[string] keyword[if] identifier[isinstance] ( identifier[taxids] , identifier[int] ): identifier[taxids] =( identifier[taxids] ,) identifier[db] = identifier[DbManager] ( identifier[connection] ) identifier[db] . identifier[db_import_xml] ( identifier[urls] , identifier[force_download] , identifier[taxids] , identifier[silent] ) identifier[db] . identifier[session] . identifier[close] ()
def update(connection=None, urls=None, force_download=False, taxids=None, silent=False): """Updates CTD database :param urls: list of urls to download :type urls: iterable :param connection: custom database connection string :type connection: str :param force_download: force method to download :type force_download: bool :param int,list,tuple taxids: int or iterable of NCBI taxonomy identifiers (default is None = load all) """ if isinstance(taxids, int): taxids = (taxids,) # depends on [control=['if'], data=[]] db = DbManager(connection) db.db_import_xml(urls, force_download, taxids, silent) db.session.close()
def __get_dash_menu(self, kibiter_major):
    """Order the dashboard menu"""
    # omenu = OrderedDict()
    omenu = []
    # Start with Overview
    omenu.append(self.menu_panels_common['Overview'])

    # Now the data sources
    ds_menu = self.__get_menu_entries(kibiter_major)

    # Remove the kafka and community menus, they will be included at the end
    kafka_menu = None
    community_menu = None

    found_kafka = [pos for pos, menu in enumerate(ds_menu) if menu['name'] == KAFKA_NAME]
    if found_kafka:
        kafka_menu = ds_menu.pop(found_kafka[0])

    found_community = [pos for pos, menu in enumerate(ds_menu) if menu['name'] == COMMUNITY_NAME]
    if found_community:
        community_menu = ds_menu.pop(found_community[0])

    ds_menu.sort(key=operator.itemgetter('name'))
    omenu += ds_menu

    # If kafka and community are present add them before the Data Status and About
    if kafka_menu:
        omenu.append(kafka_menu)
    if community_menu:
        omenu.append(community_menu)

    # At the end Data Status, About
    omenu.append(self.menu_panels_common['Data Status'])
    omenu.append(self.menu_panels_common['About'])

    logger.debug("Menu for panels: %s", json.dumps(ds_menu, indent=4))

    return omenu
def function[__get_dash_menu, parameter[self, kibiter_major]]: constant[Order the dashboard menu] variable[omenu] assign[=] list[[]] call[name[omenu].append, parameter[call[name[self].menu_panels_common][constant[Overview]]]] variable[ds_menu] assign[=] call[name[self].__get_menu_entries, parameter[name[kibiter_major]]] variable[kafka_menu] assign[=] constant[None] variable[community_menu] assign[=] constant[None] variable[found_kafka] assign[=] <ast.ListComp object at 0x7da1b009dc60> if name[found_kafka] begin[:] variable[kafka_menu] assign[=] call[name[ds_menu].pop, parameter[call[name[found_kafka]][constant[0]]]] variable[found_community] assign[=] <ast.ListComp object at 0x7da1b009de70> if name[found_community] begin[:] variable[community_menu] assign[=] call[name[ds_menu].pop, parameter[call[name[found_community]][constant[0]]]] call[name[ds_menu].sort, parameter[]] <ast.AugAssign object at 0x7da1b009e620> if name[kafka_menu] begin[:] call[name[omenu].append, parameter[name[kafka_menu]]] if name[community_menu] begin[:] call[name[omenu].append, parameter[name[community_menu]]] call[name[omenu].append, parameter[call[name[self].menu_panels_common][constant[Data Status]]]] call[name[omenu].append, parameter[call[name[self].menu_panels_common][constant[About]]]] call[name[logger].debug, parameter[constant[Menu for panels: %s], call[name[json].dumps, parameter[name[ds_menu]]]]] return[name[omenu]]
keyword[def] identifier[__get_dash_menu] ( identifier[self] , identifier[kibiter_major] ): literal[string] identifier[omenu] =[] identifier[omenu] . identifier[append] ( identifier[self] . identifier[menu_panels_common] [ literal[string] ]) identifier[ds_menu] = identifier[self] . identifier[__get_menu_entries] ( identifier[kibiter_major] ) identifier[kafka_menu] = keyword[None] identifier[community_menu] = keyword[None] identifier[found_kafka] =[ identifier[pos] keyword[for] identifier[pos] , identifier[menu] keyword[in] identifier[enumerate] ( identifier[ds_menu] ) keyword[if] identifier[menu] [ literal[string] ]== identifier[KAFKA_NAME] ] keyword[if] identifier[found_kafka] : identifier[kafka_menu] = identifier[ds_menu] . identifier[pop] ( identifier[found_kafka] [ literal[int] ]) identifier[found_community] =[ identifier[pos] keyword[for] identifier[pos] , identifier[menu] keyword[in] identifier[enumerate] ( identifier[ds_menu] ) keyword[if] identifier[menu] [ literal[string] ]== identifier[COMMUNITY_NAME] ] keyword[if] identifier[found_community] : identifier[community_menu] = identifier[ds_menu] . identifier[pop] ( identifier[found_community] [ literal[int] ]) identifier[ds_menu] . identifier[sort] ( identifier[key] = identifier[operator] . identifier[itemgetter] ( literal[string] )) identifier[omenu] += identifier[ds_menu] keyword[if] identifier[kafka_menu] : identifier[omenu] . identifier[append] ( identifier[kafka_menu] ) keyword[if] identifier[community_menu] : identifier[omenu] . identifier[append] ( identifier[community_menu] ) identifier[omenu] . identifier[append] ( identifier[self] . identifier[menu_panels_common] [ literal[string] ]) identifier[omenu] . identifier[append] ( identifier[self] . identifier[menu_panels_common] [ literal[string] ]) identifier[logger] . identifier[debug] ( literal[string] , identifier[json] . identifier[dumps] ( identifier[ds_menu] , identifier[indent] = literal[int] )) keyword[return] identifier[omenu]
def __get_dash_menu(self, kibiter_major):
    """Order the dashboard menu"""
    # omenu = OrderedDict()
    omenu = []
    # Start with Overview
    omenu.append(self.menu_panels_common['Overview'])
    # Now the data sources
    ds_menu = self.__get_menu_entries(kibiter_major)
    # Remove the kafka and community menus, they will be included at the end
    kafka_menu = None
    community_menu = None
    found_kafka = [pos for (pos, menu) in enumerate(ds_menu) if menu['name'] == KAFKA_NAME]
    if found_kafka:
        kafka_menu = ds_menu.pop(found_kafka[0]) # depends on [control=['if'], data=[]]
    found_community = [pos for (pos, menu) in enumerate(ds_menu) if menu['name'] == COMMUNITY_NAME]
    if found_community:
        community_menu = ds_menu.pop(found_community[0]) # depends on [control=['if'], data=[]]
    ds_menu.sort(key=operator.itemgetter('name'))
    omenu += ds_menu
    # If kafka and community are present add them before the Data Status and About
    if kafka_menu:
        omenu.append(kafka_menu) # depends on [control=['if'], data=[]]
    if community_menu:
        omenu.append(community_menu) # depends on [control=['if'], data=[]]
    # At the end Data Status, About
    omenu.append(self.menu_panels_common['Data Status'])
    omenu.append(self.menu_panels_common['About'])
    logger.debug('Menu for panels: %s', json.dumps(ds_menu, indent=4))
    return omenu
def set(self, status_item, status):
    """ sets the status item to the passed in parameters

    args:
        status_item: the name of the item to set
        status: boolean value to set the item
    """
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)

    sparql = '''
        DELETE {{
          kdr:{0} kds:{1} ?o
        }}
        INSERT {{
          kdr:{0} kds:{1} "{2}"^^xsd:boolean
        }}
        WHERE {{
          OPTIONAL {{ kdr:{0} kds:{1} ?o }}
        }}'''
    return self.conn.query(sparql=sparql.format(self.group,
                                                status_item,
                                                str(status).lower()),
                           mode='update')
def function[set, parameter[self, status_item, status]]:
    constant[ sets the status item to the passed in parameters

    args:
        status_item: the name of the item to set
        status: boolean value to set the item
    ]
    variable[lg] assign[=] call[name[logging].getLogger, parameter[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da204963520>, <ast.Subscript object at 0x7da204962890>]]]]]
    call[name[lg].setLevel, parameter[name[self].log_level]]
    variable[sparql] assign[=] constant[
        DELETE {{
          kdr:{0} kds:{1} ?o
        }}
        INSERT {{
          kdr:{0} kds:{1} "{2}"^^xsd:boolean
        }}
        WHERE {{
          OPTIONAL {{ kdr:{0} kds:{1} ?o }}
        }}]
    return[call[name[self].conn.query, parameter[]]]
keyword[def] identifier[set] ( identifier[self] , identifier[status_item] , identifier[status] ): literal[string] identifier[lg] = identifier[logging] . identifier[getLogger] ( literal[string] %( identifier[self] . identifier[ln] , identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ])) identifier[lg] . identifier[setLevel] ( identifier[self] . identifier[log_level] ) identifier[sparql] = literal[string] keyword[return] identifier[self] . identifier[conn] . identifier[query] ( identifier[sparql] = identifier[sparql] . identifier[format] ( identifier[self] . identifier[group] , identifier[status_item] , identifier[str] ( identifier[status] ). identifier[lower] ()), identifier[mode] = literal[string] )
def set(self, status_item, status):
    """ sets the status item to the passed in parameters

    args:
        status_item: the name of the item to set
        status: boolean value to set the item
    """
    lg = logging.getLogger('%s.%s' % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)
    sparql = '\n        DELETE {{\n          kdr:{0} kds:{1} ?o\n        }}\n        INSERT {{\n          kdr:{0} kds:{1} "{2}"^^xsd:boolean\n        }}\n        WHERE {{\n          OPTIONAL {{ kdr:{0} kds:{1} ?o }}\n        }}'
    return self.conn.query(sparql=sparql.format(self.group, status_item, str(status).lower()), mode='update')
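Rendering the template with sample values (the group and status-item names are invented) shows the DELETE/INSERT/WHERE shape that reaches the triplestore:

sparql = '''
    DELETE {{ kdr:{0} kds:{1} ?o }}
    INSERT {{ kdr:{0} kds:{1} "{2}"^^xsd:boolean }}
    WHERE  {{ OPTIONAL {{ kdr:{0} kds:{1} ?o }} }}'''
print(sparql.format('load_group', 'kds_loaded', str(True).lower()))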
def get_resource_class_collection_attribute_iterator(rc):
    """
    Returns an iterator over all collection attributes in the given
    registered resource.
    """
    for attr in itervalues_(rc.__everest_attributes__):
        if attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
            yield attr
def function[get_resource_class_collection_attribute_iterator, parameter[rc]]:
    constant[
    Returns an iterator over all collection attributes in the given
    registered resource.
    ]
    for taget[name[attr]] in starred[call[name[itervalues_], parameter[name[rc].__everest_attributes__]]] begin[:]
    if compare[name[attr].kind equal[==] name[RESOURCE_ATTRIBUTE_KINDS].COLLECTION] begin[:]
    <ast.Yield object at 0x7da18f720370>
keyword[def] identifier[get_resource_class_collection_attribute_iterator] ( identifier[rc] ): literal[string] keyword[for] identifier[attr] keyword[in] identifier[itervalues_] ( identifier[rc] . identifier[__everest_attributes__] ): keyword[if] identifier[attr] . identifier[kind] == identifier[RESOURCE_ATTRIBUTE_KINDS] . identifier[COLLECTION] : keyword[yield] identifier[attr]
def get_resource_class_collection_attribute_iterator(rc):
    """
    Returns an iterator over all collection attributes in the given
    registered resource.
    """
    for attr in itervalues_(rc.__everest_attributes__):
        if attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
            yield attr # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']]
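Stand-in objects are enough to show the filtering the generator performs; only the .kind comparison matters, and the real kind constants live on RESOURCE_ATTRIBUTE_KINDS.

class Attr(object):
    def __init__(self, name, kind):
        self.name, self.kind = name, kind

class FakeResource(object):
    __everest_attributes__ = {'id': Attr('id', 'TERMINAL'),
                              'children': Attr('children', 'COLLECTION')}

print([a.name for a in FakeResource.__everest_attributes__.values()
       if a.kind == 'COLLECTION'])  # ['children']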
def get_tags_of_letter_per_page(self, letter_id, per_page=1000, page=1): """ Get tags of letter per page :param letter_id: the letter id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=LETTER_TAGS, per_page=per_page, page=page, params={'letter_id': letter_id}, )
def function[get_tags_of_letter_per_page, parameter[self, letter_id, per_page, page]]: constant[ Get tags of letter per page :param letter_id: the letter id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list ] return[call[name[self]._get_resource_per_page, parameter[]]]
keyword[def] identifier[get_tags_of_letter_per_page] ( identifier[self] , identifier[letter_id] , identifier[per_page] = literal[int] , identifier[page] = literal[int] ): literal[string] keyword[return] identifier[self] . identifier[_get_resource_per_page] ( identifier[resource] = identifier[LETTER_TAGS] , identifier[per_page] = identifier[per_page] , identifier[page] = identifier[page] , identifier[params] ={ literal[string] : identifier[letter_id] }, )
def get_tags_of_letter_per_page(self, letter_id, per_page=1000, page=1): """ Get tags of letter per page :param letter_id: the letter id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page(resource=LETTER_TAGS, per_page=per_page, page=page, params={'letter_id': letter_id})
def _ctypes_assign(parameter): """Returns the Fortran code lines to allocate and assign values to the *original* parameter ValueElement that exists now only as a local variable so that the signatures match for the compiler. """ #If the array is set to only have intent(out), we don't want to allocate or assign #a value to it; otherwise we do. if (parameter.direction != "(out)" and ("allocate" in parameter.modifiers or "pointer" in parameter.modifiers or "target" in parameter.modifiers)): if "in" in parameter.direction: splice = _ctypes_splice(parameter) result = [] result.append(" allocate({}({}))".format(parameter.name, splice)) result.append(" {0} = {0}_c".format(parameter.name)) return ('\n'.join(result), True) elif parameter.dtype == "logical" and "in" in parameter.direction: return (" {0} = {0}_c".format(parameter.name), False)
def function[_ctypes_assign, parameter[parameter]]: constant[Returns the Fortran code lines to allocate and assign values to the *original* parameter ValueElement that exists now only as a local variable so that the signatures match for the compiler. ] if <ast.BoolOp object at 0x7da20e954b50> begin[:] if compare[constant[in] in name[parameter].direction] begin[:] variable[splice] assign[=] call[name[_ctypes_splice], parameter[name[parameter]]] variable[result] assign[=] list[[]] call[name[result].append, parameter[call[constant[ allocate({}({}))].format, parameter[name[parameter].name, name[splice]]]]] call[name[result].append, parameter[call[constant[ {0} = {0}_c].format, parameter[name[parameter].name]]]] return[tuple[[<ast.Call object at 0x7da20e956aa0>, <ast.Constant object at 0x7da20e954cd0>]]]
keyword[def] identifier[_ctypes_assign] ( identifier[parameter] ): literal[string] keyword[if] ( identifier[parameter] . identifier[direction] != literal[string] keyword[and] ( literal[string] keyword[in] identifier[parameter] . identifier[modifiers] keyword[or] literal[string] keyword[in] identifier[parameter] . identifier[modifiers] keyword[or] literal[string] keyword[in] identifier[parameter] . identifier[modifiers] )): keyword[if] literal[string] keyword[in] identifier[parameter] . identifier[direction] : identifier[splice] = identifier[_ctypes_splice] ( identifier[parameter] ) identifier[result] =[] identifier[result] . identifier[append] ( literal[string] . identifier[format] ( identifier[parameter] . identifier[name] , identifier[splice] )) identifier[result] . identifier[append] ( literal[string] . identifier[format] ( identifier[parameter] . identifier[name] )) keyword[return] ( literal[string] . identifier[join] ( identifier[result] ), keyword[True] ) keyword[elif] identifier[parameter] . identifier[dtype] == literal[string] keyword[and] literal[string] keyword[in] identifier[parameter] . identifier[direction] : keyword[return] ( literal[string] . identifier[format] ( identifier[parameter] . identifier[name] ), keyword[False] )
def _ctypes_assign(parameter): """Returns the Fortran code lines to allocate and assign values to the *original* parameter ValueElement that exists now only as a local variable so that the signatures match for the compiler. """ #If the array is set to only have intent(out), we don't want to allocate or assign #a value to it; otherwise we do. if parameter.direction != '(out)' and ('allocate' in parameter.modifiers or 'pointer' in parameter.modifiers or 'target' in parameter.modifiers): if 'in' in parameter.direction: splice = _ctypes_splice(parameter) result = [] result.append(' allocate({}({}))'.format(parameter.name, splice)) result.append(' {0} = {0}_c'.format(parameter.name)) return ('\n'.join(result), True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif parameter.dtype == 'logical' and 'in' in parameter.direction: return (' {0} = {0}_c'.format(parameter.name), False) # depends on [control=['if'], data=[]]
def _check_timestamp(self, timestamp): """Verify that timestamp is recentish.""" timestamp = int(timestamp) now = int(time.time()) lapsed = now - timestamp if lapsed > self.timestamp_threshold: raise Error('Expired timestamp: given %d and now %s has a ' 'greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
def function[_check_timestamp, parameter[self, timestamp]]: constant[Verify that timestamp is recentish.] variable[timestamp] assign[=] call[name[int], parameter[name[timestamp]]] variable[now] assign[=] call[name[int], parameter[call[name[time].time, parameter[]]]] variable[lapsed] assign[=] binary_operation[name[now] - name[timestamp]] if compare[name[lapsed] greater[>] name[self].timestamp_threshold] begin[:] <ast.Raise object at 0x7da1b1d36bc0>
keyword[def] identifier[_check_timestamp] ( identifier[self] , identifier[timestamp] ): literal[string] identifier[timestamp] = identifier[int] ( identifier[timestamp] ) identifier[now] = identifier[int] ( identifier[time] . identifier[time] ()) identifier[lapsed] = identifier[now] - identifier[timestamp] keyword[if] identifier[lapsed] > identifier[self] . identifier[timestamp_threshold] : keyword[raise] identifier[Error] ( literal[string] literal[string] %( identifier[timestamp] , identifier[now] , identifier[self] . identifier[timestamp_threshold] ))
def _check_timestamp(self, timestamp): """Verify that timestamp is recentish.""" timestamp = int(timestamp) now = int(time.time()) lapsed = now - timestamp if lapsed > self.timestamp_threshold: raise Error('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) # depends on [control=['if'], data=[]]
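The same freshness rule as a standalone sketch, with an assumed 300-second window:

import time

timestamp_threshold = 300
timestamp = int(time.time()) - 600  # pretend the request is ten minutes old
lapsed = int(time.time()) - timestamp
if lapsed > timestamp_threshold:
    print('rejected: stale by %d seconds' % (lapsed - timestamp_threshold))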
def setDragTable(self, table):
    """
    Sets the table that will be linked with the drag query for this
    record.  This information will be added to the drag & drop information
    when this record is dragged from the tree and will be set into the
    application/x-orb-table format for mime data.
    
    :sa     setDragQuery, XTreeWidgetItem.setDragData
    
    :param      table | <subclass of orb.Table>
    """
    if table and table.schema():
        self.setDragData('application/x-orb-table', table.schema().name())
    else:
        self.setDragData('application/x-orb-table', None)
def function[setDragTable, parameter[self, table]]:
    constant[
    Sets the table that will be linked with the drag query for this
    record.  This information will be added to the drag & drop information
    when this record is dragged from the tree and will be set into the
    application/x-orb-table format for mime data.
    
    :sa     setDragQuery, XTreeWidgetItem.setDragData
    
    :param      table | <subclass of orb.Table>
    ]
    if <ast.BoolOp object at 0x7da18fe926b0> begin[:]
    call[name[self].setDragData, parameter[constant[application/x-orb-table], call[call[name[table].schema, parameter[]].name, parameter[]]]]
keyword[def] identifier[setDragTable] ( identifier[self] , identifier[table] ): literal[string] keyword[if] identifier[table] keyword[and] identifier[table] . identifier[schema] (): identifier[self] . identifier[setDragData] ( literal[string] , identifier[table] . identifier[schema] (). identifier[name] ()) keyword[else] : identifier[self] . identifier[setDragData] ( literal[string] , keyword[None] )
def setDragTable(self, table):
    """
    Sets the table that will be linked with the drag query for this
    record.  This information will be added to the drag & drop information
    when this record is dragged from the tree and will be set into the
    application/x-orb-table format for mime data.
    
    :sa     setDragQuery, XTreeWidgetItem.setDragData
    
    :param      table | <subclass of orb.Table>
    """
    if table and table.schema():
        self.setDragData('application/x-orb-table', table.schema().name()) # depends on [control=['if'], data=[]]
    else:
        self.setDragData('application/x-orb-table', None)
def peering_connection_pending_from_vpc(conn_id=None, conn_name=None, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None): ''' Check if a VPC peering connection is in the pending state, and requested from the given VPC. .. versionadded:: 2016.11.0 conn_id The connection ID to check. Exclusive with conn_name. conn_name The connection name to check. Exclusive with conn_id. vpc_id Is this the ID of the requesting VPC for this peering connection. Exclusive with vpc_name. vpc_name Is this the Name of the requesting VPC for this peering connection. Exclusive with vpc_id. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. CLI Example: .. code-block:: bash salt myminion boto_vpc.is_peering_connection_pending name=salt-vpc ''' if not _exactly_one((conn_id, conn_name)): raise SaltInvocationError('Exactly one of conn_id or conn_name must be provided.') if not _exactly_one((vpc_id, vpc_name)): raise SaltInvocationError('Exactly one of vpc_id or vpc_name must be provided.') if vpc_name: vpc_id = check_vpc(vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile) if not vpc_id: log.warning('Could not resolve VPC name %s to an ID', vpc_name) return False conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile) filters = [{'Name': 'requester-vpc-info.vpc-id', 'Values': [vpc_id]}, {'Name': 'status-code', 'Values': [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]}] if conn_id: filters += [{'Name': 'vpc-peering-connection-id', 'Values': [conn_id]}] else: filters += [{'Name': 'tag:Name', 'Values': [conn_name]}] vpcs = conn.describe_vpc_peering_connections(Filters=filters).get('VpcPeeringConnections', []) if not vpcs: return False elif len(vpcs) > 1: raise SaltInvocationError('Found more than one ID for the VPC peering ' 'connection ({0}). Please call this function ' 'with an ID instead.'.format(conn_id or conn_name)) else: status = vpcs[0]['Status']['Code'] return bool(status == PENDING_ACCEPTANCE)
def function[peering_connection_pending_from_vpc, parameter[conn_id, conn_name, vpc_id, vpc_name, region, key, keyid, profile]]: constant[ Check if a VPC peering connection is in the pending state, and requested from the given VPC. .. versionadded:: 2016.11.0 conn_id The connection ID to check. Exclusive with conn_name. conn_name The connection name to check. Exclusive with conn_id. vpc_id Is this the ID of the requesting VPC for this peering connection. Exclusive with vpc_name. vpc_name Is this the Name of the requesting VPC for this peering connection. Exclusive with vpc_id. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. CLI Example: .. code-block:: bash salt myminion boto_vpc.is_peering_connection_pending name=salt-vpc ] if <ast.UnaryOp object at 0x7da1b2023b80> begin[:] <ast.Raise object at 0x7da1b2023a60> if <ast.UnaryOp object at 0x7da1b2023970> begin[:] <ast.Raise object at 0x7da1b2023850> if name[vpc_name] begin[:] variable[vpc_id] assign[=] call[name[check_vpc], parameter[]] if <ast.UnaryOp object at 0x7da1b2023430> begin[:] call[name[log].warning, parameter[constant[Could not resolve VPC name %s to an ID], name[vpc_name]]] return[constant[False]] variable[conn] assign[=] call[name[_get_conn3], parameter[]] variable[filters] assign[=] list[[<ast.Dict object at 0x7da1b2022f20>, <ast.Dict object at 0x7da1b2022e00>]] if name[conn_id] begin[:] <ast.AugAssign object at 0x7da1b2022c20> variable[vpcs] assign[=] call[call[name[conn].describe_vpc_peering_connections, parameter[]].get, parameter[constant[VpcPeeringConnections], list[[]]]] if <ast.UnaryOp object at 0x7da1b2022680> begin[:] return[constant[False]] return[call[name[bool], parameter[compare[name[status] equal[==] name[PENDING_ACCEPTANCE]]]]]
keyword[def] identifier[peering_connection_pending_from_vpc] ( identifier[conn_id] = keyword[None] , identifier[conn_name] = keyword[None] , identifier[vpc_id] = keyword[None] , identifier[vpc_name] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[_exactly_one] (( identifier[conn_id] , identifier[conn_name] )): keyword[raise] identifier[SaltInvocationError] ( literal[string] ) keyword[if] keyword[not] identifier[_exactly_one] (( identifier[vpc_id] , identifier[vpc_name] )): keyword[raise] identifier[SaltInvocationError] ( literal[string] ) keyword[if] identifier[vpc_name] : identifier[vpc_id] = identifier[check_vpc] ( identifier[vpc_name] = identifier[vpc_name] , identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[if] keyword[not] identifier[vpc_id] : identifier[log] . identifier[warning] ( literal[string] , identifier[vpc_name] ) keyword[return] keyword[False] identifier[conn] = identifier[_get_conn3] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) identifier[filters] =[{ literal[string] : literal[string] , literal[string] :[ identifier[vpc_id] ]}, { literal[string] : literal[string] , literal[string] :[ identifier[ACTIVE] , identifier[PENDING_ACCEPTANCE] , identifier[PROVISIONING] ]}] keyword[if] identifier[conn_id] : identifier[filters] +=[{ literal[string] : literal[string] , literal[string] :[ identifier[conn_id] ]}] keyword[else] : identifier[filters] +=[{ literal[string] : literal[string] , literal[string] :[ identifier[conn_name] ]}] identifier[vpcs] = identifier[conn] . identifier[describe_vpc_peering_connections] ( identifier[Filters] = identifier[filters] ). identifier[get] ( literal[string] ,[]) keyword[if] keyword[not] identifier[vpcs] : keyword[return] keyword[False] keyword[elif] identifier[len] ( identifier[vpcs] )> literal[int] : keyword[raise] identifier[SaltInvocationError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[conn_id] keyword[or] identifier[conn_name] )) keyword[else] : identifier[status] = identifier[vpcs] [ literal[int] ][ literal[string] ][ literal[string] ] keyword[return] identifier[bool] ( identifier[status] == identifier[PENDING_ACCEPTANCE] )
def peering_connection_pending_from_vpc(conn_id=None, conn_name=None, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None): """ Check if a VPC peering connection is in the pending state, and requested from the given VPC. .. versionadded:: 2016.11.0 conn_id The connection ID to check. Exclusive with conn_name. conn_name The connection name to check. Exclusive with conn_id. vpc_id Is this the ID of the requesting VPC for this peering connection. Exclusive with vpc_name. vpc_name Is this the Name of the requesting VPC for this peering connection. Exclusive with vpc_id. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. CLI Example: .. code-block:: bash salt myminion boto_vpc.is_peering_connection_pending name=salt-vpc """ if not _exactly_one((conn_id, conn_name)): raise SaltInvocationError('Exactly one of conn_id or conn_name must be provided.') # depends on [control=['if'], data=[]] if not _exactly_one((vpc_id, vpc_name)): raise SaltInvocationError('Exactly one of vpc_id or vpc_name must be provided.') # depends on [control=['if'], data=[]] if vpc_name: vpc_id = check_vpc(vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile) if not vpc_id: log.warning('Could not resolve VPC name %s to an ID', vpc_name) return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile) filters = [{'Name': 'requester-vpc-info.vpc-id', 'Values': [vpc_id]}, {'Name': 'status-code', 'Values': [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]}] if conn_id: filters += [{'Name': 'vpc-peering-connection-id', 'Values': [conn_id]}] # depends on [control=['if'], data=[]] else: filters += [{'Name': 'tag:Name', 'Values': [conn_name]}] vpcs = conn.describe_vpc_peering_connections(Filters=filters).get('VpcPeeringConnections', []) if not vpcs: return False # depends on [control=['if'], data=[]] elif len(vpcs) > 1: raise SaltInvocationError('Found more than one ID for the VPC peering connection ({0}). Please call this function with an ID instead.'.format(conn_id or conn_name)) # depends on [control=['if'], data=[]] else: status = vpcs[0]['Status']['Code'] return bool(status == PENDING_ACCEPTANCE)
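The same requester-side pending check expressed against the boto3 EC2 client, which takes the identical Filters shape; the region and VPC id below are placeholders.

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')
resp = ec2.describe_vpc_peering_connections(Filters=[
    {'Name': 'requester-vpc-info.vpc-id', 'Values': ['vpc-0123456789abcdef0']},
    {'Name': 'status-code', 'Values': ['pending-acceptance']},
])
print(bool(resp.get('VpcPeeringConnections')))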
def find_python_files(dirname): """Yield all of the importable Python files in `dirname`, recursively. To be importable, the files have to be in a directory with a __init__.py, except for `dirname` itself, which isn't required to have one. The assumption is that `dirname` was specified directly, so the user knows best, but subdirectories are checked for a __init__.py to be sure we only find the importable files. """ for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): if i > 0 and '__init__.py' not in filenames: # If a directory doesn't have __init__.py, then it isn't # importable and neither are its files del dirnames[:] continue for filename in filenames: # We're only interested in files that look like reasonable Python # files: Must end with .py or .pyw, and must not have certain funny # characters that probably mean they are editor junk. if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): yield os.path.join(dirpath, filename)
def function[find_python_files, parameter[dirname]]: constant[Yield all of the importable Python files in `dirname`, recursively. To be importable, the files have to be in a directory with a __init__.py, except for `dirname` itself, which isn't required to have one. The assumption is that `dirname` was specified directly, so the user knows best, but subdirectories are checked for a __init__.py to be sure we only find the importable files. ] for taget[tuple[[<ast.Name object at 0x7da204345780>, <ast.Tuple object at 0x7da2043472b0>]]] in starred[call[name[enumerate], parameter[call[name[os].walk, parameter[name[dirname]]]]]] begin[:] if <ast.BoolOp object at 0x7da204344a60> begin[:] <ast.Delete object at 0x7da204347f70> continue for taget[name[filename]] in starred[name[filenames]] begin[:] if call[name[re].match, parameter[constant[^[^.#~!$@%^&*()+=,]+\.pyw?$], name[filename]]] begin[:] <ast.Yield object at 0x7da204344af0>
keyword[def] identifier[find_python_files] ( identifier[dirname] ): literal[string] keyword[for] identifier[i] ,( identifier[dirpath] , identifier[dirnames] , identifier[filenames] ) keyword[in] identifier[enumerate] ( identifier[os] . identifier[walk] ( identifier[dirname] )): keyword[if] identifier[i] > literal[int] keyword[and] literal[string] keyword[not] keyword[in] identifier[filenames] : keyword[del] identifier[dirnames] [:] keyword[continue] keyword[for] identifier[filename] keyword[in] identifier[filenames] : keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[filename] ): keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[dirpath] , identifier[filename] )
def find_python_files(dirname): """Yield all of the importable Python files in `dirname`, recursively. To be importable, the files have to be in a directory with a __init__.py, except for `dirname` itself, which isn't required to have one. The assumption is that `dirname` was specified directly, so the user knows best, but subdirectories are checked for a __init__.py to be sure we only find the importable files. """ for (i, (dirpath, dirnames, filenames)) in enumerate(os.walk(dirname)): if i > 0 and '__init__.py' not in filenames: # If a directory doesn't have __init__.py, then it isn't # importable and neither are its files del dirnames[:] continue # depends on [control=['if'], data=[]] for filename in filenames: # We're only interested in files that look like reasonable Python # files: Must end with .py or .pyw, and must not have certain funny # characters that probably mean they are editor junk. if re.match('^[^.#~!$@%^&*()+=,]+\\.pyw?$', filename): yield os.path.join(dirpath, filename) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]]
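Usage sketch (the directory name is hypothetical); deleting dirnames[:] in place is what tells os.walk not to descend into directories that are not packages.

for path in find_python_files('src'):
    print(path)  # e.g. src/app.py, src/pkg/__init__.py, src/pkg/mod.py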
def get_default_config(self): """ Return the default config for the handler """ config = super(zmqHandler, self).get_default_config() config.update({ 'port': 1234, }) return config
def function[get_default_config, parameter[self]]: constant[ Return the default config for the handler ] variable[config] assign[=] call[call[name[super], parameter[name[zmqHandler], name[self]]].get_default_config, parameter[]] call[name[config].update, parameter[dictionary[[<ast.Constant object at 0x7da18dc04910>], [<ast.Constant object at 0x7da18dc05840>]]]] return[name[config]]
keyword[def] identifier[get_default_config] ( identifier[self] ): literal[string] identifier[config] = identifier[super] ( identifier[zmqHandler] , identifier[self] ). identifier[get_default_config] () identifier[config] . identifier[update] ({ literal[string] : literal[int] , }) keyword[return] identifier[config]
def get_default_config(self): """ Return the default config for the handler """ config = super(zmqHandler, self).get_default_config() config.update({'port': 1234}) return config
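The cooperative-override pattern in isolation, with made-up handler classes: each subclass extends rather than replaces its parent's defaults.

class Base(object):
    def get_default_config(self):
        return {'server': 'localhost'}

class ZmqLike(Base):
    def get_default_config(self):
        config = super(ZmqLike, self).get_default_config()
        config.update({'port': 1234})
        return config

print(ZmqLike().get_default_config())  # {'server': 'localhost', 'port': 1234}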
def send_sms(request, to_number, body, callback_urlname="sms_status_callback"): """ Create :class:`OutgoingSMS` object and send SMS using Twilio. """ client = TwilioRestClient(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN) from_number = settings.TWILIO_PHONE_NUMBER message = OutgoingSMS.objects.create( from_number=from_number, to_number=to_number, body=body, ) status_callback = None if callback_urlname: status_callback = build_callback_url(request, callback_urlname, message) logger.debug("Sending SMS message to %s with callback url %s: %s.", to_number, status_callback, body) if not getattr(settings, "TWILIO_DRY_MODE", False): sent = client.sms.messages.create( to=to_number, from_=from_number, body=body, status_callback=status_callback ) logger.debug("SMS message sent: %s", sent.__dict__) message.sms_sid = sent.sid message.account_sid = sent.account_sid message.status = sent.status message.to_parsed = sent.to if sent.price: message.price = Decimal(force_text(sent.price)) message.price_unit = sent.price_unit message.sent_at = sent.date_created message.save(update_fields=[ "sms_sid", "account_sid", "status", "to_parsed", "price", "price_unit", "sent_at" ]) else: logger.info("SMS: from %s to %s: %s", from_number, to_number, body) return message
def function[send_sms, parameter[request, to_number, body, callback_urlname]]: constant[ Create :class:`OutgoingSMS` object and send SMS using Twilio. ] variable[client] assign[=] call[name[TwilioRestClient], parameter[name[settings].TWILIO_ACCOUNT_SID, name[settings].TWILIO_AUTH_TOKEN]] variable[from_number] assign[=] name[settings].TWILIO_PHONE_NUMBER variable[message] assign[=] call[name[OutgoingSMS].objects.create, parameter[]] variable[status_callback] assign[=] constant[None] if name[callback_urlname] begin[:] variable[status_callback] assign[=] call[name[build_callback_url], parameter[name[request], name[callback_urlname], name[message]]] call[name[logger].debug, parameter[constant[Sending SMS message to %s with callback url %s: %s.], name[to_number], name[status_callback], name[body]]] if <ast.UnaryOp object at 0x7da1b0290ee0> begin[:] variable[sent] assign[=] call[name[client].sms.messages.create, parameter[]] call[name[logger].debug, parameter[constant[SMS message sent: %s], name[sent].__dict__]] name[message].sms_sid assign[=] name[sent].sid name[message].account_sid assign[=] name[sent].account_sid name[message].status assign[=] name[sent].status name[message].to_parsed assign[=] name[sent].to if name[sent].price begin[:] name[message].price assign[=] call[name[Decimal], parameter[call[name[force_text], parameter[name[sent].price]]]] name[message].price_unit assign[=] name[sent].price_unit name[message].sent_at assign[=] name[sent].date_created call[name[message].save, parameter[]] return[name[message]]
keyword[def] identifier[send_sms] ( identifier[request] , identifier[to_number] , identifier[body] , identifier[callback_urlname] = literal[string] ): literal[string] identifier[client] = identifier[TwilioRestClient] ( identifier[settings] . identifier[TWILIO_ACCOUNT_SID] , identifier[settings] . identifier[TWILIO_AUTH_TOKEN] ) identifier[from_number] = identifier[settings] . identifier[TWILIO_PHONE_NUMBER] identifier[message] = identifier[OutgoingSMS] . identifier[objects] . identifier[create] ( identifier[from_number] = identifier[from_number] , identifier[to_number] = identifier[to_number] , identifier[body] = identifier[body] , ) identifier[status_callback] = keyword[None] keyword[if] identifier[callback_urlname] : identifier[status_callback] = identifier[build_callback_url] ( identifier[request] , identifier[callback_urlname] , identifier[message] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[to_number] , identifier[status_callback] , identifier[body] ) keyword[if] keyword[not] identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] ): identifier[sent] = identifier[client] . identifier[sms] . identifier[messages] . identifier[create] ( identifier[to] = identifier[to_number] , identifier[from_] = identifier[from_number] , identifier[body] = identifier[body] , identifier[status_callback] = identifier[status_callback] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[sent] . identifier[__dict__] ) identifier[message] . identifier[sms_sid] = identifier[sent] . identifier[sid] identifier[message] . identifier[account_sid] = identifier[sent] . identifier[account_sid] identifier[message] . identifier[status] = identifier[sent] . identifier[status] identifier[message] . identifier[to_parsed] = identifier[sent] . identifier[to] keyword[if] identifier[sent] . identifier[price] : identifier[message] . identifier[price] = identifier[Decimal] ( identifier[force_text] ( identifier[sent] . identifier[price] )) identifier[message] . identifier[price_unit] = identifier[sent] . identifier[price_unit] identifier[message] . identifier[sent_at] = identifier[sent] . identifier[date_created] identifier[message] . identifier[save] ( identifier[update_fields] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[else] : identifier[logger] . identifier[info] ( literal[string] , identifier[from_number] , identifier[to_number] , identifier[body] ) keyword[return] identifier[message]
def send_sms(request, to_number, body, callback_urlname='sms_status_callback'): """ Create :class:`OutgoingSMS` object and send SMS using Twilio. """ client = TwilioRestClient(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN) from_number = settings.TWILIO_PHONE_NUMBER message = OutgoingSMS.objects.create(from_number=from_number, to_number=to_number, body=body) status_callback = None if callback_urlname: status_callback = build_callback_url(request, callback_urlname, message) # depends on [control=['if'], data=[]] logger.debug('Sending SMS message to %s with callback url %s: %s.', to_number, status_callback, body) if not getattr(settings, 'TWILIO_DRY_MODE', False): sent = client.sms.messages.create(to=to_number, from_=from_number, body=body, status_callback=status_callback) logger.debug('SMS message sent: %s', sent.__dict__) message.sms_sid = sent.sid message.account_sid = sent.account_sid message.status = sent.status message.to_parsed = sent.to if sent.price: message.price = Decimal(force_text(sent.price)) message.price_unit = sent.price_unit # depends on [control=['if'], data=[]] message.sent_at = sent.date_created message.save(update_fields=['sms_sid', 'account_sid', 'status', 'to_parsed', 'price', 'price_unit', 'sent_at']) # depends on [control=['if'], data=[]] else: logger.info('SMS: from %s to %s: %s', from_number, to_number, body) return message
def reset_image_attribute(self, image_id, attribute='launchPermission'):
    """
    Resets an attribute of an AMI to its default value.

    :type image_id: string
    :param image_id: ID of the AMI for which an attribute will be reset

    :type attribute: string
    :param attribute: The attribute to reset

    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    params = {'ImageId' : image_id,
              'Attribute' : attribute}
    return self.get_status('ResetImageAttribute', params, verb='POST')
def function[reset_image_attribute, parameter[self, image_id, attribute]]:
    constant[
    Resets an attribute of an AMI to its default value.

    :type image_id: string
    :param image_id: ID of the AMI for which an attribute will be reset

    :type attribute: string
    :param attribute: The attribute to reset

    :rtype: bool
    :return: Whether the operation succeeded or not
    ]
    variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b26a5d20>, <ast.Constant object at 0x7da1b26a45b0>], [<ast.Name object at 0x7da1b26a5b70>, <ast.Name object at 0x7da1b26a7af0>]]
    return[call[name[self].get_status, parameter[constant[ResetImageAttribute], name[params]]]]
keyword[def] identifier[reset_image_attribute] ( identifier[self] , identifier[image_id] , identifier[attribute] = literal[string] ): literal[string] identifier[params] ={ literal[string] : identifier[image_id] , literal[string] : identifier[attribute] } keyword[return] identifier[self] . identifier[get_status] ( literal[string] , identifier[params] , identifier[verb] = literal[string] )
def reset_image_attribute(self, image_id, attribute='launchPermission'):
    """
    Resets an attribute of an AMI to its default value.

    :type image_id: string
    :param image_id: ID of the AMI for which an attribute will be reset

    :type attribute: string
    :param attribute: The attribute to reset

    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    params = {'ImageId': image_id, 'Attribute': attribute}
    return self.get_status('ResetImageAttribute', params, verb='POST')