Dataset schema (per-row string length ranges, as reported by the dataset viewer):

- code: string, 75 to 104k characters
- code_sememe: string, 47 to 309k characters
- token_type: string, 215 to 214k characters
- code_dependency: string, 75 to 155k characters
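Each example below is one dataset row, shown as four parallel cells in column order: the original code, its code_sememe tree rendering, its token_type stream, and its code_dependency annotation. A minimal sketch of iterating such a dump, assuming it has been exported as JSON Lines; the filename here is hypothetical:

import json

# Hypothetical export path -- adjust the loading step to however the dump
# was actually distributed (JSONL is assumed purely for illustration).
with open("code_sememe_dump.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # Print a short preview of the four parallel views of one function.
        for col in ("code", "code_sememe", "token_type", "code_dependency"):
            print(col, "->", row[col][:60].replace("\n", " "))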
def create_python_worker(self, func, *args, **kwargs):
    """Create a new python worker instance."""
    worker = PythonWorker(func, args, kwargs)
    self._create_worker(worker)
    return worker
def function[create_python_worker, parameter[self, func]]: constant[Create a new python worker instance.] variable[worker] assign[=] call[name[PythonWorker], parameter[name[func], name[args], name[kwargs]]] call[name[self]._create_worker, parameter[name[worker]]] return[name[worker]]
keyword[def] identifier[create_python_worker] ( identifier[self] , identifier[func] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[worker] = identifier[PythonWorker] ( identifier[func] , identifier[args] , identifier[kwargs] ) identifier[self] . identifier[_create_worker] ( identifier[worker] ) keyword[return] identifier[worker]
def create_python_worker(self, func, *args, **kwargs):
    """Create a new python worker instance."""
    worker = PythonWorker(func, args, kwargs)
    self._create_worker(worker)
    return worker
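The code_sememe cell mirrors the AST of the code cell: names become name[...], docstrings become constant[...], and calls become call[func, parameter[...]]. The following is a rough, illustrative sketch of how such a rendering could be derived with Python's stdlib ast module; it is a guess at the scheme, not the dataset's actual generator, and it covers only a few node kinds:

import ast

def render(node):
    # Illustrative AST-to-sememe style rendering (only a few node kinds).
    if isinstance(node, ast.Name):
        return 'name[%s]' % node.id
    if isinstance(node, ast.Constant):
        return 'constant[%s]' % node.value
    if isinstance(node, ast.Attribute):
        return '%s.%s' % (render(node.value), node.attr)
    if isinstance(node, ast.Call):
        args = ', '.join(render(a) for a in node.args)
        return 'call[%s, parameter[%s]]' % (render(node.func), args)
    return type(node).__name__.lower()

tree = ast.parse('worker = PythonWorker(func, args, kwargs)')
print(render(tree.body[0].value))
# call[name[PythonWorker], parameter[name[func], name[args], name[kwargs]]]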
def in_general_ns(uri):
    """Return True iff the URI is in a well-known general RDF namespace.
    URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC."""
    RDFuri = RDF.uri
    RDFSuri = RDFS.uri
    for ns in (RDFuri, RDFSuri, OWL, SKOS, DC):
        if uri.startswith(ns):
            return True
    return False
def function[in_general_ns, parameter[uri]]: constant[Return True iff the URI is in a well-known general RDF namespace. URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC.] variable[RDFuri] assign[=] name[RDF].uri variable[RDFSuri] assign[=] name[RDFS].uri for taget[name[ns]] in starred[tuple[[<ast.Name object at 0x7da1b0401330>, <ast.Name object at 0x7da1b04010f0>, <ast.Name object at 0x7da1b04000d0>, <ast.Name object at 0x7da1b0403280>, <ast.Name object at 0x7da1b0401d80>]]] begin[:] if call[name[uri].startswith, parameter[name[ns]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[in_general_ns] ( identifier[uri] ): literal[string] identifier[RDFuri] = identifier[RDF] . identifier[uri] identifier[RDFSuri] = identifier[RDFS] . identifier[uri] keyword[for] identifier[ns] keyword[in] ( identifier[RDFuri] , identifier[RDFSuri] , identifier[OWL] , identifier[SKOS] , identifier[DC] ): keyword[if] identifier[uri] . identifier[startswith] ( identifier[ns] ): keyword[return] keyword[True] keyword[return] keyword[False]
def in_general_ns(uri):
    """Return True iff the URI is in a well-known general RDF namespace.
    URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC."""
    RDFuri = RDF.uri
    RDFSuri = RDFS.uri
    for ns in (RDFuri, RDFSuri, OWL, SKOS, DC):
        if uri.startswith(ns):
            return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ns']]
    return False
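In the token_type cells, every token is categorized in place: keywords become keyword[...], identifiers become identifier[...], string and number literals collapse to literal[string] / literal[int], punctuation stays verbatim, and comments disappear. A sketch of one way to approximate such a stream with the stdlib tokenize module; this approximates, rather than reproduces, the dataset's pipeline:

import io
import keyword
import token
import tokenize

def token_type_stream(source):
    # Approximate keyword/identifier/literal categorization of Python source.
    out = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == token.NAME:
            kind = 'keyword' if keyword.iskeyword(tok.string) else 'identifier'
            out.append('%s[%s]' % (kind, tok.string))
        elif tok.type == token.STRING:
            out.append('literal[string]')
        elif tok.type == token.NUMBER:
            out.append('literal[int]')
        elif tok.type == token.OP:
            out.append(tok.string)
        # comments and layout tokens (NEWLINE, INDENT, ...) are dropped
    return ' '.join(out)

print(token_type_stream('return PythonWorker(func, args)'))
# keyword[return] identifier[PythonWorker] ( identifier[func] , identifier[args] )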
def get_mnist():
    """ Gets MNIST dataset """
    np.random.seed(1234) # set seed for deterministic ordering
    mnist_data = mx.test_utils.get_mnist()
    X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])
    Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])
    p = np.random.permutation(X.shape[0])
    X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5
    Y = Y[p]
    return X, Y
def function[get_mnist, parameter[]]: constant[ Gets MNIST dataset ] call[name[np].random.seed, parameter[constant[1234]]] variable[mnist_data] assign[=] call[name[mx].test_utils.get_mnist, parameter[]] variable[X] assign[=] call[name[np].concatenate, parameter[list[[<ast.Subscript object at 0x7da1b1ef0f40>, <ast.Subscript object at 0x7da1b1ef31f0>]]]] variable[Y] assign[=] call[name[np].concatenate, parameter[list[[<ast.Subscript object at 0x7da1b1ef0c10>, <ast.Subscript object at 0x7da1b1ef2200>]]]] variable[p] assign[=] call[name[np].random.permutation, parameter[call[name[X].shape][constant[0]]]] variable[X] assign[=] binary_operation[call[call[call[name[X]][name[p]].reshape, parameter[tuple[[<ast.Subscript object at 0x7da1b1ef08b0>, <ast.UnaryOp object at 0x7da1b1ef09d0>]]]].astype, parameter[name[np].float32]] * constant[5]] variable[Y] assign[=] call[name[Y]][name[p]] return[tuple[[<ast.Name object at 0x7da1b1ef17e0>, <ast.Name object at 0x7da1b1ef2dd0>]]]
keyword[def] identifier[get_mnist] (): literal[string] identifier[np] . identifier[random] . identifier[seed] ( literal[int] ) identifier[mnist_data] = identifier[mx] . identifier[test_utils] . identifier[get_mnist] () identifier[X] = identifier[np] . identifier[concatenate] ([ identifier[mnist_data] [ literal[string] ], identifier[mnist_data] [ literal[string] ]]) identifier[Y] = identifier[np] . identifier[concatenate] ([ identifier[mnist_data] [ literal[string] ], identifier[mnist_data] [ literal[string] ]]) identifier[p] = identifier[np] . identifier[random] . identifier[permutation] ( identifier[X] . identifier[shape] [ literal[int] ]) identifier[X] = identifier[X] [ identifier[p] ]. identifier[reshape] (( identifier[X] . identifier[shape] [ literal[int] ],- literal[int] )). identifier[astype] ( identifier[np] . identifier[float32] )* literal[int] identifier[Y] = identifier[Y] [ identifier[p] ] keyword[return] identifier[X] , identifier[Y]
def get_mnist():
    """ Gets MNIST dataset """
    np.random.seed(1234) # set seed for deterministic ordering
    mnist_data = mx.test_utils.get_mnist()
    X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])
    Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])
    p = np.random.permutation(X.shape[0])
    X = X[p].reshape((X.shape[0], -1)).astype(np.float32) * 5
    Y = Y[p]
    return (X, Y)
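The code_dependency cells re-serialize the function and append '# depends on [control=[...], data=[...]]' markers where each control block closes, recording which construct a statement's execution depended on. A simplified sketch of recovering just the control side with ast; the exact marker format and the data-dependence analysis are not reproduced here, so treat this as an illustration of the idea only:

import ast

CONTROL = (ast.If, ast.For, ast.While, ast.With, ast.Try)

def control_deps(source):
    # Map each statement's line number to its innermost enclosing control construct.
    deps = {}
    def walk(node, enclosing):
        for child in ast.iter_child_nodes(node):
            if isinstance(child, ast.stmt) and enclosing is not None:
                deps[child.lineno] = type(enclosing).__name__
            walk(child, child if isinstance(child, CONTROL) else enclosing)
    walk(ast.parse(source), None)
    return deps

src = (
    "def in_general_ns(uri):\n"
    "    for ns in namespaces:\n"
    "        if uri.startswith(ns):\n"
    "            return True\n"
    "    return False\n"
)
print(control_deps(src))  # {3: 'For', 4: 'If'}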
def pack_dunder(name):
    '''
    Compatibility helper function to make __utils__ available on demand.
    '''
    # TODO: Deprecate starting with Beryllium
    mod = sys.modules[name]
    if not hasattr(mod, '__utils__'):
        setattr(mod, '__utils__', salt.loader.utils(mod.__opts__))
def function[pack_dunder, parameter[name]]: constant[ Compatibility helper function to make __utils__ available on demand. ] variable[mod] assign[=] call[name[sys].modules][name[name]] if <ast.UnaryOp object at 0x7da1b1f74760> begin[:] call[name[setattr], parameter[name[mod], constant[__utils__], call[name[salt].loader.utils, parameter[name[mod].__opts__]]]]
keyword[def] identifier[pack_dunder] ( identifier[name] ): literal[string] identifier[mod] = identifier[sys] . identifier[modules] [ identifier[name] ] keyword[if] keyword[not] identifier[hasattr] ( identifier[mod] , literal[string] ): identifier[setattr] ( identifier[mod] , literal[string] , identifier[salt] . identifier[loader] . identifier[utils] ( identifier[mod] . identifier[__opts__] ))
def pack_dunder(name):
    """
    Compatibility helper function to make __utils__ available on demand.
    """
    # TODO: Deprecate starting with Beryllium
    mod = sys.modules[name]
    if not hasattr(mod, '__utils__'):
        setattr(mod, '__utils__', salt.loader.utils(mod.__opts__)) # depends on [control=['if'], data=[]]
def setup(self):
    """
    Set up the power system object by executing the following workflow:

    * Sort the loaded models to meet the initialization sequence
    * Create call strings for routines
    * Call the ``setup`` function of the loaded models
    * Assign addresses for the loaded models
    * Call ``dae.setup`` to assign memory for the numerical dae structure
    * Convert model parameters to the system base

    Returns
    -------
    PowerSystem
        The instance of the PowerSystem
    """
    self.devman.sort_device()
    self.call.setup()
    self.model_setup()
    self.xy_addr0()
    self.dae.setup()
    self.to_sysbase()
    return self
def function[setup, parameter[self]]: constant[ Set up the power system object by executing the following workflow: * Sort the loaded models to meet the initialization sequence * Create call strings for routines * Call the ``setup`` function of the loaded models * Assign addresses for the loaded models * Call ``dae.setup`` to assign memory for the numerical dae structure * Convert model parameters to the system base Returns ------- PowerSystem The instance of the PowerSystem ] call[name[self].devman.sort_device, parameter[]] call[name[self].call.setup, parameter[]] call[name[self].model_setup, parameter[]] call[name[self].xy_addr0, parameter[]] call[name[self].dae.setup, parameter[]] call[name[self].to_sysbase, parameter[]] return[name[self]]
keyword[def] identifier[setup] ( identifier[self] ): literal[string] identifier[self] . identifier[devman] . identifier[sort_device] () identifier[self] . identifier[call] . identifier[setup] () identifier[self] . identifier[model_setup] () identifier[self] . identifier[xy_addr0] () identifier[self] . identifier[dae] . identifier[setup] () identifier[self] . identifier[to_sysbase] () keyword[return] identifier[self]
def setup(self):
    """
    Set up the power system object by executing the following workflow:

    * Sort the loaded models to meet the initialization sequence
    * Create call strings for routines
    * Call the ``setup`` function of the loaded models
    * Assign addresses for the loaded models
    * Call ``dae.setup`` to assign memory for the numerical dae structure
    * Convert model parameters to the system base

    Returns
    -------
    PowerSystem
        The instance of the PowerSystem
    """
    self.devman.sort_device()
    self.call.setup()
    self.model_setup()
    self.xy_addr0()
    self.dae.setup()
    self.to_sysbase()
    return self
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # Print the report to stdout unless the user specified --quiet.
    output = ""
    if not args.quiet and not args.gist and not args.file:
        return report
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report.encode('utf-8'))
        output += "\nReport written to file: {0}".format(args.file)
    return output
def function[publish_report, parameter[report, args, old_commit, new_commit]]: constant[Publish the RST report based on the user request.] variable[output] assign[=] constant[] if <ast.BoolOp object at 0x7da1b28f1570> begin[:] return[name[report]] if name[args].gist begin[:] variable[gist_url] assign[=] call[name[post_gist], parameter[name[report], name[old_commit], name[new_commit]]] <ast.AugAssign object at 0x7da1b28f0970> if compare[name[args].file is_not constant[None]] begin[:] with call[name[open], parameter[name[args].file, constant[w]]] begin[:] call[name[f].write, parameter[call[name[report].encode, parameter[constant[utf-8]]]]] <ast.AugAssign object at 0x7da1b28f1ea0> return[name[output]]
keyword[def] identifier[publish_report] ( identifier[report] , identifier[args] , identifier[old_commit] , identifier[new_commit] ): literal[string] identifier[output] = literal[string] keyword[if] keyword[not] identifier[args] . identifier[quiet] keyword[and] keyword[not] identifier[args] . identifier[gist] keyword[and] keyword[not] identifier[args] . identifier[file] : keyword[return] identifier[report] keyword[if] identifier[args] . identifier[gist] : identifier[gist_url] = identifier[post_gist] ( identifier[report] , identifier[old_commit] , identifier[new_commit] ) identifier[output] += literal[string] . identifier[format] ( identifier[gist_url] ) keyword[if] identifier[args] . identifier[file] keyword[is] keyword[not] keyword[None] : keyword[with] identifier[open] ( identifier[args] . identifier[file] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[report] . identifier[encode] ( literal[string] )) identifier[output] += literal[string] . identifier[format] ( identifier[args] . identifier[file] ) keyword[return] identifier[output]
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # Print the report to stdout unless the user specified --quiet.
    output = ''
    if not args.quiet and (not args.gist) and (not args.file):
        return report # depends on [control=['if'], data=[]]
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += '\nReport posted to GitHub Gist: {0}'.format(gist_url) # depends on [control=['if'], data=[]]
    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report.encode('utf-8')) # depends on [control=['with'], data=['f']]
        output += '\nReport written to file: {0}'.format(args.file) # depends on [control=['if'], data=[]]
    return output
def prepare(ctx, new_version=None, version_part=None, hide=True, dry_run=False):
    """Prepare the release: bump version, build packages, ..."""
    if new_version is not None:
        bump_version(ctx, new_version, version_part=version_part, dry_run=dry_run)
    build_packages(ctx, hide=hide)
    packages = ensure_packages_exist(ctx, check_only=True)
    print_packages(packages)
def function[prepare, parameter[ctx, new_version, version_part, hide, dry_run]]: constant[Prepare the release: bump version, build packages, ...] if compare[name[new_version] is_not constant[None]] begin[:] call[name[bump_version], parameter[name[ctx], name[new_version]]] call[name[build_packages], parameter[name[ctx]]] variable[packages] assign[=] call[name[ensure_packages_exist], parameter[name[ctx]]] call[name[print_packages], parameter[name[packages]]]
keyword[def] identifier[prepare] ( identifier[ctx] , identifier[new_version] = keyword[None] , identifier[version_part] = keyword[None] , identifier[hide] = keyword[True] , identifier[dry_run] = keyword[False] ): literal[string] keyword[if] identifier[new_version] keyword[is] keyword[not] keyword[None] : identifier[bump_version] ( identifier[ctx] , identifier[new_version] , identifier[version_part] = identifier[version_part] , identifier[dry_run] = identifier[dry_run] ) identifier[build_packages] ( identifier[ctx] , identifier[hide] = identifier[hide] ) identifier[packages] = identifier[ensure_packages_exist] ( identifier[ctx] , identifier[check_only] = keyword[True] ) identifier[print_packages] ( identifier[packages] )
def prepare(ctx, new_version=None, version_part=None, hide=True, dry_run=False):
    """Prepare the release: bump version, build packages, ..."""
    if new_version is not None:
        bump_version(ctx, new_version, version_part=version_part, dry_run=dry_run) # depends on [control=['if'], data=['new_version']]
    build_packages(ctx, hide=hide)
    packages = ensure_packages_exist(ctx, check_only=True)
    print_packages(packages)
def create_table_sql(cls, db):
    '''
    Returns the SQL command for creating a table for this model.
    '''
    parts = ['CREATE TABLE IF NOT EXISTS `%s`.`%s` AS `%s`.`%s`' % (db.db_name, cls.table_name(), db.db_name, cls.engine.main_model.table_name())]
    engine_str = cls.engine.create_table_sql(db)
    parts.append(engine_str)
    return ' '.join(parts)
def function[create_table_sql, parameter[cls, db]]: constant[ Returns the SQL command for creating a table for this model. ] variable[parts] assign[=] list[[<ast.BinOp object at 0x7da18fe90f70>]] variable[engine_str] assign[=] call[name[cls].engine.create_table_sql, parameter[name[db]]] call[name[parts].append, parameter[name[engine_str]]] return[call[constant[ ].join, parameter[name[parts]]]]
keyword[def] identifier[create_table_sql] ( identifier[cls] , identifier[db] ): literal[string] identifier[parts] =[ literal[string] %( identifier[db] . identifier[db_name] , identifier[cls] . identifier[table_name] (), identifier[db] . identifier[db_name] , identifier[cls] . identifier[engine] . identifier[main_model] . identifier[table_name] ())] identifier[engine_str] = identifier[cls] . identifier[engine] . identifier[create_table_sql] ( identifier[db] ) identifier[parts] . identifier[append] ( identifier[engine_str] ) keyword[return] literal[string] . identifier[join] ( identifier[parts] )
def create_table_sql(cls, db):
    """
    Returns the SQL command for creating a table for this model.
    """
    parts = ['CREATE TABLE IF NOT EXISTS `%s`.`%s` AS `%s`.`%s`' % (db.db_name, cls.table_name(), db.db_name, cls.engine.main_model.table_name())]
    engine_str = cls.engine.create_table_sql(db)
    parts.append(engine_str)
    return ' '.join(parts)
def distributions_for_instances(self, data):
    """
    Performs predictions, returning the class distributions.

    :param data: the Instances to get the class distributions for
    :type data: Instances
    :return: the class distribution matrix, None if not a batch predictor
    :rtype: ndarray
    """
    if self.is_batchpredictor:
        return typeconv.double_matrix_to_ndarray(self.__distributions(data.jobject))
    else:
        return None
def function[distributions_for_instances, parameter[self, data]]: constant[ Performs predictions, returning the class distributions. :param data: the Instances to get the class distributions for :type data: Instances :return: the class distribution matrix, None if not a batch predictor :rtype: ndarray ] if name[self].is_batchpredictor begin[:] return[call[name[typeconv].double_matrix_to_ndarray, parameter[call[name[self].__distributions, parameter[name[data].jobject]]]]]
keyword[def] identifier[distributions_for_instances] ( identifier[self] , identifier[data] ): literal[string] keyword[if] identifier[self] . identifier[is_batchpredictor] : keyword[return] identifier[typeconv] . identifier[double_matrix_to_ndarray] ( identifier[self] . identifier[__distributions] ( identifier[data] . identifier[jobject] )) keyword[else] : keyword[return] keyword[None]
def distributions_for_instances(self, data):
    """
    Performs predictions, returning the class distributions.

    :param data: the Instances to get the class distributions for
    :type data: Instances
    :return: the class distribution matrix, None if not a batch predictor
    :rtype: ndarray
    """
    if self.is_batchpredictor:
        return typeconv.double_matrix_to_ndarray(self.__distributions(data.jobject)) # depends on [control=['if'], data=[]]
    else:
        return None
def _submit_to_queue(self, script_file):
    """Submit a job script to the queue."""
    if sys.version_info[0] < 3:
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
    else:
        # need string not bytes so must use universal_newlines
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
    out, err = process.communicate()
    # grab the return code. PBS returns 0 if the job was successful
    queue_id = None
    if process.returncode == 0:
        try:
            # output should be of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
            queue_id = int(out.split('.')[0])
        except:
            # probably error parsing job code
            logger.critical("Could not parse job id following qsub...")
    return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def function[_submit_to_queue, parameter[self, script_file]]: constant[Submit a job script to the queue.] if compare[call[name[sys].version_info][constant[0]] less[<] constant[3]] begin[:] variable[process] assign[=] call[name[Popen], parameter[list[[<ast.Constant object at 0x7da20c992b00>, <ast.Name object at 0x7da20c993be0>]]]] <ast.Tuple object at 0x7da18f8105b0> assign[=] call[name[process].communicate, parameter[]] variable[queue_id] assign[=] constant[None] if compare[name[process].returncode equal[==] constant[0]] begin[:] <ast.Try object at 0x7da18f8101f0> return[call[name[SubmitResults], parameter[]]]
keyword[def] identifier[_submit_to_queue] ( identifier[self] , identifier[script_file] ): literal[string] keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]< literal[int] : identifier[process] = identifier[Popen] ([ literal[string] , identifier[script_file] ], identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[PIPE] ) keyword[else] : identifier[process] = identifier[Popen] ([ literal[string] , identifier[script_file] ], identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[PIPE] , identifier[universal_newlines] = keyword[True] ) identifier[out] , identifier[err] = identifier[process] . identifier[communicate] () identifier[queue_id] = keyword[None] keyword[if] identifier[process] . identifier[returncode] == literal[int] : keyword[try] : identifier[queue_id] = identifier[int] ( identifier[out] . identifier[split] ( literal[string] )[ literal[int] ]) keyword[except] : identifier[logger] . identifier[critical] ( literal[string] ) keyword[return] identifier[SubmitResults] ( identifier[qid] = identifier[queue_id] , identifier[out] = identifier[out] , identifier[err] = identifier[err] , identifier[process] = identifier[process] )
def _submit_to_queue(self, script_file):
    """Submit a job script to the queue."""
    if sys.version_info[0] < 3:
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE) # depends on [control=['if'], data=[]]
    else:
        # need string not bytes so must use universal_newlines
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
    (out, err) = process.communicate()
    # grab the return code. PBS returns 0 if the job was successful
    queue_id = None
    if process.returncode == 0:
        try:
            # output should be of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
            queue_id = int(out.split('.')[0]) # depends on [control=['try'], data=[]]
        except:
            # probably error parsing job code
            logger.critical('Could not parse job id following qsub...') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
    return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def render_css(self, fn=None, text=None, margin='', indent='\t'):
    """output css using the Sass processor"""
    fn = fn or os.path.splitext(self.fn)[0]+'.css'
    if not os.path.exists(os.path.dirname(fn)):
        os.makedirs(os.path.dirname(fn))
    curdir = os.path.abspath(os.curdir)
    os.chdir(os.path.dirname(fn)) # needed in order for scss to resolve relative @import
    text = text or self.render_styles()
    if text != '':
        text = sass.compile(string=text)
    os.chdir(curdir)
    return CSS(fn=fn, text=text)
def function[render_css, parameter[self, fn, text, margin, indent]]: constant[output css using the Sass processor] variable[fn] assign[=] <ast.BoolOp object at 0x7da207f98c40> if <ast.UnaryOp object at 0x7da18dc98e20> begin[:] call[name[os].makedirs, parameter[call[name[os].path.dirname, parameter[name[fn]]]]] variable[curdir] assign[=] call[name[os].path.abspath, parameter[name[os].curdir]] call[name[os].chdir, parameter[call[name[os].path.dirname, parameter[name[fn]]]]] variable[text] assign[=] <ast.BoolOp object at 0x7da18dc98340> if compare[name[text] not_equal[!=] constant[]] begin[:] variable[text] assign[=] call[name[sass].compile, parameter[]] call[name[os].chdir, parameter[name[curdir]]] return[call[name[CSS], parameter[]]]
keyword[def] identifier[render_css] ( identifier[self] , identifier[fn] = keyword[None] , identifier[text] = keyword[None] , identifier[margin] = literal[string] , identifier[indent] = literal[string] ): literal[string] identifier[fn] = identifier[fn] keyword[or] identifier[os] . identifier[path] . identifier[splitext] ( identifier[self] . identifier[fn] )[ literal[int] ]+ literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[fn] )): identifier[os] . identifier[makedirs] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[fn] )) identifier[curdir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[curdir] ) identifier[os] . identifier[chdir] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[fn] )) identifier[text] = identifier[text] keyword[or] identifier[self] . identifier[render_styles] () keyword[if] identifier[text] != literal[string] : identifier[text] = identifier[sass] . identifier[compile] ( identifier[string] = identifier[text] ) identifier[os] . identifier[chdir] ( identifier[curdir] ) keyword[return] identifier[CSS] ( identifier[fn] = identifier[fn] , identifier[text] = identifier[text] )
def render_css(self, fn=None, text=None, margin='', indent='\t'):
    """output css using the Sass processor"""
    fn = fn or os.path.splitext(self.fn)[0] + '.css'
    if not os.path.exists(os.path.dirname(fn)):
        os.makedirs(os.path.dirname(fn)) # depends on [control=['if'], data=[]]
    curdir = os.path.abspath(os.curdir)
    os.chdir(os.path.dirname(fn)) # needed in order for scss to resolve relative @import
    text = text or self.render_styles()
    if text != '':
        text = sass.compile(string=text) # depends on [control=['if'], data=['text']]
    os.chdir(curdir)
    return CSS(fn=fn, text=text)
def open(filename, frame='unspecified'):
    """Creates a PointCloudImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy, or .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image lies.

    Returns
    -------
    :obj:`PointCloudImage`
        The new PointCloudImage.
    """
    data = Image.load_data(filename)
    return PointCloudImage(data, frame)
def function[open, parameter[filename, frame]]: constant[Creates a PointCloudImage from a file. Parameters ---------- filename : :obj:`str` The file to load the data from. Must be one of .png, .jpg, .npy, or .npz. frame : :obj:`str` A string representing the frame of reference in which the new image lies. Returns ------- :obj:`PointCloudImage` The new PointCloudImage. ] variable[data] assign[=] call[name[Image].load_data, parameter[name[filename]]] return[call[name[PointCloudImage], parameter[name[data], name[frame]]]]
keyword[def] identifier[open] ( identifier[filename] , identifier[frame] = literal[string] ): literal[string] identifier[data] = identifier[Image] . identifier[load_data] ( identifier[filename] ) keyword[return] identifier[PointCloudImage] ( identifier[data] , identifier[frame] )
def open(filename, frame='unspecified'):
    """Creates a PointCloudImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy, or .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image lies.

    Returns
    -------
    :obj:`PointCloudImage`
        The new PointCloudImage.
    """
    data = Image.load_data(filename)
    return PointCloudImage(data, frame)
def start(self, start_context):
    """ Perform any logic on solution start """
    for p in self._providers:
        p.start(start_context)
    if self._clear_start:
        self.clear_cache()
def function[start, parameter[self, start_context]]: constant[ Perform any logic on solution start ] for taget[name[p]] in starred[name[self]._providers] begin[:] call[name[p].start, parameter[name[start_context]]] if name[self]._clear_start begin[:] call[name[self].clear_cache, parameter[]]
keyword[def] identifier[start] ( identifier[self] , identifier[start_context] ): literal[string] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[_providers] : identifier[p] . identifier[start] ( identifier[start_context] ) keyword[if] identifier[self] . identifier[_clear_start] : identifier[self] . identifier[clear_cache] ()
def start(self, start_context):
    """ Perform any logic on solution start """
    for p in self._providers:
        p.start(start_context) # depends on [control=['for'], data=['p']]
    if self._clear_start:
        self.clear_cache() # depends on [control=['if'], data=[]]
def infer_call_result(self, caller, context=None):
    """infer what a class is returning when called"""
    if (
        self.is_subtype_of("%s.type" % (BUILTINS,), context)
        and len(caller.args) == 3
    ):
        result = self._infer_type_call(caller, context)
        yield result
        return

    dunder_call = None
    try:
        metaclass = self.metaclass(context=context)
        if metaclass is not None:
            dunder_call = next(metaclass.igetattr("__call__", context))
    except exceptions.AttributeInferenceError:
        pass

    if dunder_call and dunder_call.qname() != "builtins.type.__call__":
        context = contextmod.bind_context_to_node(context, self)
        yield from dunder_call.infer_call_result(caller, context)
    else:
        # Call type.__call__ if not set metaclass
        # (since type is the default metaclass)
        yield bases.Instance(self)
def function[infer_call_result, parameter[self, caller, context]]: constant[infer what a class is returning when called] if <ast.BoolOp object at 0x7da1b1e5a530> begin[:] variable[result] assign[=] call[name[self]._infer_type_call, parameter[name[caller], name[context]]] <ast.Yield object at 0x7da1b1e5a1d0> return[None] variable[dunder_call] assign[=] constant[None] <ast.Try object at 0x7da1b1e59600> if <ast.BoolOp object at 0x7da1b1e747f0> begin[:] variable[context] assign[=] call[name[contextmod].bind_context_to_node, parameter[name[context], name[self]]] <ast.YieldFrom object at 0x7da1b1e75360>
keyword[def] identifier[infer_call_result] ( identifier[self] , identifier[caller] , identifier[context] = keyword[None] ): literal[string] keyword[if] ( identifier[self] . identifier[is_subtype_of] ( literal[string] %( identifier[BUILTINS] ,), identifier[context] ) keyword[and] identifier[len] ( identifier[caller] . identifier[args] )== literal[int] ): identifier[result] = identifier[self] . identifier[_infer_type_call] ( identifier[caller] , identifier[context] ) keyword[yield] identifier[result] keyword[return] identifier[dunder_call] = keyword[None] keyword[try] : identifier[metaclass] = identifier[self] . identifier[metaclass] ( identifier[context] = identifier[context] ) keyword[if] identifier[metaclass] keyword[is] keyword[not] keyword[None] : identifier[dunder_call] = identifier[next] ( identifier[metaclass] . identifier[igetattr] ( literal[string] , identifier[context] )) keyword[except] identifier[exceptions] . identifier[AttributeInferenceError] : keyword[pass] keyword[if] identifier[dunder_call] keyword[and] identifier[dunder_call] . identifier[qname] ()!= literal[string] : identifier[context] = identifier[contextmod] . identifier[bind_context_to_node] ( identifier[context] , identifier[self] ) keyword[yield] keyword[from] identifier[dunder_call] . identifier[infer_call_result] ( identifier[caller] , identifier[context] ) keyword[else] : keyword[yield] identifier[bases] . identifier[Instance] ( identifier[self] )
def infer_call_result(self, caller, context=None):
    """infer what a class is returning when called"""
    if self.is_subtype_of('%s.type' % (BUILTINS,), context) and len(caller.args) == 3:
        result = self._infer_type_call(caller, context)
        yield result
        return # depends on [control=['if'], data=[]]
    dunder_call = None
    try:
        metaclass = self.metaclass(context=context)
        if metaclass is not None:
            dunder_call = next(metaclass.igetattr('__call__', context)) # depends on [control=['if'], data=['metaclass']] # depends on [control=['try'], data=[]]
    except exceptions.AttributeInferenceError:
        pass # depends on [control=['except'], data=[]]
    if dunder_call and dunder_call.qname() != 'builtins.type.__call__':
        context = contextmod.bind_context_to_node(context, self)
        yield from dunder_call.infer_call_result(caller, context) # depends on [control=['if'], data=[]]
    else:
        # Call type.__call__ if not set metaclass
        # (since type is the default metaclass)
        yield bases.Instance(self)
def _generate_base_mimetypes(self):
    """
    Generate the base mimetypes as described by non customized document types.
    """
    for t in self.type_instances:
        if t.custom_mime:
            continue
        yield t.mime, (t, None, None)
def function[_generate_base_mimetypes, parameter[self]]: constant[ Generate the base mimetypes as described by non customized document types. ] for taget[name[t]] in starred[name[self].type_instances] begin[:] if name[t].custom_mime begin[:] continue <ast.Yield object at 0x7da18dc9b5e0>
keyword[def] identifier[_generate_base_mimetypes] ( identifier[self] ): literal[string] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[type_instances] : keyword[if] identifier[t] . identifier[custom_mime] : keyword[continue] keyword[yield] identifier[t] . identifier[mime] ,( identifier[t] , keyword[None] , keyword[None] )
def _generate_base_mimetypes(self):
    """
    Generate the base mimetypes as described by non customized document types.
    """
    for t in self.type_instances:
        if t.custom_mime:
            continue # depends on [control=['if'], data=[]]
        yield (t.mime, (t, None, None)) # depends on [control=['for'], data=['t']]
def run(self, scr):
    'Manage execution of keystrokes and subsequent redrawing of screen.'
    global sheet
    scr.timeout(int(options.curses_timeout))
    with suppress(curses.error):
        curses.curs_set(0)

    self.scr = scr
    numTimeouts = 0

    self.keystrokes = ''
    while True:
        if not self.sheets:
            # if no more sheets, exit
            return

        sheet = self.sheets[0]
        threading.current_thread().sheet = sheet

        try:
            sheet.draw(scr)
        except Exception as e:
            self.exceptionCaught(e)

        self.drawLeftStatus(scr, sheet)
        self.drawRightStatus(scr, sheet)  # visible during this getkeystroke

        keystroke = self.getkeystroke(scr, sheet)

        if keystroke:  # wait until next keystroke to clear statuses and previous keystrokes
            numTimeouts = 0
            if not self.prefixWaiting:
                self.keystrokes = ''

            self.statuses.clear()

            if keystroke == 'KEY_MOUSE':
                self.keystrokes = ''
                clicktype = ''
                try:
                    devid, x, y, z, bstate = curses.getmouse()
                    sheet.mouseX, sheet.mouseY = x, y
                    if bstate & curses.BUTTON_CTRL:
                        clicktype += "CTRL-"
                        bstate &= ~curses.BUTTON_CTRL
                    if bstate & curses.BUTTON_ALT:
                        clicktype += "ALT-"
                        bstate &= ~curses.BUTTON_ALT
                    if bstate & curses.BUTTON_SHIFT:
                        clicktype += "SHIFT-"
                        bstate &= ~curses.BUTTON_SHIFT

                    keystroke = clicktype + curses.mouseEvents.get(bstate, str(bstate))

                    f = self.getMouse(scr, x, y, keystroke)
                    if f:
                        if isinstance(f, str):
                            for cmd in f.split():
                                sheet.exec_keystrokes(cmd)
                        else:
                            f(y, x, keystroke)

                        self.keystrokes = keystroke
                        keystroke = ''
                except curses.error:
                    pass
                except Exception as e:
                    exceptionCaught(e)

            self.keystrokes += keystroke

        self.drawRightStatus(scr, sheet)  # visible for commands that wait for input

        if not keystroke:  # timeout instead of keypress
            pass
        elif keystroke == '^Q':
            return self.lastErrors and '\n'.join(self.lastErrors[-1])
        elif bindkeys._get(self.keystrokes):
            sheet.exec_keystrokes(self.keystrokes)
            self.prefixWaiting = False
        elif keystroke in self.allPrefixes:
            self.keystrokes = ''.join(sorted(set(self.keystrokes)))  # prefix order/quantity does not matter
            self.prefixWaiting = True
        else:
            status('no command for "%s"' % (self.keystrokes))
            self.prefixWaiting = False

        self.checkForFinishedThreads()
        self.callHook('predraw')
        catchapply(sheet.checkCursor)

        # no idle redraw unless background threads are running
        time.sleep(0)  # yield to other threads which may not have started yet
        if vd.unfinishedThreads:
            scr.timeout(options.curses_timeout)
        else:
            numTimeouts += 1
            if numTimeouts > 1:
                scr.timeout(-1)
            else:
                scr.timeout(options.curses_timeout)
def function[run, parameter[self, scr]]: constant[Manage execution of keystrokes and subsequent redrawing of screen.] <ast.Global object at 0x7da207f02050> call[name[scr].timeout, parameter[call[name[int], parameter[name[options].curses_timeout]]]] with call[name[suppress], parameter[name[curses].error]] begin[:] call[name[curses].curs_set, parameter[constant[0]]] name[self].scr assign[=] name[scr] variable[numTimeouts] assign[=] constant[0] name[self].keystrokes assign[=] constant[] while constant[True] begin[:] if <ast.UnaryOp object at 0x7da207f03460> begin[:] return[None] variable[sheet] assign[=] call[name[self].sheets][constant[0]] call[name[threading].current_thread, parameter[]].sheet assign[=] name[sheet] <ast.Try object at 0x7da207f00400> call[name[self].drawLeftStatus, parameter[name[scr], name[sheet]]] call[name[self].drawRightStatus, parameter[name[scr], name[sheet]]] variable[keystroke] assign[=] call[name[self].getkeystroke, parameter[name[scr], name[sheet]]] if name[keystroke] begin[:] variable[numTimeouts] assign[=] constant[0] if <ast.UnaryOp object at 0x7da207f00730> begin[:] name[self].keystrokes assign[=] constant[] call[name[self].statuses.clear, parameter[]] if compare[name[keystroke] equal[==] constant[KEY_MOUSE]] begin[:] name[self].keystrokes assign[=] constant[] variable[clicktype] assign[=] constant[] <ast.Try object at 0x7da207f013f0> <ast.AugAssign object at 0x7da207f028c0> call[name[self].drawRightStatus, parameter[name[scr], name[sheet]]] if <ast.UnaryOp object at 0x7da207f00970> begin[:] pass call[name[self].checkForFinishedThreads, parameter[]] call[name[self].callHook, parameter[constant[predraw]]] call[name[catchapply], parameter[name[sheet].checkCursor]] call[name[time].sleep, parameter[constant[0]]] if name[vd].unfinishedThreads begin[:] call[name[scr].timeout, parameter[name[options].curses_timeout]]
keyword[def] identifier[run] ( identifier[self] , identifier[scr] ): literal[string] keyword[global] identifier[sheet] identifier[scr] . identifier[timeout] ( identifier[int] ( identifier[options] . identifier[curses_timeout] )) keyword[with] identifier[suppress] ( identifier[curses] . identifier[error] ): identifier[curses] . identifier[curs_set] ( literal[int] ) identifier[self] . identifier[scr] = identifier[scr] identifier[numTimeouts] = literal[int] identifier[self] . identifier[keystrokes] = literal[string] keyword[while] keyword[True] : keyword[if] keyword[not] identifier[self] . identifier[sheets] : keyword[return] identifier[sheet] = identifier[self] . identifier[sheets] [ literal[int] ] identifier[threading] . identifier[current_thread] (). identifier[sheet] = identifier[sheet] keyword[try] : identifier[sheet] . identifier[draw] ( identifier[scr] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[self] . identifier[exceptionCaught] ( identifier[e] ) identifier[self] . identifier[drawLeftStatus] ( identifier[scr] , identifier[sheet] ) identifier[self] . identifier[drawRightStatus] ( identifier[scr] , identifier[sheet] ) identifier[keystroke] = identifier[self] . identifier[getkeystroke] ( identifier[scr] , identifier[sheet] ) keyword[if] identifier[keystroke] : identifier[numTimeouts] = literal[int] keyword[if] keyword[not] identifier[self] . identifier[prefixWaiting] : identifier[self] . identifier[keystrokes] = literal[string] identifier[self] . identifier[statuses] . identifier[clear] () keyword[if] identifier[keystroke] == literal[string] : identifier[self] . identifier[keystrokes] = literal[string] identifier[clicktype] = literal[string] keyword[try] : identifier[devid] , identifier[x] , identifier[y] , identifier[z] , identifier[bstate] = identifier[curses] . identifier[getmouse] () identifier[sheet] . identifier[mouseX] , identifier[sheet] . identifier[mouseY] = identifier[x] , identifier[y] keyword[if] identifier[bstate] & identifier[curses] . identifier[BUTTON_CTRL] : identifier[clicktype] += literal[string] identifier[bstate] &=~ identifier[curses] . identifier[BUTTON_CTRL] keyword[if] identifier[bstate] & identifier[curses] . identifier[BUTTON_ALT] : identifier[clicktype] += literal[string] identifier[bstate] &=~ identifier[curses] . identifier[BUTTON_ALT] keyword[if] identifier[bstate] & identifier[curses] . identifier[BUTTON_SHIFT] : identifier[clicktype] += literal[string] identifier[bstate] &=~ identifier[curses] . identifier[BUTTON_SHIFT] identifier[keystroke] = identifier[clicktype] + identifier[curses] . identifier[mouseEvents] . identifier[get] ( identifier[bstate] , identifier[str] ( identifier[bstate] )) identifier[f] = identifier[self] . identifier[getMouse] ( identifier[scr] , identifier[x] , identifier[y] , identifier[keystroke] ) keyword[if] identifier[f] : keyword[if] identifier[isinstance] ( identifier[f] , identifier[str] ): keyword[for] identifier[cmd] keyword[in] identifier[f] . identifier[split] (): identifier[sheet] . identifier[exec_keystrokes] ( identifier[cmd] ) keyword[else] : identifier[f] ( identifier[y] , identifier[x] , identifier[keystroke] ) identifier[self] . identifier[keystrokes] = identifier[keystroke] identifier[keystroke] = literal[string] keyword[except] identifier[curses] . identifier[error] : keyword[pass] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[exceptionCaught] ( identifier[e] ) identifier[self] . identifier[keystrokes] += identifier[keystroke] identifier[self] . 
identifier[drawRightStatus] ( identifier[scr] , identifier[sheet] ) keyword[if] keyword[not] identifier[keystroke] : keyword[pass] keyword[elif] identifier[keystroke] == literal[string] : keyword[return] identifier[self] . identifier[lastErrors] keyword[and] literal[string] . identifier[join] ( identifier[self] . identifier[lastErrors] [- literal[int] ]) keyword[elif] identifier[bindkeys] . identifier[_get] ( identifier[self] . identifier[keystrokes] ): identifier[sheet] . identifier[exec_keystrokes] ( identifier[self] . identifier[keystrokes] ) identifier[self] . identifier[prefixWaiting] = keyword[False] keyword[elif] identifier[keystroke] keyword[in] identifier[self] . identifier[allPrefixes] : identifier[self] . identifier[keystrokes] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[set] ( identifier[self] . identifier[keystrokes] ))) identifier[self] . identifier[prefixWaiting] = keyword[True] keyword[else] : identifier[status] ( literal[string] %( identifier[self] . identifier[keystrokes] )) identifier[self] . identifier[prefixWaiting] = keyword[False] identifier[self] . identifier[checkForFinishedThreads] () identifier[self] . identifier[callHook] ( literal[string] ) identifier[catchapply] ( identifier[sheet] . identifier[checkCursor] ) identifier[time] . identifier[sleep] ( literal[int] ) keyword[if] identifier[vd] . identifier[unfinishedThreads] : identifier[scr] . identifier[timeout] ( identifier[options] . identifier[curses_timeout] ) keyword[else] : identifier[numTimeouts] += literal[int] keyword[if] identifier[numTimeouts] > literal[int] : identifier[scr] . identifier[timeout] (- literal[int] ) keyword[else] : identifier[scr] . identifier[timeout] ( identifier[options] . identifier[curses_timeout] )
def run(self, scr):
    """Manage execution of keystrokes and subsequent redrawing of screen."""
    global sheet
    scr.timeout(int(options.curses_timeout))
    with suppress(curses.error):
        curses.curs_set(0) # depends on [control=['with'], data=[]]
    self.scr = scr
    numTimeouts = 0
    self.keystrokes = ''
    while True:
        if not self.sheets:
            # if no more sheets, exit
            return # depends on [control=['if'], data=[]]
        sheet = self.sheets[0]
        threading.current_thread().sheet = sheet
        try:
            sheet.draw(scr) # depends on [control=['try'], data=[]]
        except Exception as e:
            self.exceptionCaught(e) # depends on [control=['except'], data=['e']]
        self.drawLeftStatus(scr, sheet)
        self.drawRightStatus(scr, sheet) # visible during this getkeystroke
        keystroke = self.getkeystroke(scr, sheet)
        if keystroke: # wait until next keystroke to clear statuses and previous keystrokes
            numTimeouts = 0
            if not self.prefixWaiting:
                self.keystrokes = '' # depends on [control=['if'], data=[]]
            self.statuses.clear()
            if keystroke == 'KEY_MOUSE':
                self.keystrokes = ''
                clicktype = ''
                try:
                    (devid, x, y, z, bstate) = curses.getmouse()
                    (sheet.mouseX, sheet.mouseY) = (x, y)
                    if bstate & curses.BUTTON_CTRL:
                        clicktype += 'CTRL-'
                        bstate &= ~curses.BUTTON_CTRL # depends on [control=['if'], data=[]]
                    if bstate & curses.BUTTON_ALT:
                        clicktype += 'ALT-'
                        bstate &= ~curses.BUTTON_ALT # depends on [control=['if'], data=[]]
                    if bstate & curses.BUTTON_SHIFT:
                        clicktype += 'SHIFT-'
                        bstate &= ~curses.BUTTON_SHIFT # depends on [control=['if'], data=[]]
                    keystroke = clicktype + curses.mouseEvents.get(bstate, str(bstate))
                    f = self.getMouse(scr, x, y, keystroke)
                    if f:
                        if isinstance(f, str):
                            for cmd in f.split():
                                sheet.exec_keystrokes(cmd) # depends on [control=['for'], data=['cmd']] # depends on [control=['if'], data=[]]
                        else:
                            f(y, x, keystroke)
                        self.keystrokes = keystroke
                        keystroke = '' # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
                except curses.error:
                    pass # depends on [control=['except'], data=[]]
                except Exception as e:
                    exceptionCaught(e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['keystroke']]
            self.keystrokes += keystroke # depends on [control=['if'], data=[]]
        self.drawRightStatus(scr, sheet) # visible for commands that wait for input
        if not keystroke: # timeout instead of keypress
            pass # depends on [control=['if'], data=[]]
        elif keystroke == '^Q':
            return self.lastErrors and '\n'.join(self.lastErrors[-1]) # depends on [control=['if'], data=[]]
        elif bindkeys._get(self.keystrokes):
            sheet.exec_keystrokes(self.keystrokes)
            self.prefixWaiting = False # depends on [control=['if'], data=[]]
        elif keystroke in self.allPrefixes:
            self.keystrokes = ''.join(sorted(set(self.keystrokes))) # prefix order/quantity does not matter
            self.prefixWaiting = True # depends on [control=['if'], data=[]]
        else:
            status('no command for "%s"' % self.keystrokes)
            self.prefixWaiting = False
        self.checkForFinishedThreads()
        self.callHook('predraw')
        catchapply(sheet.checkCursor)
        # no idle redraw unless background threads are running
        time.sleep(0) # yield to other threads which may not have started yet
        if vd.unfinishedThreads:
            scr.timeout(options.curses_timeout) # depends on [control=['if'], data=[]]
        else:
            numTimeouts += 1
            if numTimeouts > 1:
                scr.timeout(-1) # depends on [control=['if'], data=[]]
            else:
                scr.timeout(options.curses_timeout) # depends on [control=['while'], data=[]]
def market_if_touched_replace(self, accountID, orderID, **kwargs):
    """
    Shortcut to replace a pending MarketIfTouched Order in an Account

    Args:
        accountID : The ID of the Account
        orderID : The ID of the MarketIfTouched Order to replace
        kwargs : The arguments to create a MarketIfTouchedOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    return self.replace(
        accountID,
        orderID,
        order=MarketIfTouchedOrderRequest(**kwargs)
    )
def function[market_if_touched_replace, parameter[self, accountID, orderID]]: constant[ Shortcut to replace a pending MarketIfTouched Order in an Account Args: accountID : The ID of the Account orderID : The ID of the MarketIfTouched Order to replace kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request ] return[call[name[self].replace, parameter[name[accountID], name[orderID]]]]
keyword[def] identifier[market_if_touched_replace] ( identifier[self] , identifier[accountID] , identifier[orderID] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[replace] ( identifier[accountID] , identifier[orderID] , identifier[order] = identifier[MarketIfTouchedOrderRequest] (** identifier[kwargs] ) )
def market_if_touched_replace(self, accountID, orderID, **kwargs):
    """
    Shortcut to replace a pending MarketIfTouched Order in an Account

    Args:
        accountID : The ID of the Account
        orderID : The ID of the MarketIfTouched Order to replace
        kwargs : The arguments to create a MarketIfTouchedOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    return self.replace(accountID, orderID, order=MarketIfTouchedOrderRequest(**kwargs))
def kill_workflow(self):
    '''Kills the workflow.

    See also
    --------
    :func:`tmserver.api.workflow.kill_workflow`
    :class:`tmlib.workflow.workflow.Workflow`
    '''
    logger.info('kill workflow of experiment "%s"', self.experiment_name)
    content = dict()
    url = self._build_api_url(
        '/experiments/{experiment_id}/workflow/kill'.format(
            experiment_id=self._experiment_id
        )
    )
    res = self._session.post(url)
    res.raise_for_status()
def function[kill_workflow, parameter[self]]: constant[Kills the workflow. See also -------- :func:`tmserver.api.workflow.kill_workflow` :class:`tmlib.workflow.workflow.Workflow` ] call[name[logger].info, parameter[constant[kill workflow of experiment "%s"], name[self].experiment_name]] variable[content] assign[=] call[name[dict], parameter[]] variable[url] assign[=] call[name[self]._build_api_url, parameter[call[constant[/experiments/{experiment_id}/workflow/kill].format, parameter[]]]] variable[res] assign[=] call[name[self]._session.post, parameter[name[url]]] call[name[res].raise_for_status, parameter[]]
keyword[def] identifier[kill_workflow] ( identifier[self] ): literal[string] identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[experiment_name] ) identifier[content] = identifier[dict] () identifier[url] = identifier[self] . identifier[_build_api_url] ( literal[string] . identifier[format] ( identifier[experiment_id] = identifier[self] . identifier[_experiment_id] ) ) identifier[res] = identifier[self] . identifier[_session] . identifier[post] ( identifier[url] ) identifier[res] . identifier[raise_for_status] ()
def kill_workflow(self):
    """Kills the workflow.

    See also
    --------
    :func:`tmserver.api.workflow.kill_workflow`
    :class:`tmlib.workflow.workflow.Workflow`
    """
    logger.info('kill workflow of experiment "%s"', self.experiment_name)
    content = dict()
    url = self._build_api_url('/experiments/{experiment_id}/workflow/kill'.format(experiment_id=self._experiment_id))
    res = self._session.post(url)
    res.raise_for_status()
def set_working_directory(self, dirname):
    """Set current working directory.

    In the workingdirectory and explorer plugins.
    """
    if dirname:
        self.main.workingdirectory.chdir(dirname, refresh_explorer=True,
                                         refresh_console=False)
def function[set_working_directory, parameter[self, dirname]]: constant[Set current working directory. In the workingdirectory and explorer plugins. ] if name[dirname] begin[:] call[name[self].main.workingdirectory.chdir, parameter[name[dirname]]]
keyword[def] identifier[set_working_directory] ( identifier[self] , identifier[dirname] ): literal[string] keyword[if] identifier[dirname] : identifier[self] . identifier[main] . identifier[workingdirectory] . identifier[chdir] ( identifier[dirname] , identifier[refresh_explorer] = keyword[True] , identifier[refresh_console] = keyword[False] )
def set_working_directory(self, dirname):
    """Set current working directory.

    In the workingdirectory and explorer plugins.
    """
    if dirname:
        self.main.workingdirectory.chdir(dirname, refresh_explorer=True, refresh_console=False) # depends on [control=['if'], data=[]]
def get_fieldsets(self, request, obj=None):
    """
    Add fieldsets of placeholders to the list of already existing fieldsets.
    """
    # some ugly business to remove freeze_date
    # from the field list
    general_module = {
        'fields': list(self.general_fields),
        'classes': ('module-general',),
    }
    default_fieldsets = list(self.fieldsets)
    if not request.user.has_perm('pages.can_freeze'):
        general_module['fields'].remove('freeze_date')
    if not request.user.has_perm('pages.can_publish'):
        general_module['fields'].remove('status')
    default_fieldsets[0][1] = general_module
    placeholder_fieldsets = []
    template = get_template_from_request(request, obj)
    for placeholder in get_placeholders(template):
        if placeholder.name not in self.mandatory_placeholders:
            placeholder_fieldsets.append(placeholder.name)
    additional_fieldsets = []
    # meta fields
    metadata_fieldsets = [f['name'] for f in self.metadata_fields]
    additional_fieldsets.append((_('Metadata'), {
        'fields': metadata_fieldsets,
        'classes': ('module-content', 'grp-collapse grp-closed'),
    }))
    additional_fieldsets.append((_('Content'), {
        'fields': placeholder_fieldsets,
        'classes': ('module-content',),
    }))
    return default_fieldsets + additional_fieldsets
def function[get_fieldsets, parameter[self, request, obj]]: constant[ Add fieldsets of placeholders to the list of already existing fieldsets. ] variable[general_module] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d34d30>, <ast.Constant object at 0x7da1b1d37af0>], [<ast.Call object at 0x7da1b1d35c30>, <ast.Tuple object at 0x7da1b1d377c0>]] variable[default_fieldsets] assign[=] call[name[list], parameter[name[self].fieldsets]] if <ast.UnaryOp object at 0x7da1b1d350f0> begin[:] call[call[name[general_module]][constant[fields]].remove, parameter[constant[freeze_date]]] if <ast.UnaryOp object at 0x7da1b1d34b80> begin[:] call[call[name[general_module]][constant[fields]].remove, parameter[constant[status]]] call[call[name[default_fieldsets]][constant[0]]][constant[1]] assign[=] name[general_module] variable[placeholder_fieldsets] assign[=] list[[]] variable[template] assign[=] call[name[get_template_from_request], parameter[name[request], name[obj]]] for taget[name[placeholder]] in starred[call[name[get_placeholders], parameter[name[template]]]] begin[:] if compare[name[placeholder].name <ast.NotIn object at 0x7da2590d7190> name[self].mandatory_placeholders] begin[:] call[name[placeholder_fieldsets].append, parameter[name[placeholder].name]] variable[additional_fieldsets] assign[=] list[[]] variable[metadata_fieldsets] assign[=] <ast.ListComp object at 0x7da1b1e09120> call[name[additional_fieldsets].append, parameter[tuple[[<ast.Call object at 0x7da1b1e0a260>, <ast.Dict object at 0x7da1b1e0b790>]]]] call[name[additional_fieldsets].append, parameter[tuple[[<ast.Call object at 0x7da1b1e0ba30>, <ast.Dict object at 0x7da1b1e09450>]]]] return[binary_operation[name[default_fieldsets] + name[additional_fieldsets]]]
keyword[def] identifier[get_fieldsets] ( identifier[self] , identifier[request] , identifier[obj] = keyword[None] ): literal[string] identifier[general_module] ={ literal[string] : identifier[list] ( identifier[self] . identifier[general_fields] ), literal[string] :( literal[string] ,), } identifier[default_fieldsets] = identifier[list] ( identifier[self] . identifier[fieldsets] ) keyword[if] keyword[not] identifier[request] . identifier[user] . identifier[has_perm] ( literal[string] ): identifier[general_module] [ literal[string] ]. identifier[remove] ( literal[string] ) keyword[if] keyword[not] identifier[request] . identifier[user] . identifier[has_perm] ( literal[string] ): identifier[general_module] [ literal[string] ]. identifier[remove] ( literal[string] ) identifier[default_fieldsets] [ literal[int] ][ literal[int] ]= identifier[general_module] identifier[placeholder_fieldsets] =[] identifier[template] = identifier[get_template_from_request] ( identifier[request] , identifier[obj] ) keyword[for] identifier[placeholder] keyword[in] identifier[get_placeholders] ( identifier[template] ): keyword[if] identifier[placeholder] . identifier[name] keyword[not] keyword[in] identifier[self] . identifier[mandatory_placeholders] : identifier[placeholder_fieldsets] . identifier[append] ( identifier[placeholder] . identifier[name] ) identifier[additional_fieldsets] =[] identifier[metadata_fieldsets] =[ identifier[f] [ literal[string] ] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[metadata_fields] ] identifier[additional_fieldsets] . identifier[append] (( identifier[_] ( literal[string] ),{ literal[string] : identifier[metadata_fieldsets] , literal[string] :( literal[string] , literal[string] ), })) identifier[additional_fieldsets] . identifier[append] (( identifier[_] ( literal[string] ),{ literal[string] : identifier[placeholder_fieldsets] , literal[string] :( literal[string] ,), })) keyword[return] identifier[default_fieldsets] + identifier[additional_fieldsets]
def get_fieldsets(self, request, obj=None):
    """
    Add fieldsets of placeholders to the list of already existing fieldsets.
    """
    # some ugly business to remove freeze_date
    # from the field list
    general_module = {'fields': list(self.general_fields), 'classes': ('module-general',)}
    default_fieldsets = list(self.fieldsets)
    if not request.user.has_perm('pages.can_freeze'):
        general_module['fields'].remove('freeze_date') # depends on [control=['if'], data=[]]
    if not request.user.has_perm('pages.can_publish'):
        general_module['fields'].remove('status') # depends on [control=['if'], data=[]]
    default_fieldsets[0][1] = general_module
    placeholder_fieldsets = []
    template = get_template_from_request(request, obj)
    for placeholder in get_placeholders(template):
        if placeholder.name not in self.mandatory_placeholders:
            placeholder_fieldsets.append(placeholder.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['placeholder']]
    additional_fieldsets = []
    # meta fields
    metadata_fieldsets = [f['name'] for f in self.metadata_fields]
    additional_fieldsets.append((_('Metadata'), {'fields': metadata_fieldsets, 'classes': ('module-content', 'grp-collapse grp-closed')}))
    additional_fieldsets.append((_('Content'), {'fields': placeholder_fieldsets, 'classes': ('module-content',)}))
    return default_fieldsets + additional_fieldsets
def validate_field(field, allowed_keys, allowed_types):
    """Validate field is allowed and valid."""
    for key, value in field.items():
        if key not in allowed_keys:
            raise exceptions.ParametersFieldError(key, "property")
        if key == defs.TYPE:
            if value not in allowed_types:
                raise exceptions.ParametersFieldError(value, key)
        if key == defs.VALUE:
            if not is_valid_field_name(value):
                raise exceptions.ParametersFieldError(value, "field name")
def function[validate_field, parameter[field, allowed_keys, allowed_types]]: constant[Validate field is allowed and valid.] for taget[tuple[[<ast.Name object at 0x7da1b11a4bb0>, <ast.Name object at 0x7da1b11a6bf0>]]] in starred[call[name[field].items, parameter[]]] begin[:] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[allowed_keys]] begin[:] <ast.Raise object at 0x7da1b11a5b10> if compare[name[key] equal[==] name[defs].TYPE] begin[:] if compare[name[value] <ast.NotIn object at 0x7da2590d7190> name[allowed_types]] begin[:] <ast.Raise object at 0x7da1b11a41c0> if compare[name[key] equal[==] name[defs].VALUE] begin[:] if <ast.UnaryOp object at 0x7da1b12f2980> begin[:] <ast.Raise object at 0x7da1b12f25c0>
keyword[def] identifier[validate_field] ( identifier[field] , identifier[allowed_keys] , identifier[allowed_types] ): literal[string] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[field] . identifier[items] (): keyword[if] identifier[key] keyword[not] keyword[in] identifier[allowed_keys] : keyword[raise] identifier[exceptions] . identifier[ParametersFieldError] ( identifier[key] , literal[string] ) keyword[if] identifier[key] == identifier[defs] . identifier[TYPE] : keyword[if] identifier[value] keyword[not] keyword[in] identifier[allowed_types] : keyword[raise] identifier[exceptions] . identifier[ParametersFieldError] ( identifier[value] , identifier[key] ) keyword[if] identifier[key] == identifier[defs] . identifier[VALUE] : keyword[if] keyword[not] identifier[is_valid_field_name] ( identifier[value] ): keyword[raise] identifier[exceptions] . identifier[ParametersFieldError] ( identifier[value] , literal[string] )
def validate_field(field, allowed_keys, allowed_types): """Validate field is allowed and valid.""" for (key, value) in field.items(): if key not in allowed_keys: raise exceptions.ParametersFieldError(key, 'property') # depends on [control=['if'], data=['key']] if key == defs.TYPE: if value not in allowed_types: raise exceptions.ParametersFieldError(value, key) # depends on [control=['if'], data=['value']] # depends on [control=['if'], data=['key']] if key == defs.VALUE: if not is_valid_field_name(value): raise exceptions.ParametersFieldError(value, 'field name') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
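A minimal usage sketch of validate_field. The defs constants, the exceptions module, and is_valid_field_name are internal to the surrounding package, so the stand-ins below are assumptions, not the real objects:

# Hypothetical stand-ins for the module-internal defs / exceptions / helper.
class defs:
    TYPE = "type"
    VALUE = "value"

class ParametersFieldError(Exception):
    pass

class exceptions:
    ParametersFieldError = ParametersFieldError

def is_valid_field_name(name):
    return name.isidentifier()

def validate_field(field, allowed_keys, allowed_types):
    for key, value in field.items():
        if key not in allowed_keys:
            raise exceptions.ParametersFieldError(key, "property")
        if key == defs.TYPE and value not in allowed_types:
            raise exceptions.ParametersFieldError(value, key)
        if key == defs.VALUE and not is_valid_field_name(value):
            raise exceptions.ParametersFieldError(value, "field name")

validate_field({"type": "string", "value": "first_name"},
               allowed_keys={"type", "value"}, allowed_types={"string"})
print("field accepted")
try:
    validate_field({"type": "blob", "value": "x"}, {"type", "value"}, {"string"})
except ParametersFieldError as err:
    print("rejected:", err)  # rejected: ('blob', 'type')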
def keep_params(self, base_key, *params): """ Method to keep only specific parameters from a parameter documentation. This method extracts the given `param` from the `base_key` item in the :attr:`params` dictionary and creates a new item with the original documentation with only the description of the param. This method works for ``'Parameters'`` like sections. The new docstring with the selected parts will be accessible as ``base_key + '.' + '|'.join(params)``, e.g. ``'original_key.param1|param2'`` Parameters ---------- base_key: str key in the :attr:`params` dictionary ``*params`` str. Parameter identifier of which the documentations shall be in the new section See Also -------- keep_types, delete_params Examples -------- To extract just two parameters from a function and reuse their docstrings, you can type:: >>> from docrep import DocstringProcessor >>> d = DocstringProcessor() >>> @d.get_sectionsf('do_something') ... def do_something(a=1, b=2, c=3): ... ''' ... That's %(doc_key)s ... ... Parameters ... ---------- ... a: int, optional ... A dummy parameter description ... b: int, optional ... A second dummy parameter that will be excluded ... c: float, optional ... A third parameter''' ... print(a) >>> d.keep_params('do_something.parameters', 'a', 'c') >>> @d.dedent ... def do_less(a=1, c=4): ... ''' ... My second function with only `a` and `c` ... ... Parameters ... ---------- ... %(do_something.parameters.a|c)s''' ... pass >>> print(do_less.__doc__) My second function with only `a` and `c` <BLANKLINE> Parameters ---------- a: int, optional A dummy parameter description c: float, optional A third parameter Equivalently, you can use the :meth:`delete_params` method to remove parameters:: >>> d.delete_params('do_something.parameters', 'b') >>> @d.dedent ... def do_less(a=1, c=4): ... ''' ... My second function with only `a` and `c` ... ... Parameters ... ---------- ... %(do_something.parameters.no_b)s''' ... pass """ self.params[base_key + '.' + '|'.join(params)] = self.keep_params_s( self.params[base_key], params)
def function[keep_params, parameter[self, base_key]]: constant[ Method to keep only specific parameters from a parameter documentation. This method extracts the given `param` from the `base_key` item in the :attr:`params` dictionary and creates a new item with the original documentation with only the description of the param. This method works for ``'Parameters'`` like sections. The new docstring with the selected parts will be accessible as ``base_key + '.' + '|'.join(params)``, e.g. ``'original_key.param1|param2'`` Parameters ---------- base_key: str key in the :attr:`params` dictionary ``*params`` str. Parameter identifier of which the documentations shall be in the new section See Also -------- keep_types, delete_params Examples -------- To extract just two parameters from a function and reuse their docstrings, you can type:: >>> from docrep import DocstringProcessor >>> d = DocstringProcessor() >>> @d.get_sectionsf('do_something') ... def do_something(a=1, b=2, c=3): ... ''' ... That's %(doc_key)s ... ... Parameters ... ---------- ... a: int, optional ... A dummy parameter description ... b: int, optional ... A second dummy parameter that will be excluded ... c: float, optional ... A third parameter''' ... print(a) >>> d.keep_params('do_something.parameters', 'a', 'c') >>> @d.dedent ... def do_less(a=1, c=4): ... ''' ... My second function with only `a` and `c` ... ... Parameters ... ---------- ... %(do_something.parameters.a|c)s''' ... pass >>> print(do_less.__doc__) My second function with only `a` and `c` <BLANKLINE> Parameters ---------- a: int, optional A dummy parameter description c: float, optional A third parameter Equivalently, you can use the :meth:`delete_params` method to remove parameters:: >>> d.delete_params('do_something.parameters', 'b') >>> @d.dedent ... def do_less(a=1, c=4): ... ''' ... My second function with only `a` and `c` ... ... Parameters ... ---------- ... %(do_something.parameters.no_b)s''' ... pass ] call[name[self].params][binary_operation[binary_operation[name[base_key] + constant[.]] + call[constant[|].join, parameter[name[params]]]]] assign[=] call[name[self].keep_params_s, parameter[call[name[self].params][name[base_key]], name[params]]]
keyword[def] identifier[keep_params] ( identifier[self] , identifier[base_key] ,* identifier[params] ): literal[string] identifier[self] . identifier[params] [ identifier[base_key] + literal[string] + literal[string] . identifier[join] ( identifier[params] )]= identifier[self] . identifier[keep_params_s] ( identifier[self] . identifier[params] [ identifier[base_key] ], identifier[params] )
def keep_params(self, base_key, *params): """ Method to keep only specific parameters from a parameter documentation. This method extracts the given `param` from the `base_key` item in the :attr:`params` dictionary and creates a new item with the original documentation with only the description of the param. This method works for ``'Parameters'`` like sections. The new docstring with the selected parts will be accessible as ``base_key + '.' + '|'.join(params)``, e.g. ``'original_key.param1|param2'`` Parameters ---------- base_key: str key in the :attr:`params` dictionary ``*params`` str. Parameter identifier of which the documentations shall be in the new section See Also -------- keep_types, delete_params Examples -------- To extract just two parameters from a function and reuse their docstrings, you can type:: >>> from docrep import DocstringProcessor >>> d = DocstringProcessor() >>> @d.get_sectionsf('do_something') ... def do_something(a=1, b=2, c=3): ... ''' ... That's %(doc_key)s ... ... Parameters ... ---------- ... a: int, optional ... A dummy parameter description ... b: int, optional ... A second dummy parameter that will be excluded ... c: float, optional ... A third parameter''' ... print(a) >>> d.keep_params('do_something.parameters', 'a', 'c') >>> @d.dedent ... def do_less(a=1, c=4): ... ''' ... My second function with only `a` and `c` ... ... Parameters ... ---------- ... %(do_something.parameters.a|c)s''' ... pass >>> print(do_less.__doc__) My second function with only `a` and `c` <BLANKLINE> Parameters ---------- a: int, optional A dummy parameter description c: float, optional A third parameter Equivalently, you can use the :meth:`delete_params` method to remove parameters:: >>> d.delete_params('do_something.parameters', 'b') >>> @d.dedent ... def do_less(a=1, c=4): ... ''' ... My second function with only `a` and `c` ... ... Parameters ... ---------- ... %(do_something.parameters.no_b)s''' ... pass """ self.params[base_key + '.' + '|'.join(params)] = self.keep_params_s(self.params[base_key], params)
def compare(self, origin, pattern):
    """
    Args:
        origin (:obj:`str`): original string
        pattern (:obj:`str`): Regexp pattern string

    Returns:
        bool: True if it matches, otherwise False.

    """
    if origin is None or pattern is None:
        return False
    return re.match(pattern, origin) is not None
def function[compare, parameter[self, origin, pattern]]: constant[ Args: origin (:obj:`str`): original string pattern (:obj:`str`): Regexp pattern string Returns: bool: True if it matches, otherwise False. ] if <ast.BoolOp object at 0x7da2044c2800> begin[:] return[constant[False]] return[compare[call[name[re].match, parameter[name[pattern], name[origin]]] is_not constant[None]]]
keyword[def] identifier[compare] ( identifier[self] , identifier[origin] , identifier[pattern] ): literal[string] keyword[if] identifier[origin] keyword[is] keyword[None] keyword[or] identifier[pattern] keyword[is] keyword[None] : keyword[return] keyword[False] keyword[return] identifier[re] . identifier[match] ( identifier[pattern] , identifier[origin] ) keyword[is] keyword[not] keyword[None]
def compare(self, origin, pattern):
    """
    Args:
        origin (:obj:`str`): original string
        pattern (:obj:`str`): Regexp pattern string

    Returns:
        bool: True if it matches, otherwise False.

    """
    if origin is None or pattern is None:
        return False # depends on [control=['if'], data=[]]
    return re.match(pattern, origin) is not None
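Worth noting: re.match anchors only at the start of the string, so this behaves as a prefix match unless the pattern itself ends with $. A standalone sketch of the same logic:

import re

def compare(origin, pattern):
    if origin is None or pattern is None:
        return False
    return re.match(pattern, origin) is not None

assert compare("server-01", r"server-\d+")         # matches at position 0
assert not compare("my-server-01", r"server-\d+")  # no match at the start
assert not compare(None, r".*")                    # None short-circuits to False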
def pull_request(self, file): """ Create a pull request :param file: File to push through pull request :return: URL of the PullRequest or Proxy Error """ uri = "{api}/repos/{upstream}/pulls".format( api=self.github_api_url, upstream=self.upstream, path=file.path ) params = { "title": "[Proxy] {message}".format(message=file.logs), "body": "", "head": "{origin}:{branch}".format(origin=self.origin.split("/")[0], branch=file.branch), "base": self.master_upstream } data = self.request("POST", uri, data=params) if data.status_code == 201: return json.loads(data.content.decode("utf-8"))["html_url"] else: reply = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, reply["message"], step="pull_request", context={ "uri": uri, "params": params } )
def function[pull_request, parameter[self, file]]: constant[ Create a pull request :param file: File to push through pull request :return: URL of the PullRequest or Proxy Error ] variable[uri] assign[=] call[constant[{api}/repos/{upstream}/pulls].format, parameter[]] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b28ad720>, <ast.Constant object at 0x7da1b28ae200>, <ast.Constant object at 0x7da1b28ac8e0>, <ast.Constant object at 0x7da1b28ae830>], [<ast.Call object at 0x7da1b28aff10>, <ast.Constant object at 0x7da1b28ac0d0>, <ast.Call object at 0x7da1b28aef20>, <ast.Attribute object at 0x7da1b28aedd0>]] variable[data] assign[=] call[name[self].request, parameter[constant[POST], name[uri]]] if compare[name[data].status_code equal[==] constant[201]] begin[:] return[call[call[name[json].loads, parameter[call[name[data].content.decode, parameter[constant[utf-8]]]]]][constant[html_url]]]
keyword[def] identifier[pull_request] ( identifier[self] , identifier[file] ): literal[string] identifier[uri] = literal[string] . identifier[format] ( identifier[api] = identifier[self] . identifier[github_api_url] , identifier[upstream] = identifier[self] . identifier[upstream] , identifier[path] = identifier[file] . identifier[path] ) identifier[params] ={ literal[string] : literal[string] . identifier[format] ( identifier[message] = identifier[file] . identifier[logs] ), literal[string] : literal[string] , literal[string] : literal[string] . identifier[format] ( identifier[origin] = identifier[self] . identifier[origin] . identifier[split] ( literal[string] )[ literal[int] ], identifier[branch] = identifier[file] . identifier[branch] ), literal[string] : identifier[self] . identifier[master_upstream] } identifier[data] = identifier[self] . identifier[request] ( literal[string] , identifier[uri] , identifier[data] = identifier[params] ) keyword[if] identifier[data] . identifier[status_code] == literal[int] : keyword[return] identifier[json] . identifier[loads] ( identifier[data] . identifier[content] . identifier[decode] ( literal[string] ))[ literal[string] ] keyword[else] : identifier[reply] = identifier[json] . identifier[loads] ( identifier[data] . identifier[content] . identifier[decode] ( literal[string] )) keyword[return] identifier[self] . identifier[ProxyError] ( identifier[data] . identifier[status_code] , identifier[reply] [ literal[string] ], identifier[step] = literal[string] , identifier[context] ={ literal[string] : identifier[uri] , literal[string] : identifier[params] } )
def pull_request(self, file): """ Create a pull request :param file: File to push through pull request :return: URL of the PullRequest or Proxy Error """ uri = '{api}/repos/{upstream}/pulls'.format(api=self.github_api_url, upstream=self.upstream, path=file.path) params = {'title': '[Proxy] {message}'.format(message=file.logs), 'body': '', 'head': '{origin}:{branch}'.format(origin=self.origin.split('/')[0], branch=file.branch), 'base': self.master_upstream} data = self.request('POST', uri, data=params) if data.status_code == 201: return json.loads(data.content.decode('utf-8'))['html_url'] # depends on [control=['if'], data=[]] else: reply = json.loads(data.content.decode('utf-8')) return self.ProxyError(data.status_code, reply['message'], step='pull_request', context={'uri': uri, 'params': params})
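A sketch of the request body this method assembles for GitHub's create-pull-request endpoint (POST /repos/{owner}/{repo}/pulls, which answers 201 on success). The repository names and branch below are hypothetical placeholders for self.upstream, self.origin, file.branch and file.logs:

import json

github_api_url = "https://api.github.com"  # hypothetical self.github_api_url
upstream = "org/repo"                      # hypothetical self.upstream
origin = "fork-owner/repo"                 # hypothetical self.origin

uri = "{api}/repos/{upstream}/pulls".format(api=github_api_url, upstream=upstream)
params = {
    "title": "[Proxy] Fix typo in chapter 1",  # built from file.logs
    "body": "",
    "head": "{origin}:{branch}".format(origin=origin.split("/")[0], branch="patch-1"),
    "base": "master",                          # self.master_upstream
}
print(uri)
print(json.dumps(params, indent=2))  # the JSON body that gets POSTed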
def update_eol(self, os_name): """Update end of line status.""" os_name = to_text_string(os_name) value = {"nt": "CRLF", "posix": "LF"}.get(os_name, "CR") self.set_value(value)
def function[update_eol, parameter[self, os_name]]: constant[Update end of line status.] variable[os_name] assign[=] call[name[to_text_string], parameter[name[os_name]]] variable[value] assign[=] call[dictionary[[<ast.Constant object at 0x7da1b2042650>, <ast.Constant object at 0x7da1b20420b0>], [<ast.Constant object at 0x7da1b20436a0>, <ast.Constant object at 0x7da1b20421a0>]].get, parameter[name[os_name], constant[CR]]] call[name[self].set_value, parameter[name[value]]]
keyword[def] identifier[update_eol] ( identifier[self] , identifier[os_name] ): literal[string] identifier[os_name] = identifier[to_text_string] ( identifier[os_name] ) identifier[value] ={ literal[string] : literal[string] , literal[string] : literal[string] }. identifier[get] ( identifier[os_name] , literal[string] ) identifier[self] . identifier[set_value] ( identifier[value] )
def update_eol(self, os_name): """Update end of line status.""" os_name = to_text_string(os_name) value = {'nt': 'CRLF', 'posix': 'LF'}.get(os_name, 'CR') self.set_value(value)
def encode_dict(dynamizer, value): """ Encode a dict for the DynamoDB format """ encoded_dict = {} for k, v in six.iteritems(value): encoded_type, encoded_value = dynamizer.raw_encode(v) encoded_dict[k] = { encoded_type: encoded_value, } return 'M', encoded_dict
def function[encode_dict, parameter[dynamizer, value]]: constant[ Encode a dict for the DynamoDB format ] variable[encoded_dict] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da204346080>, <ast.Name object at 0x7da204347b20>]]] in starred[call[name[six].iteritems, parameter[name[value]]]] begin[:] <ast.Tuple object at 0x7da204345e10> assign[=] call[name[dynamizer].raw_encode, parameter[name[v]]] call[name[encoded_dict]][name[k]] assign[=] dictionary[[<ast.Name object at 0x7da1b259ce20>], [<ast.Name object at 0x7da1b259c2e0>]] return[tuple[[<ast.Constant object at 0x7da1b259ccd0>, <ast.Name object at 0x7da1b259d4e0>]]]
keyword[def] identifier[encode_dict] ( identifier[dynamizer] , identifier[value] ): literal[string] identifier[encoded_dict] ={} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[value] ): identifier[encoded_type] , identifier[encoded_value] = identifier[dynamizer] . identifier[raw_encode] ( identifier[v] ) identifier[encoded_dict] [ identifier[k] ]={ identifier[encoded_type] : identifier[encoded_value] , } keyword[return] literal[string] , identifier[encoded_dict]
def encode_dict(dynamizer, value): """ Encode a dict for the DynamoDB format """ encoded_dict = {} for (k, v) in six.iteritems(value): (encoded_type, encoded_value) = dynamizer.raw_encode(v) encoded_dict[k] = {encoded_type: encoded_value} # depends on [control=['for'], data=[]] return ('M', encoded_dict)
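To see the 'M' (map) wire shape this produces, here is a sketch with a toy dynamizer whose raw_encode only knows strings and numbers; the real dynamizer covers the full set of DynamoDB types, and the sketch targets Python 3 (hence .items() instead of six.iteritems):

class ToyDynamizer(object):
    # Stand-in for the real dynamizer: strings become 'S', numbers become 'N'.
    def raw_encode(self, value):
        if isinstance(value, str):
            return 'S', value
        return 'N', str(value)

def encode_dict(dynamizer, value):
    encoded_dict = {}
    for k, v in value.items():
        encoded_type, encoded_value = dynamizer.raw_encode(v)
        encoded_dict[k] = {encoded_type: encoded_value}
    return 'M', encoded_dict

print(encode_dict(ToyDynamizer(), {"name": "alice", "age": 30}))
# ('M', {'name': {'S': 'alice'}, 'age': {'N': '30'}})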
def addActionFinish(self): """ Indicates all callbacks that should run within the action's context have been added, and that the action should therefore finish once those callbacks have fired. @return: The wrapped L{Deferred}. @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called previously. This indicates a programmer error. """ if self._finishAdded: raise AlreadyFinished() self._finishAdded = True def done(result): if isinstance(result, Failure): exception = result.value else: exception = None self._action.finish(exception) return result self.result.addBoth(done) return self.result
def function[addActionFinish, parameter[self]]: constant[ Indicates all callbacks that should run within the action's context have been added, and that the action should therefore finish once those callbacks have fired. @return: The wrapped L{Deferred}. @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called previously. This indicates a programmer error. ] if name[self]._finishAdded begin[:] <ast.Raise object at 0x7da18fe910c0> name[self]._finishAdded assign[=] constant[True] def function[done, parameter[result]]: if call[name[isinstance], parameter[name[result], name[Failure]]] begin[:] variable[exception] assign[=] name[result].value call[name[self]._action.finish, parameter[name[exception]]] return[name[result]] call[name[self].result.addBoth, parameter[name[done]]] return[name[self].result]
keyword[def] identifier[addActionFinish] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_finishAdded] : keyword[raise] identifier[AlreadyFinished] () identifier[self] . identifier[_finishAdded] = keyword[True] keyword[def] identifier[done] ( identifier[result] ): keyword[if] identifier[isinstance] ( identifier[result] , identifier[Failure] ): identifier[exception] = identifier[result] . identifier[value] keyword[else] : identifier[exception] = keyword[None] identifier[self] . identifier[_action] . identifier[finish] ( identifier[exception] ) keyword[return] identifier[result] identifier[self] . identifier[result] . identifier[addBoth] ( identifier[done] ) keyword[return] identifier[self] . identifier[result]
def addActionFinish(self): """ Indicates all callbacks that should run within the action's context have been added, and that the action should therefore finish once those callbacks have fired. @return: The wrapped L{Deferred}. @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called previously. This indicates a programmer error. """ if self._finishAdded: raise AlreadyFinished() # depends on [control=['if'], data=[]] self._finishAdded = True def done(result): if isinstance(result, Failure): exception = result.value # depends on [control=['if'], data=[]] else: exception = None self._action.finish(exception) return result self.result.addBoth(done) return self.result
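A minimal usage sketch, assuming eliot and Twisted are installed; this follows the usual DeferredContext pattern, where the action stays open until the callbacks registered before addActionFinish have fired:

from twisted.internet.defer import Deferred
from eliot import start_action
from eliot.twisted import DeferredContext

def fetch():
    d = Deferred()
    with start_action(action_type=u"demo:fetch").context():
        ctx = DeferredContext(d)
        ctx.addCallback(lambda payload: payload.upper())  # runs inside the action
        ctx.addActionFinish()  # the action finishes once the callbacks have fired
    d.callback("payload")
    return d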
def drizCR(input=None, configObj=None, editpars=False, **inputDict): """ Look for cosmic rays. """ log.debug(inputDict) inputDict["input"] = input configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict, loadOnly=(not editpars)) if configObj is None: return if not editpars: run(configObj)
def function[drizCR, parameter[input, configObj, editpars]]: constant[ Look for cosmic rays. ] call[name[log].debug, parameter[name[inputDict]]] call[name[inputDict]][constant[input]] assign[=] name[input] variable[configObj] assign[=] call[name[util].getDefaultConfigObj, parameter[name[__taskname__], name[configObj], name[inputDict]]] if compare[name[configObj] is constant[None]] begin[:] return[None] if <ast.UnaryOp object at 0x7da1b1a89360> begin[:] call[name[run], parameter[name[configObj]]]
keyword[def] identifier[drizCR] ( identifier[input] = keyword[None] , identifier[configObj] = keyword[None] , identifier[editpars] = keyword[False] ,** identifier[inputDict] ): literal[string] identifier[log] . identifier[debug] ( identifier[inputDict] ) identifier[inputDict] [ literal[string] ]= identifier[input] identifier[configObj] = identifier[util] . identifier[getDefaultConfigObj] ( identifier[__taskname__] , identifier[configObj] , identifier[inputDict] , identifier[loadOnly] =( keyword[not] identifier[editpars] )) keyword[if] identifier[configObj] keyword[is] keyword[None] : keyword[return] keyword[if] keyword[not] identifier[editpars] : identifier[run] ( identifier[configObj] )
def drizCR(input=None, configObj=None, editpars=False, **inputDict): """ Look for cosmic rays. """ log.debug(inputDict) inputDict['input'] = input configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict, loadOnly=not editpars) if configObj is None: return # depends on [control=['if'], data=[]] if not editpars: run(configObj) # depends on [control=['if'], data=[]]
def salt_call():
    '''
    Directly call a salt command in the modules; this does not require a
    running salt minion.
    '''
    import salt.cli.call

    if '' in sys.path:
        sys.path.remove('')

    client = salt.cli.call.SaltCall()
    _install_signal_handlers(client)
    client.run()
def function[salt_call, parameter[]]: constant[ Directly call a salt command in the modules; this does not require a running salt minion. ] import module[salt.cli.call] if compare[constant[] in name[sys].path] begin[:] call[name[sys].path.remove, parameter[constant[]]] variable[client] assign[=] call[name[salt].cli.call.SaltCall, parameter[]] call[name[_install_signal_handlers], parameter[name[client]]] call[name[client].run, parameter[]]
keyword[def] identifier[salt_call] (): literal[string] keyword[import] identifier[salt] . identifier[cli] . identifier[call] keyword[if] literal[string] keyword[in] identifier[sys] . identifier[path] : identifier[sys] . identifier[path] . identifier[remove] ( literal[string] ) identifier[client] = identifier[salt] . identifier[cli] . identifier[call] . identifier[SaltCall] () identifier[_install_signal_handlers] ( identifier[client] ) identifier[client] . identifier[run] ()
def salt_call():
    """
    Directly call a salt command in the modules; this does not require a
    running salt minion.
    """
    import salt.cli.call
    if '' in sys.path:
        sys.path.remove('') # depends on [control=['if'], data=[]]
    client = salt.cli.call.SaltCall()
    _install_signal_handlers(client)
    client.run()
def get_act_act(self, end):
    """ implements Act/Act day count convention (4.16(b) 2006 ISDA Definitions) """
    # split end-self in year portions

    # if the period does not lie within a year, split the days in the period as follows:
    # rest days of start year / years in between / days in the end year
    # REMARK: following the aforementioned ISDA Definition, the first day of the period is included whereas the
    # last day will be excluded
    # What remains to check now is only whether the start and end year are leap or non-leap years. The quotients
    # can be easily calculated and for the years in between they are always one (365/365 = 1; 366/366 = 1)
    if end.year - self.year == 0:
        if BusinessDate.is_leap_year(self.year):
            return BusinessDate.diff_in_days(self, end) / 366.0  # leap year: 366 days
        else:
            # return BusinessDate.diff_in_days(self, end) / 366.0
            return BusinessDate.diff_in_days(self, end) / 365.0  # non-leap year: 365 days
    else:
        rest_year1 = BusinessDate.diff_in_days(self, BusinessDate(
            date(self.year, 12, 31))) + 1  # since the first day counts
        rest_year2 = abs(BusinessDate.diff_in_days(end, BusinessDate(
            date(end.year, 1, 1))))  # here the last day is automatically not counted
        years_in_between = end.year - self.year - 1
        return years_in_between + rest_year1 / (366.0 if is_leap_year(self.year) else 365.0) + rest_year2 / (
            366.0 if is_leap_year(end.year) else 365.0)
def function[get_act_act, parameter[self, end]]: constant[ implements Act/Act day count convention (4.16(b) 2006 ISDA Definitions) ] if compare[binary_operation[name[end].year - name[self].year] equal[==] constant[0]] begin[:] if call[name[BusinessDate].is_leap_year, parameter[name[self].year]] begin[:] return[binary_operation[call[name[BusinessDate].diff_in_days, parameter[name[self], name[end]]] / constant[366.0]]]
keyword[def] identifier[get_act_act] ( identifier[self] , identifier[end] ): literal[string] keyword[if] identifier[end] . identifier[year] - identifier[self] . identifier[year] == literal[int] : keyword[if] identifier[BusinessDate] . identifier[is_leap_year] ( identifier[self] . identifier[year] ): keyword[return] identifier[BusinessDate] . identifier[diff_in_days] ( identifier[self] , identifier[end] )/ literal[int] keyword[else] : keyword[return] identifier[BusinessDate] . identifier[diff_in_days] ( identifier[self] , identifier[end] )/ literal[int] keyword[else] : identifier[rest_year1] = identifier[BusinessDate] . identifier[diff_in_days] ( identifier[self] , identifier[BusinessDate] ( identifier[date] ( identifier[self] . identifier[year] , literal[int] , literal[int] )))+ literal[int] identifier[rest_year2] = identifier[abs] ( identifier[BusinessDate] . identifier[diff_in_days] ( identifier[end] , identifier[BusinessDate] ( identifier[date] ( identifier[end] . identifier[year] , literal[int] , literal[int] )))) identifier[years_in_between] = identifier[end] . identifier[year] - identifier[self] . identifier[year] - literal[int] keyword[return] identifier[years_in_between] + identifier[rest_year1] /( literal[int] keyword[if] identifier[is_leap_year] ( identifier[self] . identifier[year] ) keyword[else] literal[int] )+ identifier[rest_year2] /( literal[int] keyword[if] identifier[is_leap_year] ( identifier[end] . identifier[year] ) keyword[else] literal[int] )
def get_act_act(self, end):
    """ implements Act/Act day count convention (4.16(b) 2006 ISDA Definitions) """
    # split end-self in year portions
    # if the period does not lie within a year, split the days in the period as follows:
    # rest days of start year / years in between / days in the end year
    # REMARK: following the aforementioned ISDA Definition, the first day of the period is included whereas the
    # last day will be excluded
    # What remains to check now is only whether the start and end year are leap or non-leap years. The quotients
    # can be easily calculated and for the years in between they are always one (365/365 = 1; 366/366 = 1)
    if end.year - self.year == 0:
        if BusinessDate.is_leap_year(self.year):
            return BusinessDate.diff_in_days(self, end) / 366.0  # leap year: 366 days # depends on [control=['if'], data=[]]
        else:
            # return BusinessDate.diff_in_days(self, end) / 366.0
            return BusinessDate.diff_in_days(self, end) / 365.0  # non-leap year: 365 days # depends on [control=['if'], data=[]]
    else:
        rest_year1 = BusinessDate.diff_in_days(self, BusinessDate(date(self.year, 12, 31))) + 1  # since the first day counts
        rest_year2 = abs(BusinessDate.diff_in_days(end, BusinessDate(date(end.year, 1, 1))))  # here the last day is automatically not counted
        years_in_between = end.year - self.year - 1
        return years_in_between + rest_year1 / (366.0 if is_leap_year(self.year) else 365.0) + rest_year2 / (366.0 if is_leap_year(end.year) else 365.0)
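A worked example of the cross-year branch, sketched with plain datetime arithmetic standing in for BusinessDate (the diff_in_days semantics assumed here are simple day differences):

import calendar
from datetime import date

def act_act(start, end):
    # Plain-datetime sketch of the Act/Act logic above.
    if end.year == start.year:
        return (end - start).days / (366.0 if calendar.isleap(start.year) else 365.0)
    rest_year1 = (date(start.year, 12, 31) - start).days + 1  # first day counts
    rest_year2 = (end - date(end.year, 1, 1)).days            # last day excluded
    years_in_between = end.year - start.year - 1
    return (years_in_between
            + rest_year1 / (366.0 if calendar.isleap(start.year) else 365.0)
            + rest_year2 / (366.0 if calendar.isleap(end.year) else 365.0))

# 92 remaining days of 2015, all of 2016, 90 days of 2017:
print(act_act(date(2015, 10, 1), date(2017, 4, 1)))  # 92/365 + 1 + 90/365 ~= 1.4986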
def extra_info(self): """Retrieve the log string generated when opening the file.""" info = _ffi.new("char[]", 2**14) _snd.sf_command(self._file, _snd.SFC_GET_LOG_INFO, info, _ffi.sizeof(info)) return _ffi.string(info).decode('utf-8', 'replace')
def function[extra_info, parameter[self]]: constant[Retrieve the log string generated when opening the file.] variable[info] assign[=] call[name[_ffi].new, parameter[constant[char[]], binary_operation[constant[2] ** constant[14]]]] call[name[_snd].sf_command, parameter[name[self]._file, name[_snd].SFC_GET_LOG_INFO, name[info], call[name[_ffi].sizeof, parameter[name[info]]]]] return[call[call[name[_ffi].string, parameter[name[info]]].decode, parameter[constant[utf-8], constant[replace]]]]
keyword[def] identifier[extra_info] ( identifier[self] ): literal[string] identifier[info] = identifier[_ffi] . identifier[new] ( literal[string] , literal[int] ** literal[int] ) identifier[_snd] . identifier[sf_command] ( identifier[self] . identifier[_file] , identifier[_snd] . identifier[SFC_GET_LOG_INFO] , identifier[info] , identifier[_ffi] . identifier[sizeof] ( identifier[info] )) keyword[return] identifier[_ffi] . identifier[string] ( identifier[info] ). identifier[decode] ( literal[string] , literal[string] )
def extra_info(self): """Retrieve the log string generated when opening the file.""" info = _ffi.new('char[]', 2 ** 14) _snd.sf_command(self._file, _snd.SFC_GET_LOG_INFO, info, _ffi.sizeof(info)) return _ffi.string(info).decode('utf-8', 'replace')
def __sub(self, string: str = '') -> str: """Replace spaces in string. :param string: String. :return: String without spaces. """ replacer = self.random.choice(['_', '-']) return re.sub(r'\s+', replacer, string.strip())
def function[__sub, parameter[self, string]]: constant[Replace spaces in string. :param string: String. :return: String without spaces. ] variable[replacer] assign[=] call[name[self].random.choice, parameter[list[[<ast.Constant object at 0x7da20c6c5d50>, <ast.Constant object at 0x7da20c6c49d0>]]]] return[call[name[re].sub, parameter[constant[\s+], name[replacer], call[name[string].strip, parameter[]]]]]
keyword[def] identifier[__sub] ( identifier[self] , identifier[string] : identifier[str] = literal[string] )-> identifier[str] : literal[string] identifier[replacer] = identifier[self] . identifier[random] . identifier[choice] ([ literal[string] , literal[string] ]) keyword[return] identifier[re] . identifier[sub] ( literal[string] , identifier[replacer] , identifier[string] . identifier[strip] ())
def __sub(self, string: str='') -> str: """Replace spaces in string. :param string: String. :return: String without spaces. """ replacer = self.random.choice(['_', '-']) return re.sub('\\s+', replacer, string.strip())
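A quick standalone illustration of the whitespace collapse, with random.choice standing in for the provider's seeded self.random:

import re
import random

def sub_spaces(string=''):
    replacer = random.choice(['_', '-'])  # the provider uses its own RNG instance
    return re.sub(r'\s+', replacer, string.strip())

print(sub_spaces('  hello   brave  world '))  # e.g. 'hello_brave_world'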
def get_descriptions(self, description_type):
    """
    Gets the descriptions for the specified type. When complete,
    the callback is called with a list of descriptions
    """
    (desc_type, max_units) = description_type
    results = [None] * max_units
    self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc)
    self.elk.send(sd_encode(desc_type=desc_type, unit=0))
def function[get_descriptions, parameter[self, description_type]]: constant[ Gets the descriptions for the specified type. When complete, the callback is called with a list of descriptions ] <ast.Tuple object at 0x7da18dc076a0> assign[=] name[description_type] variable[results] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18dc06830>]] * name[max_units]] call[name[self].elk._descriptions_in_progress][name[desc_type]] assign[=] tuple[[<ast.Name object at 0x7da18dc07910>, <ast.Name object at 0x7da18dc06080>, <ast.Attribute object at 0x7da18dc07760>]] call[name[self].elk.send, parameter[call[name[sd_encode], parameter[]]]]
keyword[def] identifier[get_descriptions] ( identifier[self] , identifier[description_type] ): literal[string] ( identifier[desc_type] , identifier[max_units] )= identifier[description_type] identifier[results] =[ keyword[None] ]* identifier[max_units] identifier[self] . identifier[elk] . identifier[_descriptions_in_progress] [ identifier[desc_type] ]=( identifier[max_units] , identifier[results] , identifier[self] . identifier[_got_desc] ) identifier[self] . identifier[elk] . identifier[send] ( identifier[sd_encode] ( identifier[desc_type] = identifier[desc_type] , identifier[unit] = literal[int] ))
def get_descriptions(self, description_type): """ Gets the descriptions for the specified type. When complete, the callback is called with a list of descriptions """ (desc_type, max_units) = description_type results = [None] * max_units self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc) self.elk.send(sd_encode(desc_type=desc_type, unit=0))
def in_8(library, session, space, offset, extended=False): """Reads in an 8-bit value from the specified memory space and offset. Corresponds to viIn8* function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param extended: Use 64 bits offset independent of the platform. :return: Data read from memory, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` """ value_8 = ViUInt8() if extended: ret = library.viIn8Ex(session, space, offset, byref(value_8)) else: ret = library.viIn8(session, space, offset, byref(value_8)) return value_8.value, ret
def function[in_8, parameter[library, session, space, offset, extended]]: constant[Reads in an 8-bit value from the specified memory space and offset. Corresponds to viIn8* function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param extended: Use 64 bits offset independent of the platform. :return: Data read from memory, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` ] variable[value_8] assign[=] call[name[ViUInt8], parameter[]] if name[extended] begin[:] variable[ret] assign[=] call[name[library].viIn8Ex, parameter[name[session], name[space], name[offset], call[name[byref], parameter[name[value_8]]]]] return[tuple[[<ast.Attribute object at 0x7da18dc9a170>, <ast.Name object at 0x7da18dc9bb80>]]]
keyword[def] identifier[in_8] ( identifier[library] , identifier[session] , identifier[space] , identifier[offset] , identifier[extended] = keyword[False] ): literal[string] identifier[value_8] = identifier[ViUInt8] () keyword[if] identifier[extended] : identifier[ret] = identifier[library] . identifier[viIn8Ex] ( identifier[session] , identifier[space] , identifier[offset] , identifier[byref] ( identifier[value_8] )) keyword[else] : identifier[ret] = identifier[library] . identifier[viIn8] ( identifier[session] , identifier[space] , identifier[offset] , identifier[byref] ( identifier[value_8] )) keyword[return] identifier[value_8] . identifier[value] , identifier[ret]
def in_8(library, session, space, offset, extended=False): """Reads in an 8-bit value from the specified memory space and offset. Corresponds to viIn8* function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param extended: Use 64 bits offset independent of the platform. :return: Data read from memory, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` """ value_8 = ViUInt8() if extended: ret = library.viIn8Ex(session, space, offset, byref(value_8)) # depends on [control=['if'], data=[]] else: ret = library.viIn8(session, space, offset, byref(value_8)) return (value_8.value, ret)
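The byref(value_8) call is the classic ctypes out-parameter pattern: the C function writes through the pointer and the ViUInt8 (essentially an unsigned 8-bit integer) picks up the result. A VISA-free sketch of the same mechanism, with a pure-Python stand-in for the driver call:

from ctypes import c_ubyte, pointer

def fake_vi_in8(p_value):
    # Stand-in for library.viIn8: the driver writes the register value
    # through the pointer and returns a status code (0 == VI_SUCCESS).
    p_value.contents.value = 0x5A
    return 0

value_8 = c_ubyte()                  # plays the role of ViUInt8()
ret = fake_vi_in8(pointer(value_8))  # real code passes byref(value_8) to the C call
print(value_8.value, ret)            # 90 0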
def orb(self): """ Returns the orb of this fixed star. """ for (mag, orb) in FixedStar._ORBS: if self.mag < mag: return orb return 0.5
def function[orb, parameter[self]]: constant[ Returns the orb of this fixed star. ] for taget[tuple[[<ast.Name object at 0x7da1b11dde40>, <ast.Name object at 0x7da1b11dc7c0>]]] in starred[name[FixedStar]._ORBS] begin[:] if compare[name[self].mag less[<] name[mag]] begin[:] return[name[orb]] return[constant[0.5]]
keyword[def] identifier[orb] ( identifier[self] ): literal[string] keyword[for] ( identifier[mag] , identifier[orb] ) keyword[in] identifier[FixedStar] . identifier[_ORBS] : keyword[if] identifier[self] . identifier[mag] < identifier[mag] : keyword[return] identifier[orb] keyword[return] literal[int]
def orb(self): """ Returns the orb of this fixed star. """ for (mag, orb) in FixedStar._ORBS: if self.mag < mag: return orb # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return 0.5
def accounts(self) -> AccountsAggregate: """ Returns the Accounts aggregate """ if not self.__accounts_aggregate: self.__accounts_aggregate = AccountsAggregate(self.book) return self.__accounts_aggregate
def function[accounts, parameter[self]]: constant[ Returns the Accounts aggregate ] if <ast.UnaryOp object at 0x7da1b1289510> begin[:] name[self].__accounts_aggregate assign[=] call[name[AccountsAggregate], parameter[name[self].book]] return[name[self].__accounts_aggregate]
keyword[def] identifier[accounts] ( identifier[self] )-> identifier[AccountsAggregate] : literal[string] keyword[if] keyword[not] identifier[self] . identifier[__accounts_aggregate] : identifier[self] . identifier[__accounts_aggregate] = identifier[AccountsAggregate] ( identifier[self] . identifier[book] ) keyword[return] identifier[self] . identifier[__accounts_aggregate]
def accounts(self) -> AccountsAggregate: """ Returns the Accounts aggregate """ if not self.__accounts_aggregate: self.__accounts_aggregate = AccountsAggregate(self.book) # depends on [control=['if'], data=[]] return self.__accounts_aggregate
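This is the lazy-initialization (memoized attribute) pattern: the aggregate is built on first access and reused afterwards. A generic sketch of the same idea; whether the original is decorated as a property is not shown here, the caching is the point:

class Reports(object):
    def __init__(self, book):
        self.book = book
        self.__accounts_aggregate = None

    @property
    def accounts(self):
        if not self.__accounts_aggregate:
            # expensive construction happens exactly once
            self.__accounts_aggregate = ["aggregate for %r" % self.book]
        return self.__accounts_aggregate

r = Reports("demo-book")
assert r.accounts is r.accounts  # second access returns the cached object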
def update(self, **kwargs): """Update the Account resource with specified content. Args: name (str): Human-readable name for the account Returns: the updated Account object. """ return self.__class__(self.resource.update(kwargs), self.client, wallet=self.wallet)
def function[update, parameter[self]]: constant[Update the Account resource with specified content. Args: name (str): Human-readable name for the account Returns: the updated Account object. ] return[call[name[self].__class__, parameter[call[name[self].resource.update, parameter[name[kwargs]]], name[self].client]]]
keyword[def] identifier[update] ( identifier[self] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[__class__] ( identifier[self] . identifier[resource] . identifier[update] ( identifier[kwargs] ), identifier[self] . identifier[client] , identifier[wallet] = identifier[self] . identifier[wallet] )
def update(self, **kwargs): """Update the Account resource with specified content. Args: name (str): Human-readable name for the account Returns: the updated Account object. """ return self.__class__(self.resource.update(kwargs), self.client, wallet=self.wallet)
def seen_nonce(id, nonce, timestamp): """ Returns True if the Hawk nonce has been seen already. """ key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp) if cache.get(key): log.warning('replay attack? already processed nonce {k}' .format(k=key)) return True else: log.debug('caching nonce {k}'.format(k=key)) cache.set(key, True, # We only need the nonce until the message itself expires. # This also adds a little bit of padding. timeout=getattr(settings, 'HAWK_MESSAGE_EXPIRATION', default_message_expiration) + 5) return False
def function[seen_nonce, parameter[id, nonce, timestamp]]: constant[ Returns True if the Hawk nonce has been seen already. ] variable[key] assign[=] call[constant[{id}:{n}:{ts}].format, parameter[]] if call[name[cache].get, parameter[name[key]]] begin[:] call[name[log].warning, parameter[call[constant[replay attack? already processed nonce {k}].format, parameter[]]]] return[constant[True]]
keyword[def] identifier[seen_nonce] ( identifier[id] , identifier[nonce] , identifier[timestamp] ): literal[string] identifier[key] = literal[string] . identifier[format] ( identifier[id] = identifier[id] , identifier[n] = identifier[nonce] , identifier[ts] = identifier[timestamp] ) keyword[if] identifier[cache] . identifier[get] ( identifier[key] ): identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[k] = identifier[key] )) keyword[return] keyword[True] keyword[else] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[k] = identifier[key] )) identifier[cache] . identifier[set] ( identifier[key] , keyword[True] , identifier[timeout] = identifier[getattr] ( identifier[settings] , literal[string] , identifier[default_message_expiration] )+ literal[int] ) keyword[return] keyword[False]
def seen_nonce(id, nonce, timestamp): """ Returns True if the Hawk nonce has been seen already. """ key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp) if cache.get(key): log.warning('replay attack? already processed nonce {k}'.format(k=key)) return True # depends on [control=['if'], data=[]] else: log.debug('caching nonce {k}'.format(k=key)) # We only need the nonce until the message itself expires. # This also adds a little bit of padding. cache.set(key, True, timeout=getattr(settings, 'HAWK_MESSAGE_EXPIRATION', default_message_expiration) + 5) return False
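The replay protection hinges on caching the (id, nonce, timestamp) triple until the message itself would have expired. A self-contained sketch with a dict-plus-TTL standing in for Django's cache backend:

import time

_cache = {}  # in-memory stand-in for Django's cache

def seen_nonce(id, nonce, timestamp, expiration=60):
    key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp)
    now = time.time()
    if key in _cache and _cache[key] > now:
        return True                     # replay: this triple was already processed
    _cache[key] = now + expiration + 5  # keep it a little past message expiry
    return False

assert seen_nonce('client-1', 'abc123', 1700000000) is False  # first sighting
assert seen_nonce('client-1', 'abc123', 1700000000) is True   # replay detected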
def p_expression_1(self, program): """ expression : '-' expression %prec negative | '+' expression %prec positive """ program[0] = node.Prefix([node.UnaryOperator(program[1]), program[2]])
def function[p_expression_1, parameter[self, program]]: constant[ expression : '-' expression %prec negative | '+' expression %prec positive ] call[name[program]][constant[0]] assign[=] call[name[node].Prefix, parameter[list[[<ast.Call object at 0x7da1b03a9ea0>, <ast.Subscript object at 0x7da1b03a93f0>]]]]
keyword[def] identifier[p_expression_1] ( identifier[self] , identifier[program] ): literal[string] identifier[program] [ literal[int] ]= identifier[node] . identifier[Prefix] ([ identifier[node] . identifier[UnaryOperator] ( identifier[program] [ literal[int] ]), identifier[program] [ literal[int] ]])
def p_expression_1(self, program): """ expression : '-' expression %prec negative | '+' expression %prec positive """ program[0] = node.Prefix([node.UnaryOperator(program[1]), program[2]])
def urlunsplit(data): """Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).""" scheme, netloc, url, query, fragment = data if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): if url and url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url if scheme: url = scheme + ':' + url if query: url = url + '?' + query if fragment: url = url + '#' + fragment return url
def function[urlunsplit, parameter[data]]: constant[Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).] <ast.Tuple object at 0x7da2047eb9a0> assign[=] name[data] if <ast.BoolOp object at 0x7da20e9b0df0> begin[:] if <ast.BoolOp object at 0x7da20e9b26b0> begin[:] variable[url] assign[=] binary_operation[constant[/] + name[url]] variable[url] assign[=] binary_operation[binary_operation[constant[//] + <ast.BoolOp object at 0x7da20e9b1ea0>] + name[url]] if name[scheme] begin[:] variable[url] assign[=] binary_operation[binary_operation[name[scheme] + constant[:]] + name[url]] if name[query] begin[:] variable[url] assign[=] binary_operation[binary_operation[name[url] + constant[?]] + name[query]] if name[fragment] begin[:] variable[url] assign[=] binary_operation[binary_operation[name[url] + constant[#]] + name[fragment]] return[name[url]]
keyword[def] identifier[urlunsplit] ( identifier[data] ): literal[string] identifier[scheme] , identifier[netloc] , identifier[url] , identifier[query] , identifier[fragment] = identifier[data] keyword[if] identifier[netloc] keyword[or] ( identifier[scheme] keyword[and] identifier[scheme] keyword[in] identifier[uses_netloc] keyword[and] identifier[url] [: literal[int] ]!= literal[string] ): keyword[if] identifier[url] keyword[and] identifier[url] [: literal[int] ]!= literal[string] : identifier[url] = literal[string] + identifier[url] identifier[url] = literal[string] +( identifier[netloc] keyword[or] literal[string] )+ identifier[url] keyword[if] identifier[scheme] : identifier[url] = identifier[scheme] + literal[string] + identifier[url] keyword[if] identifier[query] : identifier[url] = identifier[url] + literal[string] + identifier[query] keyword[if] identifier[fragment] : identifier[url] = identifier[url] + literal[string] + identifier[fragment] keyword[return] identifier[url]
def urlunsplit(data): """Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).""" (scheme, netloc, url, query, fragment) = data if netloc or (scheme and scheme in uses_netloc and (url[:2] != '//')): if url and url[:1] != '/': url = '/' + url # depends on [control=['if'], data=[]] url = '//' + (netloc or '') + url # depends on [control=['if'], data=[]] if scheme: url = scheme + ':' + url # depends on [control=['if'], data=[]] if query: url = url + '?' + query # depends on [control=['if'], data=[]] if fragment: url = url + '#' + fragment # depends on [control=['if'], data=[]] return url
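A round-trip illustration using the stdlib counterparts of the code above; note how a relative path gains the leading slash when a netloc is present, per the logic shown:

from urllib.parse import urlsplit, urlunsplit

url = 'https://example.com/path?q=1#frag'
assert urlunsplit(urlsplit(url)) == url  # lossless round trip

# netloc plus a relative path: the missing '/' is inserted
assert urlunsplit(('http', 'example.com', 'path', '', '')) == 'http://example.com/path'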
def make_path(config, *endings):
    """ Create a path based on component configuration. All paths
        are relative to the component's configuration directory;
        usually this will be the same for an entire session, but
        this function supports component-specific configuration
        directories.

        Arguments:
        config - the configuration object for a component
        endings - a list of file paths to append to the component's
                  configuration directory
    """
    config_dir = config.get("dp.config_dir")
    return os.path.join(config_dir, *endings)
def function[make_path, parameter[config]]: constant[ Create a path based on component configuration. All paths are relative to the component's configuration directory; usually this will be the same for an entire session, but this function supports component-specific configuration directories. Arguments: config - the configuration object for a component endings - a list of file paths to append to the component's configuration directory ] variable[config_dir] assign[=] call[name[config].get, parameter[constant[dp.config_dir]]] return[call[name[os].path.join, parameter[name[config_dir], <ast.Starred object at 0x7da1b26763e0>]]]
keyword[def] identifier[make_path] ( identifier[config] ,* identifier[endings] ): literal[string] identifier[config_dir] = identifier[config] . identifier[get] ( literal[string] ) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[config_dir] ,* identifier[endings] )
def make_path(config, *endings):
    """ Create a path based on component configuration. All paths
        are relative to the component's configuration directory;
        usually this will be the same for an entire session, but
        this function supports component-specific configuration
        directories.

        Arguments:
        config - the configuration object for a component
        endings - a list of file paths to append to the component's
                  configuration directory
    """
    config_dir = config.get('dp.config_dir')
    return os.path.join(config_dir, *endings)
def parse_file(self, filename: str, entry: str=None) -> parsing.Node: """Parse filename using the grammar""" self.from_string = False import os.path with open(filename, 'r') as f: self.parsed_stream(f.read(), os.path.abspath(filename)) if entry is None: entry = self.entry if entry is None: raise ValueError("No entry rule name defined for {}".format( self.__class__.__name__)) return self._do_parse(entry)
def function[parse_file, parameter[self, filename, entry]]: constant[Parse filename using the grammar] name[self].from_string assign[=] constant[False] import module[os.path] with call[name[open], parameter[name[filename], constant[r]]] begin[:] call[name[self].parsed_stream, parameter[call[name[f].read, parameter[]], call[name[os].path.abspath, parameter[name[filename]]]]] if compare[name[entry] is constant[None]] begin[:] variable[entry] assign[=] name[self].entry if compare[name[entry] is constant[None]] begin[:] <ast.Raise object at 0x7da1b0118df0> return[call[name[self]._do_parse, parameter[name[entry]]]]
keyword[def] identifier[parse_file] ( identifier[self] , identifier[filename] : identifier[str] , identifier[entry] : identifier[str] = keyword[None] )-> identifier[parsing] . identifier[Node] : literal[string] identifier[self] . identifier[from_string] = keyword[False] keyword[import] identifier[os] . identifier[path] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : identifier[self] . identifier[parsed_stream] ( identifier[f] . identifier[read] (), identifier[os] . identifier[path] . identifier[abspath] ( identifier[filename] )) keyword[if] identifier[entry] keyword[is] keyword[None] : identifier[entry] = identifier[self] . identifier[entry] keyword[if] identifier[entry] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] )) keyword[return] identifier[self] . identifier[_do_parse] ( identifier[entry] )
def parse_file(self, filename: str, entry: str=None) -> parsing.Node: """Parse filename using the grammar""" self.from_string = False import os.path with open(filename, 'r') as f: self.parsed_stream(f.read(), os.path.abspath(filename)) # depends on [control=['with'], data=['f']] if entry is None: entry = self.entry # depends on [control=['if'], data=['entry']] if entry is None: raise ValueError('No entry rule name defined for {}'.format(self.__class__.__name__)) # depends on [control=['if'], data=[]] return self._do_parse(entry)
def get_next_entry(file, entrymarker="\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF", only_coord=True, blocksize=65535):
    '''Find or read the next ecc entry in a given ecc file.
    Call this function multiple times with the same file handle to get subsequent markers positions (this is not a generator but it works very similarly, because it will continue reading from the file's current cursor position -- this can be used advantageously if you want to read only a specific entry by seeking before supplying the file handle).
    This will read any string length between two entrymarkers.
    The reading is very tolerant, so it will always return any valid entry (but also scrambled entries if any, but the decoding will ensure everything's ok).
    `file` is a file handle, not the path to the file.'''
    found = False
    start = None # start and end vars are the relative position of the starting/ending entrymarkers in the current buffer
    end = None
    startcursor = None # startcursor and endcursor are the absolute position of the starting/ending entrymarkers inside the database file
    endcursor = None
    buf = 1
    # Sanity check: cannot screen the file's content if the window is of the same size as the pattern to match (the marker)
    if blocksize <= len(entrymarker):
        blocksize = len(entrymarker) + 1
    # Continue the search as long as we did not find at least one starting marker and one ending marker (or end of file)
    while (not found and buf):
        # Read a long block at once, we will readjust the file cursor after
        buf = file.read(blocksize)
        # Find the start marker (if not found already)
        if start is None or start == -1:
            start = buf.find(entrymarker) # relative position of the starting marker in the currently read string
            if start >= 0 and not startcursor: # assign startcursor only if it's empty (meaning that we did not find the starting entrymarker, else if found we are only looking for
                startcursor = file.tell() - len(buf) + start # absolute position of the starting marker in the file
            if start >= 0: start = start + len(entrymarker)
        # If we have a starting marker, we try to find a subsequent marker which will be the ending of our entry (if the entry is corrupted we don't care: it won't pass the entry_to_dict() decoding or subsequent steps of decoding and we will just pass to the next ecc entry). This allows to process any valid entry, no matter if previous ones were scrambled.
        if startcursor is not None and startcursor >= 0:
            end = buf.find(entrymarker, start)
            if end < 0 and len(buf) < blocksize: # Special case: we didn't find any ending marker but we reached the end of file, then we are probably in fact just reading the last entry (thus there's no ending marker for this entry)
                end = len(buf) # It's ok, we have our entry, the ending marker is just the end of file
            # If we found an ending marker (or if end of file is reached), then we compute the absolute cursor value and put the file reading cursor back in position, just before the next entry (where the ending marker is if any)
            if end >= 0:
                endcursor = file.tell() - len(buf) + end
                # Make sure we are not redetecting the same marker as the start marker
                if endcursor > startcursor:
                    file.seek(endcursor)
                    found = True
                else:
                    end = -1
                    endcursor = None
        #print("Start:", start, startcursor)
        #print("End: ", end, endcursor)
        # Stop criterion to avoid infinite loop: in the case we could not find any entry in the rest of the file and we reached the EOF, we just quit now
        if len(buf) < blocksize: break
        # Did not find the full entry in one buffer? Reinit variables for next iteration, but keep in memory startcursor.
if start > 0: start = 0 # reset the start position for the end buf find at next iteration (ie: in the arithmetic operations to compute the absolute endcursor position, the start entrymarker won't be accounted because it was discovered in a previous buffer). if not endcursor: file.seek(file.tell()-len(entrymarker)) # Try to fix edge case where blocksize stops the buffer exactly in the middle of the ending entrymarker. The starting marker should always be ok because it should be quite close (or generally immediately after) the previous entry, but the end depends on the end of the current entry (size of the original file), thus the buffer may miss the ending entrymarker. should offset file.seek(-len(entrymarker)) before searching for ending. if found: # if an entry was found, we seek to the beginning of the entry and then either read the entry from file or just return the markers positions (aka the entry bounds) file.seek(startcursor + len(entrymarker)) if only_coord: # Return only coordinates of the start and end markers # Note: it is useful to just return the reading positions and not the entry itself because it can get quite huge and may overflow memory, thus we will read each ecc blocks on request using a generator. return [startcursor + len(entrymarker), endcursor] else: # Return the full entry's content return file.read(endcursor - startcursor - len(entrymarker)) else: # Nothing found (or no new entry to find, we've already found them all), so we return None return None
def function[get_next_entry, parameter[file, entrymarker, only_coord, blocksize]]: constant[Find or read the next ecc entry in a given ecc file. Call this function multiple times with the same file handle to get subsequent markers positions (this is not a generator but it works very similarly, because it will continue reading from the file's current cursor position -- this can be used advantageously if you want to read only a specific entry by seeking before supplying the file handle). This will read any string length between two entrymarkers. The reading is very tolerant, so it will always return any valid entry (but also scrambled entries if any, but the decoding will ensure everything's ok). `file` is a file handle, not the path to the file.] variable[found] assign[=] constant[False] variable[start] assign[=] constant[None] variable[end] assign[=] constant[None] variable[startcursor] assign[=] constant[None] variable[endcursor] assign[=] constant[None] variable[buf] assign[=] constant[1] if compare[name[blocksize] less_or_equal[<=] call[name[len], parameter[name[entrymarker]]]] begin[:] variable[blocksize] assign[=] binary_operation[call[name[len], parameter[name[entrymarker]]] + constant[1]] while <ast.BoolOp object at 0x7da2054a4130> begin[:] variable[buf] assign[=] call[name[file].read, parameter[name[blocksize]]] if <ast.BoolOp object at 0x7da2054a5630> begin[:] variable[start] assign[=] call[name[buf].find, parameter[name[entrymarker]]] if <ast.BoolOp object at 0x7da2054a4d60> begin[:] variable[startcursor] assign[=] binary_operation[binary_operation[call[name[file].tell, parameter[]] - call[name[len], parameter[name[buf]]]] + name[start]] if compare[name[start] greater_or_equal[>=] constant[0]] begin[:] variable[start] assign[=] binary_operation[name[start] + call[name[len], parameter[name[entrymarker]]]] if <ast.BoolOp object at 0x7da2054a4c10> begin[:] variable[end] assign[=] call[name[buf].find, parameter[name[entrymarker], name[start]]] if <ast.BoolOp object at 0x7da2054a69e0> begin[:] variable[end] assign[=] call[name[len], parameter[name[buf]]] if compare[name[end] greater_or_equal[>=] constant[0]] begin[:] variable[endcursor] assign[=] binary_operation[binary_operation[call[name[file].tell, parameter[]] - call[name[len], parameter[name[buf]]]] + name[end]] if compare[name[endcursor] greater[>] name[startcursor]] begin[:] call[name[file].seek, parameter[name[endcursor]]] variable[found] assign[=] constant[True] if compare[call[name[len], parameter[name[buf]]] less[<] name[blocksize]] begin[:] break if compare[name[start] greater[>] constant[0]] begin[:] variable[start] assign[=] constant[0] if <ast.UnaryOp object at 0x7da2054a4400> begin[:] call[name[file].seek, parameter[binary_operation[call[name[file].tell, parameter[]] - call[name[len], parameter[name[entrymarker]]]]]] if name[found] begin[:] call[name[file].seek, parameter[binary_operation[name[startcursor] + call[name[len], parameter[name[entrymarker]]]]]] if name[only_coord] begin[:] return[list[[<ast.BinOp object at 0x7da18bcc8460>, <ast.Name object at 0x7da18bcca590>]]]
keyword[def] identifier[get_next_entry] ( identifier[file] , identifier[entrymarker] = literal[string] , identifier[only_coord] = keyword[True] , identifier[blocksize] = literal[int] ): literal[string] identifier[found] = keyword[False] identifier[start] = keyword[None] identifier[end] = keyword[None] identifier[startcursor] = keyword[None] identifier[endcursor] = keyword[None] identifier[buf] = literal[int] keyword[if] identifier[blocksize] <= identifier[len] ( identifier[entrymarker] ): identifier[blocksize] = identifier[len] ( identifier[entrymarker] )+ literal[int] keyword[while] ( keyword[not] identifier[found] keyword[and] identifier[buf] ): identifier[buf] = identifier[file] . identifier[read] ( identifier[blocksize] ) keyword[if] identifier[start] keyword[is] keyword[None] keyword[or] identifier[start] ==- literal[int] : identifier[start] = identifier[buf] . identifier[find] ( identifier[entrymarker] ); keyword[if] identifier[start] >= literal[int] keyword[and] keyword[not] identifier[startcursor] : identifier[startcursor] = identifier[file] . identifier[tell] ()- identifier[len] ( identifier[buf] )+ identifier[start] keyword[if] identifier[start] >= literal[int] : identifier[start] = identifier[start] + identifier[len] ( identifier[entrymarker] ) keyword[if] identifier[startcursor] keyword[is] keyword[not] keyword[None] keyword[and] identifier[startcursor] >= literal[int] : identifier[end] = identifier[buf] . identifier[find] ( identifier[entrymarker] , identifier[start] ) keyword[if] identifier[end] < literal[int] keyword[and] identifier[len] ( identifier[buf] )< identifier[blocksize] : identifier[end] = identifier[len] ( identifier[buf] ) keyword[if] identifier[end] >= literal[int] : identifier[endcursor] = identifier[file] . identifier[tell] ()- identifier[len] ( identifier[buf] )+ identifier[end] keyword[if] identifier[endcursor] > identifier[startcursor] : identifier[file] . identifier[seek] ( identifier[endcursor] ) identifier[found] = keyword[True] keyword[else] : identifier[end] =- literal[int] identifier[encursor] = keyword[None] keyword[if] identifier[len] ( identifier[buf] )< identifier[blocksize] : keyword[break] keyword[if] identifier[start] > literal[int] : identifier[start] = literal[int] keyword[if] keyword[not] identifier[endcursor] : identifier[file] . identifier[seek] ( identifier[file] . identifier[tell] ()- identifier[len] ( identifier[entrymarker] )) keyword[if] identifier[found] : identifier[file] . identifier[seek] ( identifier[startcursor] + identifier[len] ( identifier[entrymarker] )) keyword[if] identifier[only_coord] : keyword[return] [ identifier[startcursor] + identifier[len] ( identifier[entrymarker] ), identifier[endcursor] ] keyword[else] : keyword[return] identifier[file] . identifier[read] ( identifier[endcursor] - identifier[startcursor] - identifier[len] ( identifier[entrymarker] )) keyword[else] : keyword[return] keyword[None]
def get_next_entry(file, entrymarker='þÿþÿþÿþÿþÿ', only_coord=True, blocksize=65535): """Find or read the next ecc entry in a given ecc file. Call this function multiple times with the same file handle to get subsequent markers positions (this is not a generator but it works very similarly, because it will continue reading from the file's current cursor position -- this can be used advantageously if you want to read only a specific entry by seeking before supplying the file handle). This will read any string length between two entrymarkers. The reading is very tolerant, so it will always return any valid entry (but also scrambled entries if any, but the decoding will ensure everything's ok). `file` is a file handle, not the path to the file.""" found = False start = None # start and end vars are the relative position of the starting/ending entrymarkers in the current buffer end = None startcursor = None # startcursor and endcursor are the absolute position of the starting/ending entrymarkers inside the database file endcursor = None buf = 1 # Sanity check: cannot screen the file's content if the window is of the same size as the pattern to match (the marker) if blocksize <= len(entrymarker): blocksize = len(entrymarker) + 1 # depends on [control=['if'], data=['blocksize']] # Continue the search as long as we did not find at least one starting marker and one ending marker (or end of file) while not found and buf: # Read a long block at once, we will readjust the file cursor after buf = file.read(blocksize) # Find the start marker (if not found already) if start is None or start == -1: start = buf.find(entrymarker) # relative position of the starting marker in the currently read string if start >= 0 and (not startcursor): # assign startcursor only if it's empty (meaning that we did not find the starting entrymarker, else if found we are only looking for startcursor = file.tell() - len(buf) + start # absolute position of the starting marker in the file # depends on [control=['if'], data=[]] if start >= 0: start = start + len(entrymarker) # depends on [control=['if'], data=['start']] # depends on [control=['if'], data=[]] # If we have a starting marker, we try to find a subsequent marker which will be the ending of our entry (if the entry is corrupted we don't care: it won't pass the entry_to_dict() decoding or subsequent steps of decoding and we will just pass to the next ecc entry). This allows to process any valid entry, no matter if previous ones were scrambled. if startcursor is not None and startcursor >= 0: end = buf.find(entrymarker, start) if end < 0 and len(buf) < blocksize: # Special case: we didn't find any ending marker but we reached the end of file, then we are probably in fact just reading the last entry (thus there's no ending marker for this entry) end = len(buf) # It's ok, we have our entry, the ending marker is just the end of file # depends on [control=['if'], data=[]] # If we found an ending marker (or if end of file is reached), then we compute the absolute cursor value and put the file reading cursor back in position, just before the next entry (where the ending marker is if any) if end >= 0: endcursor = file.tell() - len(buf) + end # Make sure we are not redetecting the same marker as the start marker if endcursor > startcursor: file.seek(endcursor) found = True # depends on [control=['if'], data=['endcursor']] else: end = -1 encursor = None # depends on [control=['if'], data=['end']] # depends on [control=['if'], data=[]] #print("Start:", start, startcursor) #print("End: ", end, endcursor) # Stop criterion to avoid infinite loop: in the case we could not find any entry in the rest of the file and we reached the EOF, we just quit now if len(buf) < blocksize: break # depends on [control=['if'], data=[]] # Did not find the full entry in one buffer? Reinit variables for next iteration, but keep in memory startcursor. if start > 0: start = 0 # reset the start position for the end buf find at next iteration (ie: in the arithmetic operations to compute the absolute endcursor position, the start entrymarker won't be accounted because it was discovered in a previous buffer). # depends on [control=['if'], data=['start']] if not endcursor: file.seek(file.tell() - len(entrymarker)) # Try to fix edge case where blocksize stops the buffer exactly in the middle of the ending entrymarker. The starting marker should always be ok because it should be quite close (or generally immediately after) the previous entry, but the end depends on the end of the current entry (size of the original file), thus the buffer may miss the ending entrymarker. should offset file.seek(-len(entrymarker)) before searching for ending. # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] if found: # if an entry was found, we seek to the beginning of the entry and then either read the entry from file or just return the markers positions (aka the entry bounds) file.seek(startcursor + len(entrymarker)) if only_coord: # Return only coordinates of the start and end markers # Note: it is useful to just return the reading positions and not the entry itself because it can get quite huge and may overflow memory, thus we will read each ecc blocks on request using a generator. return [startcursor + len(entrymarker), endcursor] # depends on [control=['if'], data=[]] else: # Return the full entry's content return file.read(endcursor - startcursor - len(entrymarker)) # depends on [control=['if'], data=[]] else: # Nothing found (or no new entry to find, we've already found them all), so we return None return None
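A minimal usage sketch for the function above (a reader's aid, not part of the dataset row): the default marker is a Python 2 era byte string, so on Python 3 a bytes marker and a binary handle are assumed; the payloads are hypothetical.

import io

marker = b"\xfe\xff" * 5  # assumption: bytes marker for a binary handle
stream = io.BytesIO(marker + b"first entry" + marker + b"second entry")

while True:
    # only_coord=False returns the raw bytes between two markers
    entry = get_next_entry(stream, entrymarker=marker, only_coord=False)
    if entry is None:
        break  # no further entries in the file
    print(entry)  # b'first entry', then b'second entry'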
def _compute_a22_factor(self, imt):
    """
    Compute and return the a22 factor, equation 20, page 80.
    """
    if imt.name == 'PGV':
        return 0.0

    period = imt.period
    if period < 2.0:
        return 0.0
    else:
        return 0.0625 * (period - 2.0)
def function[_compute_a22_factor, parameter[self, imt]]: constant[ Compute and return the a22 factor, equation 20, page 80. ] if compare[name[imt].name equal[==] constant[PGV]] begin[:] return[constant[0.0]] variable[period] assign[=] name[imt].period if compare[name[period] less[<] constant[2.0]] begin[:] return[constant[0.0]]
keyword[def] identifier[_compute_a22_factor] ( identifier[self] , identifier[imt] ): literal[string] keyword[if] identifier[imt] . identifier[name] == literal[string] : keyword[return] literal[int] identifier[period] = identifier[imt] . identifier[period] keyword[if] identifier[period] < literal[int] : keyword[return] literal[int] keyword[else] : keyword[return] literal[int] *( identifier[period] - literal[int] )
def _compute_a22_factor(self, imt): """ Compute and return the a22 factor, equation 20, page 80. """ if imt.name == 'PGV': return 0.0 # depends on [control=['if'], data=[]] period = imt.period if period < 2.0: return 0.0 # depends on [control=['if'], data=[]] else: return 0.0625 * (period - 2.0)
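Since equation 20 is just a piecewise-linear ramp starting at T = 2 s, a standalone numeric check makes the branch behaviour concrete (the free function name here is hypothetical; the real code is a method on a GMPE class):

def a22_factor(period):
    # zero below 2 s, then a linear ramp with slope 1/16 per second
    return 0.0 if period < 2.0 else 0.0625 * (period - 2.0)

assert a22_factor(0.5) == 0.0
assert a22_factor(2.0) == 0.0     # boundary value: 0.0625 * (2.0 - 2.0)
assert a22_factor(4.0) == 0.125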
def read_solrad(filename):
    """
    Read NOAA SOLRAD [1]_ [2]_ fixed-width file into pandas dataframe.

    Parameters
    ----------
    filename: str
        filepath or url to read for the fixed-width file.

    Returns
    -------
    data: Dataframe
        A dataframe with DatetimeIndex and all of the variables in the file.

    Notes
    -----
    SOLRAD data resolution is described by the README_SOLRAD.txt:

    "Before 1-jan. 2015 the data were reported as 3-min averages;
    on and after 1-Jan. 2015, SOLRAD data are reported as 1-min.
    averages of 1-sec. samples."

    Here, missing data is flagged as NaN, rather than -9999.9.

    References
    ----------
    .. [1] NOAA SOLRAD Network
       `https://www.esrl.noaa.gov/gmd/grad/solrad/index.html
       <https://www.esrl.noaa.gov/gmd/grad/solrad/index.html>`_

    .. [2] B. B. Hicks et. al., (1996), The NOAA Integrated Surface
       Irradiance Study (ISIS). A New Surface Radiation Monitoring Program.
       Bull. Amer. Meteor. Soc., 77, 2857-2864.
       :doi:`10.1175/1520-0477(1996)077<2857:TNISIS>2.0.CO;2`
    """
    if 'msn' in filename:
        names = MADISON_HEADERS
        widths = MADISON_WIDTHS
        dtypes = MADISON_DTYPES
    else:
        names = HEADERS
        widths = WIDTHS
        dtypes = DTYPES

    # read in data
    data = pd.read_fwf(filename, header=None, skiprows=2, names=names,
                       widths=widths, na_values=-9999.9)

    # loop here because dtype kwarg not supported in read_fwf until 0.20
    for (col, _dtype) in zip(data.columns, dtypes):
        ser = data[col].astype(_dtype)
        if _dtype == 'float64':
            # older versions of pandas/numpy read '-9999.9' as
            # -9999.8999999999996 and fail to set nan in read_fwf,
            # so manually set nan
            ser = ser.where(ser > -9999, other=np.nan)
        data[col] = ser

    # set index
    # columns do not have leading 0s, so must zfill(2) to comply
    # with %m%d%H%M format
    dts = data[['month', 'day', 'hour', 'minute']].astype(str).apply(
        lambda x: x.str.zfill(2))
    dtindex = pd.to_datetime(
        data['year'].astype(str) + dts['month'] + dts['day'] +
        dts['hour'] + dts['minute'],
        format='%Y%m%d%H%M', utc=True)
    data = data.set_index(dtindex)
    try:
        # to_datetime(utc=True) does not work in older versions of pandas
        data = data.tz_localize('UTC')
    except TypeError:
        pass

    return data
def function[read_solrad, parameter[filename]]: constant[ Read NOAA SOLRAD [1]_ [2]_ fixed-width file into pandas dataframe. Parameters ---------- filename: str filepath or url to read for the fixed-width file. Returns ------- data: Dataframe A dataframe with DatetimeIndex and all of the variables in the file. Notes ----- SOLRAD data resolution is described by the README_SOLRAD.txt: "Before 1-jan. 2015 the data were reported as 3-min averages; on and after 1-Jan. 2015, SOLRAD data are reported as 1-min. averages of 1-sec. samples." Here, missing data is flagged as NaN, rather than -9999.9. References ---------- .. [1] NOAA SOLRAD Network `https://www.esrl.noaa.gov/gmd/grad/solrad/index.html <https://www.esrl.noaa.gov/gmd/grad/solrad/index.html>`_ .. [2] B. B. Hicks et. al., (1996), The NOAA Integrated Surface Irradiance Study (ISIS). A New Surface Radiation Monitoring Program. Bull. Amer. Meteor. Soc., 77, 2857-2864. :doi:`10.1175/1520-0477(1996)077<2857:TNISIS>2.0.CO;2` ] if compare[constant[msn] in name[filename]] begin[:] variable[names] assign[=] name[MADISON_HEADERS] variable[widths] assign[=] name[MADISON_WIDTHS] variable[dtypes] assign[=] name[MADISON_DTYPES] variable[data] assign[=] call[name[pd].read_fwf, parameter[name[filename]]] for taget[tuple[[<ast.Name object at 0x7da18ede53c0>, <ast.Name object at 0x7da18ede55d0>]]] in starred[call[name[zip], parameter[name[data].columns, name[dtypes]]]] begin[:] variable[ser] assign[=] call[call[name[data]][name[col]].astype, parameter[name[_dtype]]] if compare[name[_dtype] equal[==] constant[float64]] begin[:] variable[ser] assign[=] call[name[ser].where, parameter[compare[name[ser] greater[>] <ast.UnaryOp object at 0x7da18ede6320>]]] call[name[data]][name[col]] assign[=] name[ser] variable[dts] assign[=] call[call[call[name[data]][list[[<ast.Constant object at 0x7da18ede4ee0>, <ast.Constant object at 0x7da18ede6d40>, <ast.Constant object at 0x7da18ede5660>, <ast.Constant object at 0x7da18ede58a0>]]].astype, parameter[name[str]]].apply, parameter[<ast.Lambda object at 0x7da18ede6650>]] variable[dtindex] assign[=] call[name[pd].to_datetime, parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[call[name[data]][constant[year]].astype, parameter[name[str]]] + call[name[dts]][constant[month]]] + call[name[dts]][constant[day]]] + call[name[dts]][constant[hour]]] + call[name[dts]][constant[minute]]]]] variable[data] assign[=] call[name[data].set_index, parameter[name[dtindex]]] <ast.Try object at 0x7da18ede6830> return[name[data]]
keyword[def] identifier[read_solrad] ( identifier[filename] ): literal[string] keyword[if] literal[string] keyword[in] identifier[filename] : identifier[names] = identifier[MADISON_HEADERS] identifier[widths] = identifier[MADISON_WIDTHS] identifier[dtypes] = identifier[MADISON_DTYPES] keyword[else] : identifier[names] = identifier[HEADERS] identifier[widths] = identifier[WIDTHS] identifier[dtypes] = identifier[DTYPES] identifier[data] = identifier[pd] . identifier[read_fwf] ( identifier[filename] , identifier[header] = keyword[None] , identifier[skiprows] = literal[int] , identifier[names] = identifier[names] , identifier[widths] = identifier[widths] , identifier[na_values] =- literal[int] ) keyword[for] ( identifier[col] , identifier[_dtype] ) keyword[in] identifier[zip] ( identifier[data] . identifier[columns] , identifier[dtypes] ): identifier[ser] = identifier[data] [ identifier[col] ]. identifier[astype] ( identifier[_dtype] ) keyword[if] identifier[_dtype] == literal[string] : identifier[ser] = identifier[ser] . identifier[where] ( identifier[ser] >- literal[int] , identifier[other] = identifier[np] . identifier[nan] ) identifier[data] [ identifier[col] ]= identifier[ser] identifier[dts] = identifier[data] [[ literal[string] , literal[string] , literal[string] , literal[string] ]]. identifier[astype] ( identifier[str] ). identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[str] . identifier[zfill] ( literal[int] )) identifier[dtindex] = identifier[pd] . identifier[to_datetime] ( identifier[data] [ literal[string] ]. identifier[astype] ( identifier[str] )+ identifier[dts] [ literal[string] ]+ identifier[dts] [ literal[string] ]+ identifier[dts] [ literal[string] ]+ identifier[dts] [ literal[string] ], identifier[format] = literal[string] , identifier[utc] = keyword[True] ) identifier[data] = identifier[data] . identifier[set_index] ( identifier[dtindex] ) keyword[try] : identifier[data] = identifier[data] . identifier[tz_localize] ( literal[string] ) keyword[except] identifier[TypeError] : keyword[pass] keyword[return] identifier[data]
def read_solrad(filename): """ Read NOAA SOLRAD [1]_ [2]_ fixed-width file into pandas dataframe. Parameters ---------- filename: str filepath or url to read for the fixed-width file. Returns ------- data: Dataframe A dataframe with DatetimeIndex and all of the variables in the file. Notes ----- SOLRAD data resolution is described by the README_SOLRAD.txt: "Before 1-jan. 2015 the data were reported as 3-min averages; on and after 1-Jan. 2015, SOLRAD data are reported as 1-min. averages of 1-sec. samples." Here, missing data is flagged as NaN, rather than -9999.9. References ---------- .. [1] NOAA SOLRAD Network `https://www.esrl.noaa.gov/gmd/grad/solrad/index.html <https://www.esrl.noaa.gov/gmd/grad/solrad/index.html>`_ .. [2] B. B. Hicks et. al., (1996), The NOAA Integrated Surface Irradiance Study (ISIS). A New Surface Radiation Monitoring Program. Bull. Amer. Meteor. Soc., 77, 2857-2864. :doi:`10.1175/1520-0477(1996)077<2857:TNISIS>2.0.CO;2` """ if 'msn' in filename: names = MADISON_HEADERS widths = MADISON_WIDTHS dtypes = MADISON_DTYPES # depends on [control=['if'], data=[]] else: names = HEADERS widths = WIDTHS dtypes = DTYPES # read in data data = pd.read_fwf(filename, header=None, skiprows=2, names=names, widths=widths, na_values=-9999.9) # loop here because dtype kwarg not supported in read_fwf until 0.20 for (col, _dtype) in zip(data.columns, dtypes): ser = data[col].astype(_dtype) if _dtype == 'float64': # older verions of pandas/numpy read '-9999.9' as # -9999.8999999999996 and fail to set nan in read_fwf, # so manually set nan ser = ser.where(ser > -9999, other=np.nan) # depends on [control=['if'], data=[]] data[col] = ser # depends on [control=['for'], data=[]] # set index # columns do not have leading 0s, so must zfill(2) to comply # with %m%d%H%M format dts = data[['month', 'day', 'hour', 'minute']].astype(str).apply(lambda x: x.str.zfill(2)) dtindex = pd.to_datetime(data['year'].astype(str) + dts['month'] + dts['day'] + dts['hour'] + dts['minute'], format='%Y%m%d%H%M', utc=True) data = data.set_index(dtindex) try: # to_datetime(utc=True) does not work in older versions of pandas data = data.tz_localize('UTC') # depends on [control=['try'], data=[]] except TypeError: pass # depends on [control=['except'], data=[]] return data
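The subtle step in read_solrad is the index construction: the month/day/hour/minute columns arrive without leading zeros, so they are zero-padded before being concatenated into a %Y%m%d%H%M string. A self-contained sketch of just that step, with toy values:

import pandas as pd

data = pd.DataFrame({'year': [2015, 2015], 'month': [1, 12],
                     'day': [2, 31], 'hour': [3, 23], 'minute': [4, 59]})

dts = data[['month', 'day', 'hour', 'minute']].astype(str).apply(
    lambda x: x.str.zfill(2))  # '1' -> '01', '3' -> '03', ...
dtindex = pd.to_datetime(
    data['year'].astype(str) + dts['month'] + dts['day']
    + dts['hour'] + dts['minute'],
    format='%Y%m%d%H%M', utc=True)
print(dtindex[0])  # 2015-01-02 03:04:00+00:00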
def draw(self):
    """
    Draw guide

    Returns
    -------
    out : matplotlib.offsetbox.Offsetbox
        A drawing of this legend
    """
    obverse = slice(0, None)
    reverse = slice(None, None, -1)
    nbreak = len(self.key)
    themeable = self.theme.figure._themeable

    # When there is more than one guide, we keep
    # record of all of them using lists
    if 'legend_title' not in themeable:
        themeable['legend_title'] = []
    if 'legend_text_legend' not in themeable:
        themeable['legend_key'] = []
        themeable['legend_text_legend'] = []

    # title
    title_box = TextArea(self.title, textprops=dict(color='black'))
    themeable['legend_title'].append(title_box)

    # labels
    labels = []
    for item in self.key['label']:
        if isinstance(item, np.float) and np.float.is_integer(item):
            item = np.int(item)  # 1.0 to 1
        va = 'center' if self.label_position == 'top' else 'baseline'
        ta = TextArea(item, textprops=dict(color='black', va=va))
        labels.append(ta)
        themeable['legend_text_legend'].extend(labels)

    # Drawings
    drawings = []
    for i in range(nbreak):
        da = ColoredDrawingArea(self._keywidth[i], self._keyheight[i],
                                0, 0, color='white')
        # overlay geoms
        for gl in self.glayers:
            with suppress(IndexError):
                data = gl.data.iloc[i]
                da = gl.geom.draw_legend(data, da, gl.layer)
        drawings.append(da)
    themeable['legend_key'].append(drawings)

    # Match Drawings with labels to create the entries
    lookup = {
        'right': (HPacker, reverse),
        'left': (HPacker, obverse),
        'bottom': (VPacker, reverse),
        'top': (VPacker, obverse)}
    packer, slc = lookup[self.label_position]
    entries = []
    for d, l in zip(drawings, labels):
        e = packer(children=[l, d][slc],
                   sep=self._label_margin,
                   align='center',
                   pad=0)
        entries.append(e)

    # Put the entries together in rows or columns
    # A chunk is either a row or a column of entries
    # for a single legend
    if self.byrow:
        chunk_size, packers = self.ncol, [HPacker, VPacker]
        sep1 = self._legend_entry_spacing_x
        sep2 = self._legend_entry_spacing_y
    else:
        chunk_size, packers = self.nrow, [VPacker, HPacker]
        sep1 = self._legend_entry_spacing_y
        sep2 = self._legend_entry_spacing_x

    if self.reverse:
        entries = entries[::-1]

    chunks = []
    for i in range(len(entries)):
        start = i*chunk_size
        stop = start + chunk_size
        s = islice(entries, start, stop)
        chunks.append(list(s))
        if stop >= len(entries):
            break

    chunk_boxes = []
    for chunk in chunks:
        d1 = packers[0](children=chunk,
                        align='left',
                        sep=sep1, pad=0,)
        chunk_boxes.append(d1)

    # Put all the entries (row & columns) together
    entries_box = packers[1](children=chunk_boxes,
                             align='baseline',
                             sep=sep2, pad=0)
    # Put the title and entries together
    packer, slc = lookup[self.title_position]
    children = [title_box, entries_box][slc]
    box = packer(children=children,
                 sep=self._title_margin,
                 align=self._title_align,
                 pad=self._legend_margin)
    return box
def function[draw, parameter[self]]: constant[ Draw guide Returns ------- out : matplotlib.offsetbox.Offsetbox A drawing of this legend ] variable[obverse] assign[=] call[name[slice], parameter[constant[0], constant[None]]] variable[reverse] assign[=] call[name[slice], parameter[constant[None], constant[None], <ast.UnaryOp object at 0x7da18eb56bf0>]] variable[nbreak] assign[=] call[name[len], parameter[name[self].key]] variable[themeable] assign[=] name[self].theme.figure._themeable if compare[constant[legend_title] <ast.NotIn object at 0x7da2590d7190> name[themeable]] begin[:] call[name[themeable]][constant[legend_title]] assign[=] list[[]] if compare[constant[legend_text_legend] <ast.NotIn object at 0x7da2590d7190> name[themeable]] begin[:] call[name[themeable]][constant[legend_key]] assign[=] list[[]] call[name[themeable]][constant[legend_text_legend]] assign[=] list[[]] variable[title_box] assign[=] call[name[TextArea], parameter[name[self].title]] call[call[name[themeable]][constant[legend_title]].append, parameter[name[title_box]]] variable[labels] assign[=] list[[]] for taget[name[item]] in starred[call[name[self].key][constant[label]]] begin[:] if <ast.BoolOp object at 0x7da18eb56ef0> begin[:] variable[item] assign[=] call[name[np].int, parameter[name[item]]] variable[va] assign[=] <ast.IfExp object at 0x7da18eb55270> variable[ta] assign[=] call[name[TextArea], parameter[name[item]]] call[name[labels].append, parameter[name[ta]]] call[call[name[themeable]][constant[legend_text_legend]].extend, parameter[name[labels]]] variable[drawings] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[name[nbreak]]]] begin[:] variable[da] assign[=] call[name[ColoredDrawingArea], parameter[call[name[self]._keywidth][name[i]], call[name[self]._keyheight][name[i]], constant[0], constant[0]]] for taget[name[gl]] in starred[name[self].glayers] begin[:] with call[name[suppress], parameter[name[IndexError]]] begin[:] variable[data] assign[=] call[name[gl].data.iloc][name[i]] variable[da] assign[=] call[name[gl].geom.draw_legend, parameter[name[data], name[da], name[gl].layer]] call[name[drawings].append, parameter[name[da]]] call[call[name[themeable]][constant[legend_key]].append, parameter[name[drawings]]] variable[lookup] assign[=] dictionary[[<ast.Constant object at 0x7da207f00a60>, <ast.Constant object at 0x7da207f00850>, <ast.Constant object at 0x7da207f00a00>, <ast.Constant object at 0x7da207f02a70>], [<ast.Tuple object at 0x7da207f03fd0>, <ast.Tuple object at 0x7da207f020b0>, <ast.Tuple object at 0x7da207f03670>, <ast.Tuple object at 0x7da207f00dc0>]] <ast.Tuple object at 0x7da207f03460> assign[=] call[name[lookup]][name[self].label_position] variable[entries] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da207f02b90>, <ast.Name object at 0x7da207f00430>]]] in starred[call[name[zip], parameter[name[drawings], name[labels]]]] begin[:] variable[e] assign[=] call[name[packer], parameter[]] call[name[entries].append, parameter[name[e]]] if name[self].byrow begin[:] <ast.Tuple object at 0x7da207f02ad0> assign[=] tuple[[<ast.Attribute object at 0x7da207f00ca0>, <ast.List object at 0x7da207f032b0>]] variable[sep1] assign[=] name[self]._legend_entry_spacing_x variable[sep2] assign[=] name[self]._legend_entry_spacing_y if name[self].reverse begin[:] variable[entries] assign[=] call[name[entries]][<ast.Slice object at 0x7da20c794b80>] variable[chunks] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[entries]]]]]] begin[:] variable[start] assign[=] binary_operation[name[i] * name[chunk_size]] variable[stop] assign[=] binary_operation[name[start] + name[chunk_size]] variable[s] assign[=] call[name[islice], parameter[name[entries], name[start], name[stop]]] call[name[chunks].append, parameter[call[name[list], parameter[name[s]]]]] if compare[name[stop] greater_or_equal[>=] call[name[len], parameter[name[entries]]]] begin[:] break variable[chunk_boxes] assign[=] list[[]] for taget[name[chunk]] in starred[name[chunks]] begin[:] variable[d1] assign[=] call[call[name[packers]][constant[0]], parameter[]] call[name[chunk_boxes].append, parameter[name[d1]]] variable[entries_box] assign[=] call[call[name[packers]][constant[1]], parameter[]] <ast.Tuple object at 0x7da20c7962c0> assign[=] call[name[lookup]][name[self].title_position] variable[children] assign[=] call[list[[<ast.Name object at 0x7da20c795570>, <ast.Name object at 0x7da20c795180>]]][name[slc]] variable[box] assign[=] call[name[packer], parameter[]] return[name[box]]
keyword[def] identifier[draw] ( identifier[self] ): literal[string] identifier[obverse] = identifier[slice] ( literal[int] , keyword[None] ) identifier[reverse] = identifier[slice] ( keyword[None] , keyword[None] ,- literal[int] ) identifier[nbreak] = identifier[len] ( identifier[self] . identifier[key] ) identifier[themeable] = identifier[self] . identifier[theme] . identifier[figure] . identifier[_themeable] keyword[if] literal[string] keyword[not] keyword[in] identifier[themeable] : identifier[themeable] [ literal[string] ]=[] keyword[if] literal[string] keyword[not] keyword[in] identifier[themeable] : identifier[themeable] [ literal[string] ]=[] identifier[themeable] [ literal[string] ]=[] identifier[title_box] = identifier[TextArea] ( identifier[self] . identifier[title] , identifier[textprops] = identifier[dict] ( identifier[color] = literal[string] )) identifier[themeable] [ literal[string] ]. identifier[append] ( identifier[title_box] ) identifier[labels] =[] keyword[for] identifier[item] keyword[in] identifier[self] . identifier[key] [ literal[string] ]: keyword[if] identifier[isinstance] ( identifier[item] , identifier[np] . identifier[float] ) keyword[and] identifier[np] . identifier[float] . identifier[is_integer] ( identifier[item] ): identifier[item] = identifier[np] . identifier[int] ( identifier[item] ) identifier[va] = literal[string] keyword[if] identifier[self] . identifier[label_position] == literal[string] keyword[else] literal[string] identifier[ta] = identifier[TextArea] ( identifier[item] , identifier[textprops] = identifier[dict] ( identifier[color] = literal[string] , identifier[va] = identifier[va] )) identifier[labels] . identifier[append] ( identifier[ta] ) identifier[themeable] [ literal[string] ]. identifier[extend] ( identifier[labels] ) identifier[drawings] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nbreak] ): identifier[da] = identifier[ColoredDrawingArea] ( identifier[self] . identifier[_keywidth] [ identifier[i] ], identifier[self] . identifier[_keyheight] [ identifier[i] ], literal[int] , literal[int] , identifier[color] = literal[string] ) keyword[for] identifier[gl] keyword[in] identifier[self] . identifier[glayers] : keyword[with] identifier[suppress] ( identifier[IndexError] ): identifier[data] = identifier[gl] . identifier[data] . identifier[iloc] [ identifier[i] ] identifier[da] = identifier[gl] . identifier[geom] . identifier[draw_legend] ( identifier[data] , identifier[da] , identifier[gl] . identifier[layer] ) identifier[drawings] . identifier[append] ( identifier[da] ) identifier[themeable] [ literal[string] ]. identifier[append] ( identifier[drawings] ) identifier[lookup] ={ literal[string] :( identifier[HPacker] , identifier[reverse] ), literal[string] :( identifier[HPacker] , identifier[obverse] ), literal[string] :( identifier[VPacker] , identifier[reverse] ), literal[string] :( identifier[VPacker] , identifier[obverse] )} identifier[packer] , identifier[slc] = identifier[lookup] [ identifier[self] . identifier[label_position] ] identifier[entries] =[] keyword[for] identifier[d] , identifier[l] keyword[in] identifier[zip] ( identifier[drawings] , identifier[labels] ): identifier[e] = identifier[packer] ( identifier[children] =[ identifier[l] , identifier[d] ][ identifier[slc] ], identifier[sep] = identifier[self] . identifier[_label_margin] , identifier[align] = literal[string] , identifier[pad] = literal[int] ) identifier[entries] . identifier[append] ( identifier[e] ) keyword[if] identifier[self] . identifier[byrow] : identifier[chunk_size] , identifier[packers] = identifier[self] . identifier[ncol] ,[ identifier[HPacker] , identifier[VPacker] ] identifier[sep1] = identifier[self] . identifier[_legend_entry_spacing_x] identifier[sep2] = identifier[self] . identifier[_legend_entry_spacing_y] keyword[else] : identifier[chunk_size] , identifier[packers] = identifier[self] . identifier[nrow] ,[ identifier[VPacker] , identifier[HPacker] ] identifier[sep1] = identifier[self] . identifier[_legend_entry_spacing_y] identifier[sep2] = identifier[self] . identifier[_legend_entry_spacing_x] keyword[if] identifier[self] . identifier[reverse] : identifier[entries] = identifier[entries] [::- literal[int] ] identifier[chunks] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[entries] )): identifier[start] = identifier[i] * identifier[chunk_size] identifier[stop] = identifier[start] + identifier[chunk_size] identifier[s] = identifier[islice] ( identifier[entries] , identifier[start] , identifier[stop] ) identifier[chunks] . identifier[append] ( identifier[list] ( identifier[s] )) keyword[if] identifier[stop] >= identifier[len] ( identifier[entries] ): keyword[break] identifier[chunk_boxes] =[] keyword[for] identifier[chunk] keyword[in] identifier[chunks] : identifier[d1] = identifier[packers] [ literal[int] ]( identifier[children] = identifier[chunk] , identifier[align] = literal[string] , identifier[sep] = identifier[sep1] , identifier[pad] = literal[int] ,) identifier[chunk_boxes] . identifier[append] ( identifier[d1] ) identifier[entries_box] = identifier[packers] [ literal[int] ]( identifier[children] = identifier[chunk_boxes] , identifier[align] = literal[string] , identifier[sep] = identifier[sep2] , identifier[pad] = literal[int] ) identifier[packer] , identifier[slc] = identifier[lookup] [ identifier[self] . identifier[title_position] ] identifier[children] =[ identifier[title_box] , identifier[entries_box] ][ identifier[slc] ] identifier[box] = identifier[packer] ( identifier[children] = identifier[children] , identifier[sep] = identifier[self] . identifier[_title_margin] , identifier[align] = identifier[self] . identifier[_title_align] , identifier[pad] = identifier[self] . identifier[_legend_margin] ) keyword[return] identifier[box]
def draw(self): """ Draw guide Returns ------- out : matplotlib.offsetbox.Offsetbox A drawing of this legend """ obverse = slice(0, None) reverse = slice(None, None, -1) nbreak = len(self.key) themeable = self.theme.figure._themeable # When there is more than one guide, we keep # record of all of them using lists if 'legend_title' not in themeable: themeable['legend_title'] = [] # depends on [control=['if'], data=['themeable']] if 'legend_text_legend' not in themeable: themeable['legend_key'] = [] themeable['legend_text_legend'] = [] # depends on [control=['if'], data=['themeable']] # title title_box = TextArea(self.title, textprops=dict(color='black')) themeable['legend_title'].append(title_box) # labels labels = [] for item in self.key['label']: if isinstance(item, np.float) and np.float.is_integer(item): item = np.int(item) # 1.0 to 1 # depends on [control=['if'], data=[]] va = 'center' if self.label_position == 'top' else 'baseline' ta = TextArea(item, textprops=dict(color='black', va=va)) labels.append(ta) themeable['legend_text_legend'].extend(labels) # depends on [control=['for'], data=['item']] # Drawings drawings = [] for i in range(nbreak): da = ColoredDrawingArea(self._keywidth[i], self._keyheight[i], 0, 0, color='white') # overlay geoms for gl in self.glayers: with suppress(IndexError): data = gl.data.iloc[i] da = gl.geom.draw_legend(data, da, gl.layer) # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['gl']] drawings.append(da) # depends on [control=['for'], data=['i']] themeable['legend_key'].append(drawings) # Match Drawings with labels to create the entries lookup = {'right': (HPacker, reverse), 'left': (HPacker, obverse), 'bottom': (VPacker, reverse), 'top': (VPacker, obverse)} (packer, slc) = lookup[self.label_position] entries = [] for (d, l) in zip(drawings, labels): e = packer(children=[l, d][slc], sep=self._label_margin, align='center', pad=0) entries.append(e) # depends on [control=['for'], data=[]] # Put the entries together in rows or columns # A chunk is either a row or a column of entries # for a single legend if self.byrow: (chunk_size, packers) = (self.ncol, [HPacker, VPacker]) sep1 = self._legend_entry_spacing_x sep2 = self._legend_entry_spacing_y # depends on [control=['if'], data=[]] else: (chunk_size, packers) = (self.nrow, [VPacker, HPacker]) sep1 = self._legend_entry_spacing_y sep2 = self._legend_entry_spacing_x if self.reverse: entries = entries[::-1] # depends on [control=['if'], data=[]] chunks = [] for i in range(len(entries)): start = i * chunk_size stop = start + chunk_size s = islice(entries, start, stop) chunks.append(list(s)) if stop >= len(entries): break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] chunk_boxes = [] for chunk in chunks: d1 = packers[0](children=chunk, align='left', sep=sep1, pad=0) chunk_boxes.append(d1) # depends on [control=['for'], data=['chunk']] # Put all the entries (row & columns) together entries_box = packers[1](children=chunk_boxes, align='baseline', sep=sep2, pad=0) # Put the title and entries together (packer, slc) = lookup[self.title_position] children = [title_box, entries_box][slc] box = packer(children=children, sep=self._title_margin, align=self._title_align, pad=self._legend_margin) return box
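The whole legend above is assembled from matplotlib offsetbox containers; the same TextArea/HPacker/VPacker pattern works standalone. A minimal sketch (plotnine's ColoredDrawingArea is replaced by a plain DrawingArea here, and a recent matplotlib that accepts a string loc is assumed):

import matplotlib.pyplot as plt
from matplotlib.offsetbox import (AnchoredOffsetbox, DrawingArea, HPacker,
                                  TextArea, VPacker)
from matplotlib.patches import Rectangle

def entry(label, color):
    # one legend entry: a colored key box packed next to its label
    da = DrawingArea(20, 20, 0, 0)
    da.add_artist(Rectangle((0, 0), 20, 20, fc=color))
    return HPacker(children=[da, TextArea(label)], sep=4, align='center', pad=0)

box = VPacker(children=[TextArea('legend'), entry('a', 'C0'), entry('b', 'C1')],
              sep=4, align='left', pad=2)

fig, ax = plt.subplots()
ax.add_artist(AnchoredOffsetbox(loc='upper right', child=box, frameon=True))
plt.show()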
def tweets_files(string, path):
    """Iterates over json files in path."""
    for filename in os.listdir(path):
        if re.match(string, filename) and ".jsonl" in filename:
            f = gzip.open if ".gz" in filename else open
            yield path + filename, f
            Ellipsis
def function[tweets_files, parameter[string, path]]: constant[Iterates over json files in path.] for taget[name[filename]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:] if <ast.BoolOp object at 0x7da20c6a9f90> begin[:] variable[f] assign[=] <ast.IfExp object at 0x7da20c6ab580> <ast.Yield object at 0x7da20c6a97e0> name[Ellipsis]
keyword[def] identifier[tweets_files] ( identifier[string] , identifier[path] ): literal[string] keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ): keyword[if] identifier[re] . identifier[match] ( identifier[string] , identifier[filename] ) keyword[and] literal[string] keyword[in] identifier[filename] : identifier[f] = identifier[gzip] . identifier[open] keyword[if] literal[string] keyword[in] identifier[filename] keyword[else] identifier[open] keyword[yield] identifier[path] + identifier[filename] , identifier[f] identifier[Ellipsis]
def tweets_files(string, path): """Iterates over json files in path.""" for filename in os.listdir(path): if re.match(string, filename) and '.jsonl' in filename: f = gzip.open if '.gz' in filename else open yield (path + filename, f) Ellipsis # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']]
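Because each yielded pair carries the opener that matches the file, callers handle gzipped and plain .jsonl files uniformly. A hedged usage sketch (the directory and pattern are hypothetical; note the function concatenates path + filename, so the path needs its trailing separator):

import json

for filepath, opener in tweets_files(r"tweets_.*", "data/"):
    with opener(filepath, "rt") as fh:  # gzip.open or open, as matched
        for line in fh:
            tweet = json.loads(line)    # one JSON object per .jsonl line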
def predict(dataset, fitmodel_url, save_results=True, show=False):
    """
    Function starts a job that makes predictions to input data with a given model.

    Parameters
    ----------
    input - dataset object with input urls and other parameters
    fitmodel_url - model created in fit phase
    save_results - save results to ddfs
    show - show info about job execution

    Returns
    -------
    Urls with predictions on ddfs
    """
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job, result_iterator

    if "linsvm_fitmodel" not in fitmodel_url:
        raise Exception("Incorrect fit model.")

    job = Job(worker=Worker(save_results=save_results))
    # job parallelizes execution of mappers
    job.pipeline = [
        ("split", Stage("map", input_chain=dataset.params["input_chain"],
                        init=simple_init, process=map_predict))]

    job.params = dataset.params
    job.params["fit_params"] = [v for _, v in
                                result_iterator(fitmodel_url["linsvm_fitmodel"])][0]
    job.run(name="linsvm_predict", input=dataset.params["data_tag"])

    return job.wait(show=show)
def function[predict, parameter[dataset, fitmodel_url, save_results, show]]: constant[ Function starts a job that makes predictions to input data with a given model. Parameters ---------- input - dataset object with input urls and other parameters fitmodel_url - model created in fit phase save_results - save results to ddfs show - show info about job execution Returns ------- Urls with predictions on ddfs ] from relative_module[disco.worker.pipeline.worker] import module[Worker], module[Stage] from relative_module[disco.core] import module[Job], module[result_iterator] if compare[constant[linsvm_fitmodel] <ast.NotIn object at 0x7da2590d7190> name[fitmodel_url]] begin[:] <ast.Raise object at 0x7da18eb55a80> variable[job] assign[=] call[name[Job], parameter[]] name[job].pipeline assign[=] list[[<ast.Tuple object at 0x7da18eb55990>]] name[job].params assign[=] name[dataset].params call[name[job].params][constant[fit_params]] assign[=] call[<ast.ListComp object at 0x7da18eb56950>][constant[0]] call[name[job].run, parameter[]] return[call[name[job].wait, parameter[]]]
keyword[def] identifier[predict] ( identifier[dataset] , identifier[fitmodel_url] , identifier[save_results] = keyword[True] , identifier[show] = keyword[False] ): literal[string] keyword[from] identifier[disco] . identifier[worker] . identifier[pipeline] . identifier[worker] keyword[import] identifier[Worker] , identifier[Stage] keyword[from] identifier[disco] . identifier[core] keyword[import] identifier[Job] , identifier[result_iterator] keyword[if] literal[string] keyword[not] keyword[in] identifier[fitmodel_url] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[job] = identifier[Job] ( identifier[worker] = identifier[Worker] ( identifier[save_results] = identifier[save_results] )) identifier[job] . identifier[pipeline] =[ ( literal[string] , identifier[Stage] ( literal[string] , identifier[input_chain] = identifier[dataset] . identifier[params] [ literal[string] ], identifier[init] = identifier[simple_init] , identifier[process] = identifier[map_predict] ))] identifier[job] . identifier[params] = identifier[dataset] . identifier[params] identifier[job] . identifier[params] [ literal[string] ]=[ identifier[v] keyword[for] identifier[_] , identifier[v] keyword[in] identifier[result_iterator] ( identifier[fitmodel_url] [ literal[string] ])][ literal[int] ] identifier[job] . identifier[run] ( identifier[name] = literal[string] , identifier[input] = identifier[dataset] . identifier[params] [ literal[string] ]) keyword[return] identifier[job] . identifier[wait] ( identifier[show] = identifier[show] )
def predict(dataset, fitmodel_url, save_results=True, show=False): """ Function starts a job that makes predictions to input data with a given model. Parameters ---------- input - dataset object with input urls and other parameters fitmodel_url - model created in fit phase save_results - save results to ddfs show - show info about job execution Returns ------- Urls with predictions on ddfs """ from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator if 'linsvm_fitmodel' not in fitmodel_url: raise Exception('Incorrect fit model.') # depends on [control=['if'], data=[]] job = Job(worker=Worker(save_results=save_results)) # job parallelizes execution of mappers job.pipeline = [('split', Stage('map', input_chain=dataset.params['input_chain'], init=simple_init, process=map_predict))] job.params = dataset.params job.params['fit_params'] = [v for (_, v) in result_iterator(fitmodel_url['linsvm_fitmodel'])][0] job.run(name='linsvm_predict', input=dataset.params['data_tag']) return job.wait(show=show)
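A hedged call sketch for the Disco job above; the dataset object and the fit output are hypothetical and only assume what the code itself requires (a discomll-style dataset and a dict carrying the 'linsvm_fitmodel' tag produced by the fit phase):

from disco.core import result_iterator

# hypothetical: `ds` built by the discomll data loader, `fit_out` returned by fit()
results = predict(ds, fit_out, save_results=True, show=False)
for key, value in result_iterator(results):
    print(key, value)  # one prediction per input sample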
def send_packet(self, pattern, packet_buffer, callback=None, limit=10):
    """
    Send a buffer as a packet to a network interface and optionally capture a response
    :param pattern: a wildcard pattern to match the description of a network interface to capture packets on
    :param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT)
    :param callback: If not None, a function to call with each intercepted packet
    :param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
    """
    device_name, desc = WinPcapDevices.get_matching_device(pattern)
    if device_name is not None:
        with WinPcap(device_name) as capture:
            capture.send(packet_buffer)
            if callback is not None:
                capture.run(callback=callback, limit=limit)
def function[send_packet, parameter[self, pattern, packet_buffer, callback, limit]]: constant[ Send a buffer as a packet to a network interface and optionally capture a response :param pattern: a wildcard pattern to match the description of a network interface to capture packets on :param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT) :param callback: If not None, a function to call with each intercepted packet :param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity) ] <ast.Tuple object at 0x7da1b1022950> assign[=] call[name[WinPcapDevices].get_matching_device, parameter[name[pattern]]] if compare[name[device_name] is_not constant[None]] begin[:] with call[name[WinPcap], parameter[name[device_name]]] begin[:] call[name[capture].send, parameter[name[packet_buffer]]] if compare[name[callback] is_not constant[None]] begin[:] call[name[capture].run, parameter[]]
keyword[def] identifier[send_packet] ( identifier[self] , identifier[pattern] , identifier[packet_buffer] , identifier[callback] = keyword[None] , identifier[limit] = literal[int] ): literal[string] identifier[device_name] , identifier[desc] = identifier[WinPcapDevices] . identifier[get_matching_device] ( identifier[pattern] ) keyword[if] identifier[device_name] keyword[is] keyword[not] keyword[None] : keyword[with] identifier[WinPcap] ( identifier[device_name] ) keyword[as] identifier[capture] : identifier[capture] . identifier[send] ( identifier[packet_buffer] ) keyword[if] identifier[callback] keyword[is] keyword[not] keyword[None] : identifier[capture] . identifier[run] ( identifier[callback] = identifier[callback] , identifier[limit] = identifier[limit] )
def send_packet(self, pattern, packet_buffer, callback=None, limit=10): """ Send a buffer as a packet to a network interface and optionally capture a response :param pattern: a wildcard pattern to match the description of a network interface to capture packets on :param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT) :param callback: If not None, a function to call with each intercepted packet :param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity) """ (device_name, desc) = WinPcapDevices.get_matching_device(pattern) if device_name is not None: with WinPcap(device_name) as capture: capture.send(packet_buffer) if callback is not None: capture.run(callback=callback, limit=limit) # depends on [control=['if'], data=['callback']] # depends on [control=['with'], data=['capture']] # depends on [control=['if'], data=['device_name']]
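A usage sketch, assuming this method lives on the module's WinPcapUtils-style helper class; the frame bytes, device pattern, and callback signature below are assumptions, not a documented contract:

def on_packet(win_pcap, param, header, pkt_data):
    # hypothetical callback signature; just report what came back
    print("captured %d bytes" % len(pkt_data))

frame = b"\x00" * 42                   # hypothetical Ethernet frame
utils = WinPcapUtils()                 # hypothetical owner of send_packet
utils.send_packet("*Ethernet*", frame, callback=on_packet, limit=5)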
def get_message(self, dummy0, dummy1, use_cmd=False):
    """Get a getmore message."""
    ns = _UJOIN % (self.db, self.coll)

    if use_cmd:
        ns = _UJOIN % (self.db, "$cmd")
        spec = self.as_command()[0]
        return query(0, ns, 0, -1, spec, None, self.codec_options)

    return get_more(ns, self.ntoreturn, self.cursor_id)
def function[get_message, parameter[self, dummy0, dummy1, use_cmd]]: constant[Get a getmore message.] variable[ns] assign[=] binary_operation[name[_UJOIN] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c7c8910>, <ast.Attribute object at 0x7da20c7cb820>]]] if name[use_cmd] begin[:] variable[ns] assign[=] binary_operation[name[_UJOIN] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c7c9e10>, <ast.Constant object at 0x7da20c7cae30>]]] variable[spec] assign[=] call[call[name[self].as_command, parameter[]]][constant[0]] return[call[name[query], parameter[constant[0], name[ns], constant[0], <ast.UnaryOp object at 0x7da20c7ca5c0>, name[spec], constant[None], name[self].codec_options]]] return[call[name[get_more], parameter[name[ns], name[self].ntoreturn, name[self].cursor_id]]]
keyword[def] identifier[get_message] ( identifier[self] , identifier[dummy0] , identifier[dummy1] , identifier[use_cmd] = keyword[False] ): literal[string] identifier[ns] = identifier[_UJOIN] %( identifier[self] . identifier[db] , identifier[self] . identifier[coll] ) keyword[if] identifier[use_cmd] : identifier[ns] = identifier[_UJOIN] %( identifier[self] . identifier[db] , literal[string] ) identifier[spec] = identifier[self] . identifier[as_command] ()[ literal[int] ] keyword[return] identifier[query] ( literal[int] , identifier[ns] , literal[int] ,- literal[int] , identifier[spec] , keyword[None] , identifier[self] . identifier[codec_options] ) keyword[return] identifier[get_more] ( identifier[ns] , identifier[self] . identifier[ntoreturn] , identifier[self] . identifier[cursor_id] )
def get_message(self, dummy0, dummy1, use_cmd=False): """Get a getmore message.""" ns = _UJOIN % (self.db, self.coll) if use_cmd: ns = _UJOIN % (self.db, '$cmd') spec = self.as_command()[0] return query(0, ns, 0, -1, spec, None, self.codec_options) # depends on [control=['if'], data=[]] return get_more(ns, self.ntoreturn, self.cursor_id)
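The only moving part is the namespace string: a legacy OP_GET_MORE targets 'db.collection', while the command form is sent as a query against the database's $cmd collection. Assuming _UJOIN is the usual dotted-join pattern:

_UJOIN = u"%s.%s"  # assumption: dotted namespace join, as used across the driver

db, coll = "shop", "orders"
print(_UJOIN % (db, coll))    # shop.orders -> legacy getmore target
print(_UJOIN % (db, "$cmd"))  # shop.$cmd   -> getMore-as-command target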
def _send(self):
    """ Send all queued messages to the server. """
    data = self.output_buffer.view()
    if not data:
        return
    if self.closed():
        raise self.Error("Failed to write to closed connection {!r}".format(self.server.address))
    if self.defunct():
        raise self.Error("Failed to write to defunct connection {!r}".format(self.server.address))
    self.socket.sendall(data)
    self.output_buffer.clear()
def function[_send, parameter[self]]: constant[ Send all queued messages to the server. ] variable[data] assign[=] call[name[self].output_buffer.view, parameter[]] if <ast.UnaryOp object at 0x7da207f02fb0> begin[:] return[None] if call[name[self].closed, parameter[]] begin[:] <ast.Raise object at 0x7da207f01300> if call[name[self].defunct, parameter[]] begin[:] <ast.Raise object at 0x7da207f02f50> call[name[self].socket.sendall, parameter[name[data]]] call[name[self].output_buffer.clear, parameter[]]
keyword[def] identifier[_send] ( identifier[self] ): literal[string] identifier[data] = identifier[self] . identifier[output_buffer] . identifier[view] () keyword[if] keyword[not] identifier[data] : keyword[return] keyword[if] identifier[self] . identifier[closed] (): keyword[raise] identifier[self] . identifier[Error] ( literal[string] . identifier[format] ( identifier[self] . identifier[server] . identifier[address] )) keyword[if] identifier[self] . identifier[defunct] (): keyword[raise] identifier[self] . identifier[Error] ( literal[string] . identifier[format] ( identifier[self] . identifier[server] . identifier[address] )) identifier[self] . identifier[socket] . identifier[sendall] ( identifier[data] ) identifier[self] . identifier[output_buffer] . identifier[clear] ()
def _send(self): """ Send all queued messages to the server. """ data = self.output_buffer.view() if not data: return # depends on [control=['if'], data=[]] if self.closed(): raise self.Error('Failed to write to closed connection {!r}'.format(self.server.address)) # depends on [control=['if'], data=[]] if self.defunct(): raise self.Error('Failed to write to defunct connection {!r}'.format(self.server.address)) # depends on [control=['if'], data=[]] self.socket.sendall(data) self.output_buffer.clear()
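The flush pattern itself (return early on an empty buffer, refuse dead connections, sendall, then clear) is easy to lift out; a minimal standalone sketch with a bytearray standing in for the driver's output buffer:

import socket

class Sender:
    def __init__(self, sock):
        self.sock = sock
        self.buf = bytearray()

    def flush(self):
        if not self.buf:
            return                          # nothing queued
        self.sock.sendall(bytes(self.buf))  # sends everything or raises
        del self.buf[:]                     # clear only after a full write

# e.g. Sender(socket.create_connection(("localhost", 7687))).flush()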
def parametrize(self, operator, params):
    """ Return a parser that parses an operator with parameters. """
    return (CaselessKeyword(operator, identChars=alphanums) +
            self.parameter(params))
def function[parametrize, parameter[self, operator, params]]: constant[ Return a parser that parses an operator with parameters. ] return[binary_operation[call[name[CaselessKeyword], parameter[name[operator]]] + call[name[self].parameter, parameter[name[params]]]]]
keyword[def] identifier[parametrize] ( identifier[self] , identifier[operator] , identifier[params] ): literal[string] keyword[return] ( identifier[CaselessKeyword] ( identifier[operator] , identifier[identChars] = identifier[alphanums] )+ identifier[self] . identifier[parameter] ( identifier[params] ))
def parametrize(self, operator, params): """ Return a parser that parses an operator with parameters. """ return CaselessKeyword(operator, identChars=alphanums) + self.parameter(params)
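A quick standalone check of the same pyparsing pattern; self.parameter(params) is faked here with a bare integer argument:

from pyparsing import CaselessKeyword, Word, alphanums, nums

parameter = Word(nums)  # stand-in for self.parameter(params)
parser = CaselessKeyword("limit", identChars=alphanums) + parameter

print(parser.parseString("LIMIT 10").asList())  # ['limit', '10']
print(parser.parseString("limit 3").asList())   # ['limit', '3']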
def _add_details(self, info):
    """
    The 'id' attribute is not supplied directly, but included as part of
    the 'href' value. Also, convert the dicts for messages into
    QueueMessage objects.
    """
    msg_dicts = info.pop("messages", [])
    super(QueueClaim, self)._add_details(info)
    parsed = urllib.parse.urlparse(self.href)
    self.id = parsed.path.rsplit("/", 1)[-1]
    self.messages = [QueueMessage(self.manager._message_manager, item)
                     for item in msg_dicts]
def function[_add_details, parameter[self, info]]: constant[ The 'id' attribute is not supplied directly, but included as part of the 'href' value. Also, convert the dicts for messages into QueueMessage objects. ] variable[msg_dicts] assign[=] call[name[info].pop, parameter[constant[messages], list[[]]]] call[call[name[super], parameter[name[QueueClaim], name[self]]]._add_details, parameter[name[info]]] variable[parsed] assign[=] call[name[urllib].parse.urlparse, parameter[name[self].href]] name[self].id assign[=] call[call[name[parsed].path.rsplit, parameter[constant[/], constant[1]]]][<ast.UnaryOp object at 0x7da18bcca620>] name[self].messages assign[=] <ast.ListComp object at 0x7da18bccb640>
keyword[def] identifier[_add_details] ( identifier[self] , identifier[info] ): literal[string] identifier[msg_dicts] = identifier[info] . identifier[pop] ( literal[string] ,[]) identifier[super] ( identifier[QueueClaim] , identifier[self] ). identifier[_add_details] ( identifier[info] ) identifier[parsed] = identifier[urllib] . identifier[parse] . identifier[urlparse] ( identifier[self] . identifier[href] ) identifier[self] . identifier[id] = identifier[parsed] . identifier[path] . identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ] identifier[self] . identifier[messages] =[ identifier[QueueMessage] ( identifier[self] . identifier[manager] . identifier[_message_manager] , identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[msg_dicts] ]
def _add_details(self, info): """ The 'id' attribute is not supplied directly, but included as part of the 'href' value. Also, convert the dicts for messages into QueueMessage objects. """ msg_dicts = info.pop('messages', []) super(QueueClaim, self)._add_details(info) parsed = urllib.parse.urlparse(self.href) self.id = parsed.path.rsplit('/', 1)[-1] self.messages = [QueueMessage(self.manager._message_manager, item) for item in msg_dicts]
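The id extraction is simply "last path segment of the href"; standalone, with a hypothetical claim URL:

import urllib.parse

href = "https://queues.example.com/v1/queues/demo/claims/51db7067821e72"
claim_id = urllib.parse.urlparse(href).path.rsplit("/", 1)[-1]
print(claim_id)  # 51db7067821e72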
def stripe_to_db(self, data):
    """Convert the raw timestamp value to a DateTime representation."""
    val = data.get(self.name)
    # Note: 0 is a possible return value, which is 'falseish'
    if val is not None:
        return convert_tstamp(val)
def function[stripe_to_db, parameter[self, data]]: constant[Convert the raw timestamp value to a DateTime representation.] variable[val] assign[=] call[name[data].get, parameter[name[self].name]] if compare[name[val] is_not constant[None]] begin[:] return[call[name[convert_tstamp], parameter[name[val]]]]
keyword[def] identifier[stripe_to_db] ( identifier[self] , identifier[data] ): literal[string] identifier[val] = identifier[data] . identifier[get] ( identifier[self] . identifier[name] ) keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[convert_tstamp] ( identifier[val] )
def stripe_to_db(self, data): """Convert the raw timestamp value to a DateTime representation.""" val = data.get(self.name) # Note: 0 is a possible return value, which is 'falseish' if val is not None: return convert_tstamp(val) # depends on [control=['if'], data=['val']]
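convert_tstamp is a dj-stripe helper; what it stands for is plain epoch-seconds-to-aware-datetime conversion, sketched here (the UTC behaviour is my assumption) so the 0-is-falsy note above is concrete:

from datetime import datetime, timezone

def convert_tstamp(val):
    # assumption: mirrors the helper by returning a UTC-aware datetime
    return datetime.fromtimestamp(val, tz=timezone.utc)

print(convert_tstamp(0))           # 1970-01-01 00:00:00+00:00 -- falsy, but valid
print(convert_tstamp(1400000000))  # 2014-05-13 16:53:20+00:00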
def x509_name(name):
    """Parses a subject into a :py:class:`x509.Name <cg:cryptography.x509.Name>`.

    If ``name`` is a string, :py:func:`parse_name` is used to parse it.

    >>> x509_name('/C=AT/CN=example.com')
    <Name(C=AT,CN=example.com)>
    >>> x509_name([('C', 'AT'), ('CN', 'example.com')])
    <Name(C=AT,CN=example.com)>
    """
    if isinstance(name, six.string_types):
        name = parse_name(name)
    return x509.Name([x509.NameAttribute(NAME_OID_MAPPINGS[typ], force_text(value))
                      for typ, value in name])
def function[x509_name, parameter[name]]: constant[Parses a subject into a :py:class:`x509.Name <cg:cryptography.x509.Name>`. If ``name`` is a string, :py:func:`parse_name` is used to parse it. >>> x509_name('/C=AT/CN=example.com') <Name(C=AT,CN=example.com)> >>> x509_name([('C', 'AT'), ('CN', 'example.com')]) <Name(C=AT,CN=example.com)> ] if call[name[isinstance], parameter[name[name], name[six].string_types]] begin[:] variable[name] assign[=] call[name[parse_name], parameter[name[name]]] return[call[name[x509].Name, parameter[<ast.ListComp object at 0x7da1b0d184f0>]]]
keyword[def] identifier[x509_name] ( identifier[name] ): literal[string] keyword[if] identifier[isinstance] ( identifier[name] , identifier[six] . identifier[string_types] ): identifier[name] = identifier[parse_name] ( identifier[name] ) keyword[return] identifier[x509] . identifier[Name] ([ identifier[x509] . identifier[NameAttribute] ( identifier[NAME_OID_MAPPINGS] [ identifier[typ] ], identifier[force_text] ( identifier[value] )) keyword[for] identifier[typ] , identifier[value] keyword[in] identifier[name] ])
def x509_name(name): """Parses a subject into a :py:class:`x509.Name <cg:cryptography.x509.Name>`. If ``name`` is a string, :py:func:`parse_name` is used to parse it. >>> x509_name('/C=AT/CN=example.com') <Name(C=AT,CN=example.com)> >>> x509_name([('C', 'AT'), ('CN', 'example.com')]) <Name(C=AT,CN=example.com)> """ if isinstance(name, six.string_types): name = parse_name(name) # depends on [control=['if'], data=[]] return x509.Name([x509.NameAttribute(NAME_OID_MAPPINGS[typ], force_text(value)) for (typ, value) in name])
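Round-tripping the doctest examples through the underlying cryptography objects shows what x509_name builds, assuming NAME_OID_MAPPINGS maps 'C' and 'CN' to the standard NameOIDs:

from cryptography import x509
from cryptography.x509.oid import NameOID

name = x509.Name([
    x509.NameAttribute(NameOID.COUNTRY_NAME, u"AT"),
    x509.NameAttribute(NameOID.COMMON_NAME, u"example.com"),
])
print(name.rfc4514_string())  # CN=example.com,C=AT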
def set_dict_options(self, options):
    """for dictionary-like inputs (as object in Javascript)
    options must be in python dictionary format
    """
    if isinstance(options, dict):
        for key, option_data in options.items():
            self.set_options(key, option_data)
    else:
        raise OptionTypeError("Not An Accepted Input Format: %s. Must be Dictionary" % type(options))
def function[set_dict_options, parameter[self, options]]: constant[for dictionary-like inputs (as object in Javascript) options must be in python dictionary format ] if call[name[isinstance], parameter[name[options], name[dict]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da20e961630>, <ast.Name object at 0x7da20e9632e0>]]] in starred[call[name[options].items, parameter[]]] begin[:] call[name[self].set_options, parameter[name[key], name[option_data]]]
keyword[def] identifier[set_dict_options] ( identifier[self] , identifier[options] ): literal[string] keyword[if] identifier[isinstance] ( identifier[options] , identifier[dict] ): keyword[for] identifier[key] , identifier[option_data] keyword[in] identifier[options] . identifier[items] (): identifier[self] . identifier[set_options] ( identifier[key] , identifier[option_data] ) keyword[else] : keyword[raise] identifier[OptionTypeError] ( literal[string] % identifier[type] ( identifier[options] ))
def set_dict_options(self, options):
    """For dictionary-like inputs (an object in JavaScript),
    options must be in Python dictionary format
    """
    if isinstance(options, dict):
        for (key, option_data) in options.items():
            self.set_options(key, option_data) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
    else:
        raise OptionTypeError('Not An Accepted Input Format: %s. Must be Dictionary' % type(options))
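A small self-contained sketch of the pattern with a stand-in chart class; it assumes the set_dict_options function above is defined at module level, and the option keys are illustrative Highcharts-style groups:

class OptionTypeError(TypeError):
    """Stand-in for the library's error type (only hit on bad input)."""

class Chart:
    """Stand-in wrapper exposing set_options/set_dict_options."""
    def __init__(self):
        self.options = {}

    def set_options(self, key, option_data):
        # Merge one top-level option group into the stored options.
        self.options.setdefault(key, {}).update(option_data)

    set_dict_options = set_dict_options  # reuse the function defined above

chart = Chart()
chart.set_dict_options({
    'title': {'text': 'Monthly totals'},
    'chart': {'type': 'line'},
})
print(chart.options['title'])  # {'text': 'Monthly totals'}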
def add_measurement(request, experiment_id):
    """This is a view to display a form for adding single measurements to an experiment.

    It uses the MeasurementForm class, which has an autocomplete field for animal."""
    experiment = get_object_or_404(Experiment, pk=experiment_id)
    if request.method == 'POST':
        form = MeasurementForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(experiment.get_absolute_url())
    else:
        form = MeasurementForm()
    return render(request, "data_entry_form.html", {"form": form, "experiment": experiment})
def function[add_measurement, parameter[request, experiment_id]]: constant[This is a view to display a form for adding single measurements to an experiment. It uses the MeasurementForm class, which has an autocomplete field for animal.] variable[experiment] assign[=] call[name[get_object_or_404], parameter[name[Experiment]]] if compare[name[request].method equal[==] constant[POST]] begin[:] variable[form] assign[=] call[name[MeasurementForm], parameter[name[request].POST]] if call[name[form].is_valid, parameter[]] begin[:] call[name[form].save, parameter[]] return[call[name[HttpResponseRedirect], parameter[call[name[experiment].get_absolute_url, parameter[]]]]] return[call[name[render], parameter[name[request], constant[data_entry_form.html], dictionary[[<ast.Constant object at 0x7da18f09f4f0>, <ast.Constant object at 0x7da18f09d210>], [<ast.Name object at 0x7da18f09f160>, <ast.Name object at 0x7da18f09ee30>]]]]]
keyword[def] identifier[add_measurement] ( identifier[request] , identifier[experiment_id] ): literal[string] identifier[experiment] = identifier[get_object_or_404] ( identifier[Experiment] , identifier[pk] = identifier[experiment_id] ) keyword[if] identifier[request] . identifier[method] == literal[string] : identifier[form] = identifier[MeasurementForm] ( identifier[request] . identifier[POST] ) keyword[if] identifier[form] . identifier[is_valid] (): identifier[form] . identifier[save] () keyword[return] identifier[HttpResponseRedirect] ( identifier[experiment] . identifier[get_absolute_url] ()) keyword[else] : identifier[form] = identifier[MeasurementForm] () keyword[return] identifier[render] ( identifier[request] , literal[string] ,{ literal[string] : identifier[form] , literal[string] : identifier[experiment] })
def add_measurement(request, experiment_id):
    """This is a view to display a form for adding single measurements to an experiment.

    It uses the MeasurementForm class, which has an autocomplete field for animal."""
    experiment = get_object_or_404(Experiment, pk=experiment_id)
    if request.method == 'POST':
        form = MeasurementForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(experiment.get_absolute_url()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    else:
        form = MeasurementForm()
    return render(request, 'data_entry_form.html', {'form': form, 'experiment': experiment})
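For context, a typical URLconf entry wiring up this view might look like the following; the URL pattern and route name are assumptions, not part of the original code:

# urls.py -- hypothetical wiring for the add_measurement view above.
from django.urls import path

from . import views

urlpatterns = [
    path('experiment/<int:experiment_id>/measurement/new/',
         views.add_measurement,
         name='measurement-new'),
]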
def projection(radius=5e-6, sphere_index=1.339, medium_index=1.333, wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80), center=(39.5, 39.5)): """Optical path difference projection of a dielectric sphere Parameters ---------- radius: float Radius of the sphere [m] sphere_index: float Refractive index of the sphere medium_index: float Refractive index of the surrounding medium wavelength: float Vacuum wavelength of the imaging light [m] pixel_size: float Pixel size [m] grid_size: tuple of floats Resulting image size in x and y [px] center: tuple of floats Center position in image coordinates [px] Returns ------- qpi: qpimage.QPImage Quantitative phase data set """ # grid x = np.arange(grid_size[0]).reshape(-1, 1) y = np.arange(grid_size[1]).reshape(1, -1) cx, cy = center # sphere location rpx = radius / pixel_size r = rpx**2 - (x - cx)**2 - (y - cy)**2 # distance z = np.zeros_like(r) rvalid = r > 0 z[rvalid] = 2 * np.sqrt(r[rvalid]) * pixel_size # phase = delta_n * 2PI * z / wavelength phase = (sphere_index - medium_index) * 2 * np.pi * z / wavelength meta_data = {"pixel size": pixel_size, "wavelength": wavelength, "medium index": medium_index, "sim center": center, "sim radius": radius, "sim index": sphere_index, "sim model": "projection", } qpi = qpimage.QPImage(data=phase, which_data="phase", meta_data=meta_data) return qpi
def function[projection, parameter[radius, sphere_index, medium_index, wavelength, pixel_size, grid_size, center]]: constant[Optical path difference projection of a dielectric sphere Parameters ---------- radius: float Radius of the sphere [m] sphere_index: float Refractive index of the sphere medium_index: float Refractive index of the surrounding medium wavelength: float Vacuum wavelength of the imaging light [m] pixel_size: float Pixel size [m] grid_size: tuple of floats Resulting image size in x and y [px] center: tuple of floats Center position in image coordinates [px] Returns ------- qpi: qpimage.QPImage Quantitative phase data set ] variable[x] assign[=] call[call[name[np].arange, parameter[call[name[grid_size]][constant[0]]]].reshape, parameter[<ast.UnaryOp object at 0x7da1b2582320>, constant[1]]] variable[y] assign[=] call[call[name[np].arange, parameter[call[name[grid_size]][constant[1]]]].reshape, parameter[constant[1], <ast.UnaryOp object at 0x7da1b25d7eb0>]] <ast.Tuple object at 0x7da1b25d62c0> assign[=] name[center] variable[rpx] assign[=] binary_operation[name[radius] / name[pixel_size]] variable[r] assign[=] binary_operation[binary_operation[binary_operation[name[rpx] ** constant[2]] - binary_operation[binary_operation[name[x] - name[cx]] ** constant[2]]] - binary_operation[binary_operation[name[y] - name[cy]] ** constant[2]]] variable[z] assign[=] call[name[np].zeros_like, parameter[name[r]]] variable[rvalid] assign[=] compare[name[r] greater[>] constant[0]] call[name[z]][name[rvalid]] assign[=] binary_operation[binary_operation[constant[2] * call[name[np].sqrt, parameter[call[name[r]][name[rvalid]]]]] * name[pixel_size]] variable[phase] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[sphere_index] - name[medium_index]] * constant[2]] * name[np].pi] * name[z]] / name[wavelength]] variable[meta_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2580cd0>, <ast.Constant object at 0x7da1b2581d20>, <ast.Constant object at 0x7da1b2580430>, <ast.Constant object at 0x7da1b2581930>, <ast.Constant object at 0x7da1b25838b0>, <ast.Constant object at 0x7da1b2582bc0>, <ast.Constant object at 0x7da1b2581180>], [<ast.Name object at 0x7da1b25838e0>, <ast.Name object at 0x7da1b2582f20>, <ast.Name object at 0x7da1b25826e0>, <ast.Name object at 0x7da1b2580d90>, <ast.Name object at 0x7da1b2583550>, <ast.Name object at 0x7da1b25821a0>, <ast.Constant object at 0x7da1b2582e30>]] variable[qpi] assign[=] call[name[qpimage].QPImage, parameter[]] return[name[qpi]]
keyword[def] identifier[projection] ( identifier[radius] = literal[int] , identifier[sphere_index] = literal[int] , identifier[medium_index] = literal[int] , identifier[wavelength] = literal[int] , identifier[pixel_size] = literal[int] , identifier[grid_size] =( literal[int] , literal[int] ), identifier[center] =( literal[int] , literal[int] )): literal[string] identifier[x] = identifier[np] . identifier[arange] ( identifier[grid_size] [ literal[int] ]). identifier[reshape] (- literal[int] , literal[int] ) identifier[y] = identifier[np] . identifier[arange] ( identifier[grid_size] [ literal[int] ]). identifier[reshape] ( literal[int] ,- literal[int] ) identifier[cx] , identifier[cy] = identifier[center] identifier[rpx] = identifier[radius] / identifier[pixel_size] identifier[r] = identifier[rpx] ** literal[int] -( identifier[x] - identifier[cx] )** literal[int] -( identifier[y] - identifier[cy] )** literal[int] identifier[z] = identifier[np] . identifier[zeros_like] ( identifier[r] ) identifier[rvalid] = identifier[r] > literal[int] identifier[z] [ identifier[rvalid] ]= literal[int] * identifier[np] . identifier[sqrt] ( identifier[r] [ identifier[rvalid] ])* identifier[pixel_size] identifier[phase] =( identifier[sphere_index] - identifier[medium_index] )* literal[int] * identifier[np] . identifier[pi] * identifier[z] / identifier[wavelength] identifier[meta_data] ={ literal[string] : identifier[pixel_size] , literal[string] : identifier[wavelength] , literal[string] : identifier[medium_index] , literal[string] : identifier[center] , literal[string] : identifier[radius] , literal[string] : identifier[sphere_index] , literal[string] : literal[string] , } identifier[qpi] = identifier[qpimage] . identifier[QPImage] ( identifier[data] = identifier[phase] , identifier[which_data] = literal[string] , identifier[meta_data] = identifier[meta_data] ) keyword[return] identifier[qpi]
def projection(radius=5e-06, sphere_index=1.339, medium_index=1.333, wavelength=5.5e-07, pixel_size=1e-07, grid_size=(80, 80), center=(39.5, 39.5)): """Optical path difference projection of a dielectric sphere Parameters ---------- radius: float Radius of the sphere [m] sphere_index: float Refractive index of the sphere medium_index: float Refractive index of the surrounding medium wavelength: float Vacuum wavelength of the imaging light [m] pixel_size: float Pixel size [m] grid_size: tuple of floats Resulting image size in x and y [px] center: tuple of floats Center position in image coordinates [px] Returns ------- qpi: qpimage.QPImage Quantitative phase data set """ # grid x = np.arange(grid_size[0]).reshape(-1, 1) y = np.arange(grid_size[1]).reshape(1, -1) (cx, cy) = center # sphere location rpx = radius / pixel_size r = rpx ** 2 - (x - cx) ** 2 - (y - cy) ** 2 # distance z = np.zeros_like(r) rvalid = r > 0 z[rvalid] = 2 * np.sqrt(r[rvalid]) * pixel_size # phase = delta_n * 2PI * z / wavelength phase = (sphere_index - medium_index) * 2 * np.pi * z / wavelength meta_data = {'pixel size': pixel_size, 'wavelength': wavelength, 'medium index': medium_index, 'sim center': center, 'sim radius': radius, 'sim index': sphere_index, 'sim model': 'projection'} qpi = qpimage.QPImage(data=phase, which_data='phase', meta_data=meta_data) return qpi
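As a sanity check on the formula, the optical path difference through the sphere center reduces to 2 * radius * (sphere_index - medium_index); a numpy-only sketch of the same math for one pixel row, independent of qpimage:

import numpy as np

# Recompute the projection phase along one row through the sphere center,
# with the same default parameters and formula as projection() above.
radius, n_sph, n_med, wl, px = 5e-6, 1.339, 1.333, 550e-9, 1e-7
x = np.arange(80) - 39.5                     # pixel offsets from the center
r2 = (radius / px)**2 - x**2                 # squared chord radius [px^2]
thickness = np.where(r2 > 0, 2 * np.sqrt(np.clip(r2, 0, None)) * px, 0)
phase = (n_sph - n_med) * 2 * np.pi * thickness / wl
print(phase.max())                           # ~0.685 rad near the center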
def chung_dense(T, MW, Tc, Vc, omega, Cvm, Vm, mu, dipole, association=0):
    r'''Estimates the thermal conductivity of a gas at high pressure as a
    function of temperature using the reference fluid method of Chung [1]_ as
    shown in [2]_.

    .. math::
        \lambda = \frac{31.2 \eta^\circ \Psi}{M'}(G_2^{-1} + B_6 y)+qB_7y^2T_r^{1/2}G_2

        \Psi = 1 + \alpha \left\{[0.215+0.28288\alpha-1.061\beta+0.26665Z]/
        [0.6366+\beta Z + 1.061 \alpha \beta]\right\}

        \alpha = \frac{C_v}{R}-1.5

        \beta = 0.7862-0.7109\omega + 1.3168\omega^2

        Z=2+10.5T_r^2

        q = 3.586\times 10^{-3} (T_c/M')^{1/2}/V_c^{2/3}

        y = \frac{V_c}{6V}

        G_1 = \frac{1-0.5y}{(1-y)^3}

        G_2 = \frac{(B_1/y)[1-\exp(-B_4y)]+ B_2G_1\exp(B_5y) + B_3G_1}
        {B_1B_4 + B_2 + B_3}

        B_i = a_i + b_i \omega + c_i \mu_r^4 + d_i \kappa

    Parameters
    ----------
    T : float
        Temperature of the gas [K]
    MW : float
        Molecular weight of the gas [g/mol]
    Tc : float
        Critical temperature of the gas [K]
    Vc : float
        Critical volume of the gas [m^3/mol]
    omega : float
        Acentric factor of the gas [-]
    Cvm : float
        Molar constant volume heat capacity of the gas [J/mol/K]
    Vm : float
        Molar volume of the gas at T and P [m^3/mol]
    mu : float
        Low-pressure gas viscosity [Pa*s]
    dipole : float
        Dipole moment [debye]
    association : float, optional
        Association factor [-]

    Returns
    -------
    kg : float
        Estimated dense gas thermal conductivity [W/m/K]

    Notes
    -----
    MW internally converted to kg/g-mol.
    Vm internally converted to mL/mol.
    [2]_ is not the latest form as presented in [1]_.
    Association factor is assumed 0. Relates to the polarity of the gas.

    Coefficients as follows:

    ais = [2.4166E+0, -5.0924E-1, 6.6107E+0, 1.4543E+1, 7.9274E-1, -5.8634E+0,
    9.1089E+1]

    bis = [7.4824E-1, -1.5094E+0, 5.6207E+0, -8.9139E+0, 8.2019E-1, 1.2801E+1,
    1.2811E+2]

    cis = [-9.1858E-1, -4.9991E+1, 6.4760E+1, -5.6379E+0, -6.9369E-1,
    9.5893E+0, -5.4217E+1]

    dis = [1.2172E+2, 6.9983E+1, 2.7039E+1, 7.4344E+1, 6.3173E+0, 6.5529E+1,
    5.2381E+2]

    Examples
    --------
    >>> chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142,
    ... Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4)
    0.06160570379787278

    References
    ----------
    .. [1] Chung, Ting Horng, Mohammad Ajlan, Lloyd L. Lee, and Kenneth E.
       Starling. "Generalized Multiparameter Correlation for Nonpolar and Polar
       Fluid Transport Properties." Industrial & Engineering Chemistry Research
       27, no. 4 (April 1, 1988): 671-79. doi:10.1021/ie00076a024.
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
    '''
    ais = [2.4166E+0, -5.0924E-1, 6.6107E+0, 1.4543E+1, 7.9274E-1, -5.8634E+0, 9.1089E+1]
    bis = [7.4824E-1, -1.5094E+0, 5.6207E+0, -8.9139E+0, 8.2019E-1, 1.2801E+1, 1.2811E+2]
    cis = [-9.1858E-1, -4.9991E+1, 6.4760E+1, -5.6379E+0, -6.9369E-1, 9.5893E+0, -5.4217E+1]
    dis = [1.2172E+2, 6.9983E+1, 2.7039E+1, 7.4344E+1, 6.3173E+0, 6.5529E+1, 5.2381E+2]
    Tr = T/Tc
    mur = 131.3*dipole/(Vc*1E6*Tc)**0.5

    # From Chung Method
    alpha = Cvm/R - 1.5
    beta = 0.7862 - 0.7109*omega + 1.3168*omega**2
    Z = 2 + 10.5*(T/Tc)**2
    psi = 1 + alpha*((0.215 + 0.28288*alpha - 1.061*beta + 0.26665*Z)/(0.6366 + beta*Z + 1.061*alpha*beta))

    y = Vc/(6*Vm)
    B1, B2, B3, B4, B5, B6, B7 = [ais[i] + bis[i]*omega + cis[i]*mur**4 + dis[i]*association for i in range(7)]
    G1 = (1 - 0.5*y)/(1. - y)**3
    G2 = (B1/y*(1 - exp(-B4*y)) + B2*G1*exp(B5*y) + B3*G1)/(B1*B4 + B2 + B3)
    q = 3.586E-3*(Tc/(MW/1000.))**0.5/(Vc*1E6)**(2/3.)
    return 31.2*mu*psi/(MW/1000.)*(G2**-1 + B6*y) + q*B7*y**2*Tr**0.5*G2
def function[chung_dense, parameter[T, MW, Tc, Vc, omega, Cvm, Vm, mu, dipole, association]]: constant[Estimates the thermal conductivity of a gas at high pressure as a function of temperature using the reference fluid method of Chung [1]_ as shown in [2]_. .. math:: \lambda = \frac{31.2 \eta^\circ \Psi}{M'}(G_2^{-1} + B_6 y)+qB_7y^2T_r^{1/2}G_2 \Psi = 1 + \alpha \left\{[0.215+0.28288\alpha-1.061\beta+0.26665Z]/ [0.6366+\beta Z + 1.061 \alpha \beta]\right\} \alpha = \frac{C_v}{R}-1.5 \beta = 0.7862-0.7109\omega + 1.3168\omega^2 Z=2+10.5T_r^2 q = 3.586\times 10^{-3} (T_c/M')^{1/2}/V_c^{2/3} y = \frac{V_c}{6V} G_1 = \frac{1-0.5y}{(1-y)^3} G_2 = \frac{(B_1/y)[1-\exp(-B_4y)]+ B_2G_1\exp(B_5y) + B_3G_1} {B_1B_4 + B_2 + B_3} B_i = a_i + b_i \omega + c_i \mu_r^4 + d_i \kappa Parameters ---------- T : float Temperature of the gas [K] MW : float Molecular weight of the gas [g/mol] Tc : float Critical temperature of the gas [K] Vc : float Critical volume of the gas [m^3/mol] omega : float Acentric factor of the gas [-] Cvm : float Molar constant volume heat capacity of the gas [J/mol/K] Vm : float Molar volume of the gas at T and P [m^3/mol] mu : float Low-pressure gas viscosity [Pa*s] dipole : float Dipole moment [debye] association : float, optional Association factor [-] Returns ------- kg : float Estimated dense gas thermal conductivity [W/m/K] Notes ----- MW internally converted to kg/g-mol. Vm internally converted to mL/mol. [2]_ is not the latest form as presented in [1]_. Association factor is assumed 0. Relates to the polarity of the gas. Coefficients as follows: ais = [2.4166E+0, -5.0924E-1, 6.6107E+0, 1.4543E+1, 7.9274E-1, -5.8634E+0, 9.1089E+1] bis = [7.4824E-1, -1.5094E+0, 5.6207E+0, -8.9139E+0, 8.2019E-1, 1.2801E+1, 1.2811E+2] cis = [-9.1858E-1, -4.9991E+1, 6.4760E+1, -5.6379E+0, -6.9369E-1, 9.5893E+0, -5.4217E+1] dis = [1.2172E+2, 6.9983E+1, 2.7039E+1, 7.4344E+1, 6.3173E+0, 6.5529E+1, 5.2381E+2] Examples -------- >>> chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142, ... Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4) 0.06160570379787278 References ---------- .. [1] Chung, Ting Horng, Mohammad Ajlan, Lloyd L. Lee, and Kenneth E. Starling. "Generalized Multiparameter Correlation for Nonpolar and Polar Fluid Transport Properties." Industrial & Engineering Chemistry Research 27, no. 4 (April 1, 1988): 671-79. doi:10.1021/ie00076a024. .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition. New York: McGraw-Hill Professional, 2000. ] variable[ais] assign[=] list[[<ast.Constant object at 0x7da20e9612a0>, <ast.UnaryOp object at 0x7da20e962fe0>, <ast.Constant object at 0x7da20e961750>, <ast.Constant object at 0x7da20e9620e0>, <ast.Constant object at 0x7da20e962ce0>, <ast.UnaryOp object at 0x7da20e963df0>, <ast.Constant object at 0x7da20e961840>]] variable[bis] assign[=] list[[<ast.Constant object at 0x7da20e963d60>, <ast.UnaryOp object at 0x7da20e963ac0>, <ast.Constant object at 0x7da20e9604c0>, <ast.UnaryOp object at 0x7da20e9600a0>, <ast.Constant object at 0x7da20e963700>, <ast.Constant object at 0x7da20e960c40>, <ast.Constant object at 0x7da20e963760>]] variable[cis] assign[=] list[[<ast.UnaryOp object at 0x7da20e960fd0>, <ast.UnaryOp object at 0x7da20e962d70>, <ast.Constant object at 0x7da20e960c70>, <ast.UnaryOp object at 0x7da20e9626e0>, <ast.UnaryOp object at 0x7da20e962680>, <ast.Constant object at 0x7da20e962470>, <ast.UnaryOp object at 0x7da20e962770>]] variable[dis] assign[=] list[[<ast.Constant object at 0x7da20e960d00>, <ast.Constant object at 0x7da20e9629b0>, <ast.Constant object at 0x7da20e962860>, <ast.Constant object at 0x7da20e962980>, <ast.Constant object at 0x7da20e961660>, <ast.Constant object at 0x7da20e9609d0>, <ast.Constant object at 0x7da20e960100>]] variable[Tr] assign[=] binary_operation[name[T] / name[Tc]] variable[mur] assign[=] binary_operation[binary_operation[constant[131.3] * name[dipole]] / binary_operation[binary_operation[binary_operation[name[Vc] * constant[1000000.0]] * name[Tc]] ** constant[0.5]]] variable[alpha] assign[=] binary_operation[binary_operation[name[Cvm] / name[R]] - constant[1.5]] variable[beta] assign[=] binary_operation[binary_operation[constant[0.7862] - binary_operation[constant[0.7109] * name[omega]]] + binary_operation[constant[1.3168] * binary_operation[name[omega] ** constant[2]]]] variable[Z] assign[=] binary_operation[constant[2] + binary_operation[constant[10.5] * binary_operation[binary_operation[name[T] / name[Tc]] ** constant[2]]]] variable[psi] assign[=] binary_operation[constant[1] + binary_operation[name[alpha] * binary_operation[binary_operation[binary_operation[binary_operation[constant[0.215] + binary_operation[constant[0.28288] * name[alpha]]] - binary_operation[constant[1.061] * name[beta]]] + binary_operation[constant[0.26665] * name[Z]]] / binary_operation[binary_operation[constant[0.6366] + binary_operation[name[beta] * name[Z]]] + binary_operation[binary_operation[constant[1.061] * name[alpha]] * name[beta]]]]]] variable[y] assign[=] binary_operation[name[Vc] / binary_operation[constant[6] * name[Vm]]] <ast.Tuple object at 0x7da1b021c610> assign[=] <ast.ListComp object at 0x7da1b021f790> variable[G1] assign[=] binary_operation[binary_operation[constant[1] - binary_operation[constant[0.5] * name[y]]] / binary_operation[binary_operation[constant[1.0] - name[y]] ** constant[3]]] variable[G2] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[B1] / name[y]] * binary_operation[constant[1] - call[name[exp], parameter[binary_operation[<ast.UnaryOp object at 0x7da1b021f970> * name[y]]]]]] + binary_operation[binary_operation[name[B2] * name[G1]] * call[name[exp], parameter[binary_operation[name[B5] * name[y]]]]]] + binary_operation[name[B3] * name[G1]]] / binary_operation[binary_operation[binary_operation[name[B1] * name[B4]] + name[B2]] + name[B3]]] variable[q] assign[=] binary_operation[binary_operation[constant[0.003586] * binary_operation[binary_operation[name[Tc] / binary_operation[name[MW] / constant[1000.0]]] ** constant[0.5]]] / binary_operation[binary_operation[name[Vc] * constant[1000000.0]] ** binary_operation[constant[2] / constant[3.0]]]] return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[31.2] * name[mu]] * name[psi]] / binary_operation[name[MW] / constant[1000.0]]] * binary_operation[binary_operation[name[G2] ** <ast.UnaryOp object at 0x7da20c992c50>] + binary_operation[name[B6] * name[y]]]] + binary_operation[binary_operation[binary_operation[binary_operation[name[q] * name[B7]] * binary_operation[name[y] ** constant[2]]] * binary_operation[name[Tr] ** constant[0.5]]] * name[G2]]]]
keyword[def] identifier[chung_dense] ( identifier[T] , identifier[MW] , identifier[Tc] , identifier[Vc] , identifier[omega] , identifier[Cvm] , identifier[Vm] , identifier[mu] , identifier[dipole] , identifier[association] = literal[int] ): literal[string] identifier[ais] =[ literal[int] ,- literal[int] , literal[int] , literal[int] , literal[int] ,- literal[int] , literal[int] ] identifier[bis] =[ literal[int] ,- literal[int] , literal[int] ,- literal[int] , literal[int] , literal[int] , literal[int] ] identifier[cis] =[- literal[int] ,- literal[int] , literal[int] ,- literal[int] ,- literal[int] , literal[int] ,- literal[int] ] identifier[dis] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ] identifier[Tr] = identifier[T] / identifier[Tc] identifier[mur] = literal[int] * identifier[dipole] /( identifier[Vc] * literal[int] * identifier[Tc] )** literal[int] identifier[alpha] = identifier[Cvm] / identifier[R] - literal[int] identifier[beta] = literal[int] - literal[int] * identifier[omega] + literal[int] * identifier[omega] ** literal[int] identifier[Z] = literal[int] + literal[int] *( identifier[T] / identifier[Tc] )** literal[int] identifier[psi] = literal[int] + identifier[alpha] *(( literal[int] + literal[int] * identifier[alpha] - literal[int] * identifier[beta] + literal[int] * identifier[Z] )/( literal[int] + identifier[beta] * identifier[Z] + literal[int] * identifier[alpha] * identifier[beta] )) identifier[y] = identifier[Vc] /( literal[int] * identifier[Vm] ) identifier[B1] , identifier[B2] , identifier[B3] , identifier[B4] , identifier[B5] , identifier[B6] , identifier[B7] =[ identifier[ais] [ identifier[i] ]+ identifier[bis] [ identifier[i] ]* identifier[omega] + identifier[cis] [ identifier[i] ]* identifier[mur] ** literal[int] + identifier[dis] [ identifier[i] ]* identifier[association] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] identifier[G1] =( literal[int] - literal[int] * identifier[y] )/( literal[int] - identifier[y] )** literal[int] identifier[G2] =( identifier[B1] / identifier[y] *( literal[int] - identifier[exp] (- identifier[B4] * identifier[y] ))+ identifier[B2] * identifier[G1] * identifier[exp] ( identifier[B5] * identifier[y] )+ identifier[B3] * identifier[G1] )/( identifier[B1] * identifier[B4] + identifier[B2] + identifier[B3] ) identifier[q] = literal[int] *( identifier[Tc] /( identifier[MW] / literal[int] ))** literal[int] /( identifier[Vc] * literal[int] )**( literal[int] / literal[int] ) keyword[return] literal[int] * identifier[mu] * identifier[psi] /( identifier[MW] / literal[int] )*( identifier[G2] **- literal[int] + identifier[B6] * identifier[y] )+ identifier[q] * identifier[B7] * identifier[y] ** literal[int] * identifier[Tr] ** literal[int] * identifier[G2]
def chung_dense(T, MW, Tc, Vc, omega, Cvm, Vm, mu, dipole, association=0):
    """Estimates the thermal conductivity of a gas at high pressure as a
    function of temperature using the reference fluid method of Chung [1]_ as
    shown in [2]_.

    .. math::
        \\lambda = \\frac{31.2 \\eta^\\circ \\Psi}{M'}(G_2^{-1} + B_6 y)+qB_7y^2T_r^{1/2}G_2

        \\Psi = 1 + \\alpha \\left\\{[0.215+0.28288\\alpha-1.061\\beta+0.26665Z]/
        [0.6366+\\beta Z + 1.061 \\alpha \\beta]\\right\\}

        \\alpha = \\frac{C_v}{R}-1.5

        \\beta = 0.7862-0.7109\\omega + 1.3168\\omega^2

        Z=2+10.5T_r^2

        q = 3.586\\times 10^{-3} (T_c/M')^{1/2}/V_c^{2/3}

        y = \\frac{V_c}{6V}

        G_1 = \\frac{1-0.5y}{(1-y)^3}

        G_2 = \\frac{(B_1/y)[1-\\exp(-B_4y)]+ B_2G_1\\exp(B_5y) + B_3G_1}
        {B_1B_4 + B_2 + B_3}

        B_i = a_i + b_i \\omega + c_i \\mu_r^4 + d_i \\kappa

    Parameters
    ----------
    T : float
        Temperature of the gas [K]
    MW : float
        Molecular weight of the gas [g/mol]
    Tc : float
        Critical temperature of the gas [K]
    Vc : float
        Critical volume of the gas [m^3/mol]
    omega : float
        Acentric factor of the gas [-]
    Cvm : float
        Molar constant volume heat capacity of the gas [J/mol/K]
    Vm : float
        Molar volume of the gas at T and P [m^3/mol]
    mu : float
        Low-pressure gas viscosity [Pa*s]
    dipole : float
        Dipole moment [debye]
    association : float, optional
        Association factor [-]

    Returns
    -------
    kg : float
        Estimated dense gas thermal conductivity [W/m/K]

    Notes
    -----
    MW internally converted to kg/g-mol.
    Vm internally converted to mL/mol.
    [2]_ is not the latest form as presented in [1]_.
    Association factor is assumed 0. Relates to the polarity of the gas.

    Coefficients as follows:

    ais = [2.4166E+0, -5.0924E-1, 6.6107E+0, 1.4543E+1, 7.9274E-1, -5.8634E+0,
    9.1089E+1]

    bis = [7.4824E-1, -1.5094E+0, 5.6207E+0, -8.9139E+0, 8.2019E-1, 1.2801E+1,
    1.2811E+2]

    cis = [-9.1858E-1, -4.9991E+1, 6.4760E+1, -5.6379E+0, -6.9369E-1,
    9.5893E+0, -5.4217E+1]

    dis = [1.2172E+2, 6.9983E+1, 2.7039E+1, 7.4344E+1, 6.3173E+0, 6.5529E+1,
    5.2381E+2]

    Examples
    --------
    >>> chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142,
    ... Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4)
    0.06160570379787278

    References
    ----------
    .. [1] Chung, Ting Horng, Mohammad Ajlan, Lloyd L. Lee, and Kenneth E.
       Starling. "Generalized Multiparameter Correlation for Nonpolar and Polar
       Fluid Transport Properties." Industrial & Engineering Chemistry Research
       27, no. 4 (April 1, 1988): 671-79. doi:10.1021/ie00076a024.
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
    """
    ais = [2.4166, -0.50924, 6.6107, 14.543, 0.79274, -5.8634, 91.089]
    bis = [0.74824, -1.5094, 5.6207, -8.9139, 0.82019, 12.801, 128.11]
    cis = [-0.91858, -49.991, 64.76, -5.6379, -0.69369, 9.5893, -54.217]
    dis = [121.72, 69.983, 27.039, 74.344, 6.3173, 65.529, 523.81]
    Tr = T / Tc
    mur = 131.3 * dipole / (Vc * 1000000.0 * Tc) ** 0.5
    # From Chung Method
    alpha = Cvm / R - 1.5
    beta = 0.7862 - 0.7109 * omega + 1.3168 * omega ** 2
    Z = 2 + 10.5 * (T / Tc) ** 2
    psi = 1 + alpha * ((0.215 + 0.28288 * alpha - 1.061 * beta + 0.26665 * Z) / (0.6366 + beta * Z + 1.061 * alpha * beta))
    y = Vc / (6 * Vm)
    (B1, B2, B3, B4, B5, B6, B7) = [ais[i] + bis[i] * omega + cis[i] * mur ** 4 + dis[i] * association for i in range(7)]
    G1 = (1 - 0.5 * y) / (1.0 - y) ** 3
    G2 = (B1 / y * (1 - exp(-B4 * y)) + B2 * G1 * exp(B5 * y) + B3 * G1) / (B1 * B4 + B2 + B3)
    q = 0.003586 * (Tc / (MW / 1000.0)) ** 0.5 / (Vc * 1000000.0) ** (2 / 3.0)
    return 31.2 * mu * psi / (MW / 1000.0) * (G2 ** (-1) + B6 * y) + q * B7 * y ** 2 * Tr ** 0.5 * G2
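To make the dimensionless groups concrete, here are the intermediates for the docstring example (a hand check; R is assumed to be the molar gas constant, since the module-level constant is not shown in this record):

# Hand check of the dimensionless groups for the docstring example.
R = 8.314  # J/(mol*K), assumed value of the module-level gas constant
T, Tc, Vc, Vm, omega, Cvm = 473., 364.9, 184.6e-6, 172.1e-6, 0.142, 82.67

Tr = T/Tc                                        # ~1.296
alpha = Cvm/R - 1.5                              # ~8.44
beta = 0.7862 - 0.7109*omega + 1.3168*omega**2   # ~0.712
Z = 2 + 10.5*Tr**2                               # ~19.6
y = Vc/(6*Vm)                                    # ~0.179, reduced density
print(Tr, alpha, beta, Z, y)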
def days_and_sids_for_frames(frames):
    """
    Returns the date index and sid columns shared by a list of dataframes,
    ensuring they all match.

    Parameters
    ----------
    frames : list[pd.DataFrame]
        A list of dataframes indexed by day, with a column per sid.

    Returns
    -------
    days : np.array[datetime64[ns]]
        The days in these dataframes.
    sids : np.array[int64]
        The sids in these dataframes.

    Raises
    ------
    ValueError
        If the dataframes passed are not all indexed by the same days
        and sids.
    """
    if not frames:
        days = np.array([], dtype='datetime64[ns]')
        sids = np.array([], dtype='int64')
        return days, sids

    # Ensure the indices and columns all match.
    check_indexes_all_same(
        [frame.index for frame in frames],
        message='Frames have mismatched days.',
    )
    check_indexes_all_same(
        [frame.columns for frame in frames],
        message='Frames have mismatched sids.',
    )

    return frames[0].index.values, frames[0].columns.values
def function[days_and_sids_for_frames, parameter[frames]]: constant[ Returns the date index and sid columns shared by a list of dataframes, ensuring they all match. Parameters ---------- frames : list[pd.DataFrame] A list of dataframes indexed by day, with a column per sid. Returns ------- days : np.array[datetime64[ns]] The days in these dataframes. sids : np.array[int64] The sids in these dataframes. Raises ------ ValueError If the dataframes passed are not all indexed by the same days and sids. ] if <ast.UnaryOp object at 0x7da1b2045d50> begin[:] variable[days] assign[=] call[name[np].array, parameter[list[[]]]] variable[sids] assign[=] call[name[np].array, parameter[list[[]]]] return[tuple[[<ast.Name object at 0x7da1b2064100>, <ast.Name object at 0x7da1b2067280>]]] call[name[check_indexes_all_same], parameter[<ast.ListComp object at 0x7da1b2067940>]] call[name[check_indexes_all_same], parameter[<ast.ListComp object at 0x7da1b2066740>]] return[tuple[[<ast.Attribute object at 0x7da1b2066350>, <ast.Attribute object at 0x7da1b20648b0>]]]
keyword[def] identifier[days_and_sids_for_frames] ( identifier[frames] ): literal[string] keyword[if] keyword[not] identifier[frames] : identifier[days] = identifier[np] . identifier[array] ([], identifier[dtype] = literal[string] ) identifier[sids] = identifier[np] . identifier[array] ([], identifier[dtype] = literal[string] ) keyword[return] identifier[days] , identifier[sids] identifier[check_indexes_all_same] ( [ identifier[frame] . identifier[index] keyword[for] identifier[frame] keyword[in] identifier[frames] ], identifier[message] = literal[string] , ) identifier[check_indexes_all_same] ( [ identifier[frame] . identifier[columns] keyword[for] identifier[frame] keyword[in] identifier[frames] ], identifier[message] = literal[string] , ) keyword[return] identifier[frames] [ literal[int] ]. identifier[index] . identifier[values] , identifier[frames] [ literal[int] ]. identifier[columns] . identifier[values]
def days_and_sids_for_frames(frames):
    """
    Returns the date index and sid columns shared by a list of dataframes,
    ensuring they all match.

    Parameters
    ----------
    frames : list[pd.DataFrame]
        A list of dataframes indexed by day, with a column per sid.

    Returns
    -------
    days : np.array[datetime64[ns]]
        The days in these dataframes.
    sids : np.array[int64]
        The sids in these dataframes.

    Raises
    ------
    ValueError
        If the dataframes passed are not all indexed by the same days
        and sids.
    """
    if not frames:
        days = np.array([], dtype='datetime64[ns]')
        sids = np.array([], dtype='int64')
        return (days, sids) # depends on [control=['if'], data=[]]
    # Ensure the indices and columns all match.
    check_indexes_all_same([frame.index for frame in frames], message='Frames have mismatched days.')
    check_indexes_all_same([frame.columns for frame in frames], message='Frames have mismatched sids.')
    return (frames[0].index.values, frames[0].columns.values)
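A quick usage sketch with two aligned frames; this assumes days_and_sids_for_frames and its check_indexes_all_same helper are importable from the surrounding module:

import numpy as np
import pandas as pd

# Two frames sharing the same day index and sid columns.
days = pd.date_range('2020-01-02', periods=3)
sids = pd.Index([1, 2], dtype='int64')
opens = pd.DataFrame(np.ones((3, 2)), index=days, columns=sids)
closes = pd.DataFrame(np.zeros((3, 2)), index=days, columns=sids)

d, s = days_and_sids_for_frames([opens, closes])
print(d.dtype, list(s))  # datetime64[ns] [1, 2]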
def _extract_pynn_components_to_neuroml(nl_model, nml_doc=None): """ Parse the NeuroMLlite description for cell, synapses and inputs described as PyNN elements (e.g. IF_cond_alpha, DCSource) and parameters, and convert these to the equivalent elements in a NeuroMLDocument """ if nml_doc == None: from neuroml import NeuroMLDocument nml_doc = NeuroMLDocument(id="temp") for c in nl_model.cells: if c.pynn_cell: if nml_doc.get_by_id(c.id) == None: import pyNN.neuroml cell_params = c.parameters if c.parameters else {} #print('------- %s: %s' % (c, cell_params)) for p in cell_params: cell_params[p] = evaluate(cell_params[p], nl_model.parameters) #print('====== %s: %s' % (c, cell_params)) for proj in nl_model.projections: synapse = nl_model.get_child(proj.synapse, 'synapses') post_pop = nl_model.get_child(proj.postsynaptic, 'populations') if post_pop.component == c.id: #print("--------- Cell %s in post pop %s of %s uses %s"%(c.id,post_pop.id, proj.id, synapse)) if synapse.pynn_receptor_type == 'excitatory': post = '_E' elif synapse.pynn_receptor_type == 'inhibitory': post = '_I' for p in synapse.parameters: cell_params['%s%s' % (p, post)] = synapse.parameters[p] temp_cell = eval('pyNN.neuroml.%s(**cell_params)' % c.pynn_cell) if c.pynn_cell != 'SpikeSourcePoisson': temp_cell.default_initial_values['v'] = temp_cell.parameter_space['v_rest'].base_value cell_id = temp_cell.add_to_nml_doc(nml_doc, None) cell = nml_doc.get_by_id(cell_id) cell.id = c.id for s in nl_model.synapses: if nml_doc.get_by_id(s.id) == None: if s.pynn_synapse_type and s.pynn_receptor_type: import neuroml if s.pynn_synapse_type == 'cond_exp': syn = neuroml.ExpCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev']) nml_doc.exp_cond_synapses.append(syn) elif s.pynn_synapse_type == 'cond_alpha': syn = neuroml.AlphaCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev']) nml_doc.alpha_cond_synapses.append(syn) elif s.pynn_synapse_type == 'curr_exp': syn = neuroml.ExpCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn']) nml_doc.exp_curr_synapses.append(syn) elif s.pynn_synapse_type == 'curr_alpha': syn = neuroml.AlphaCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn']) nml_doc.alpha_curr_synapses.append(syn) for i in nl_model.input_sources: #if nml_doc.get_by_id(i.id) == None: if i.pynn_input: import pyNN.neuroml input_params = i.parameters if i.parameters else {} exec('input__%s = pyNN.neuroml.%s(**input_params)' % (i.id, i.pynn_input)) exec('temp_input = input__%s' % i.id) pg_id = temp_input.add_to_nml_doc(nml_doc, None) #for pp in nml_doc.pulse_generators: # print('PG: %s: %s'%(pp,pp.id)) pg = nml_doc.get_by_id(pg_id) pg.id = i.id return nml_doc
def function[_extract_pynn_components_to_neuroml, parameter[nl_model, nml_doc]]: constant[ Parse the NeuroMLlite description for cell, synapses and inputs described as PyNN elements (e.g. IF_cond_alpha, DCSource) and parameters, and convert these to the equivalent elements in a NeuroMLDocument ] if compare[name[nml_doc] equal[==] constant[None]] begin[:] from relative_module[neuroml] import module[NeuroMLDocument] variable[nml_doc] assign[=] call[name[NeuroMLDocument], parameter[]] for taget[name[c]] in starred[name[nl_model].cells] begin[:] if name[c].pynn_cell begin[:] if compare[call[name[nml_doc].get_by_id, parameter[name[c].id]] equal[==] constant[None]] begin[:] import module[pyNN.neuroml] variable[cell_params] assign[=] <ast.IfExp object at 0x7da1b18548e0> for taget[name[p]] in starred[name[cell_params]] begin[:] call[name[cell_params]][name[p]] assign[=] call[name[evaluate], parameter[call[name[cell_params]][name[p]], name[nl_model].parameters]] for taget[name[proj]] in starred[name[nl_model].projections] begin[:] variable[synapse] assign[=] call[name[nl_model].get_child, parameter[name[proj].synapse, constant[synapses]]] variable[post_pop] assign[=] call[name[nl_model].get_child, parameter[name[proj].postsynaptic, constant[populations]]] if compare[name[post_pop].component equal[==] name[c].id] begin[:] if compare[name[synapse].pynn_receptor_type equal[==] constant[excitatory]] begin[:] variable[post] assign[=] constant[_E] for taget[name[p]] in starred[name[synapse].parameters] begin[:] call[name[cell_params]][binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b18555a0>, <ast.Name object at 0x7da1b18555d0>]]]] assign[=] call[name[synapse].parameters][name[p]] variable[temp_cell] assign[=] call[name[eval], parameter[binary_operation[constant[pyNN.neuroml.%s(**cell_params)] <ast.Mod object at 0x7da2590d6920> name[c].pynn_cell]]] if compare[name[c].pynn_cell not_equal[!=] constant[SpikeSourcePoisson]] begin[:] call[name[temp_cell].default_initial_values][constant[v]] assign[=] call[name[temp_cell].parameter_space][constant[v_rest]].base_value variable[cell_id] assign[=] call[name[temp_cell].add_to_nml_doc, parameter[name[nml_doc], constant[None]]] variable[cell] assign[=] call[name[nml_doc].get_by_id, parameter[name[cell_id]]] name[cell].id assign[=] name[c].id for taget[name[s]] in starred[name[nl_model].synapses] begin[:] if compare[call[name[nml_doc].get_by_id, parameter[name[s].id]] equal[==] constant[None]] begin[:] if <ast.BoolOp object at 0x7da1b18560e0> begin[:] import module[neuroml] if compare[name[s].pynn_synapse_type equal[==] constant[cond_exp]] begin[:] variable[syn] assign[=] call[name[neuroml].ExpCondSynapse, parameter[]] call[name[nml_doc].exp_cond_synapses.append, parameter[name[syn]]] for taget[name[i]] in starred[name[nl_model].input_sources] begin[:] if name[i].pynn_input begin[:] import module[pyNN.neuroml] variable[input_params] assign[=] <ast.IfExp object at 0x7da1b180e290> call[name[exec], parameter[binary_operation[constant[input__%s = pyNN.neuroml.%s(**input_params)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b180ea40>, <ast.Attribute object at 0x7da1b180f280>]]]]] call[name[exec], parameter[binary_operation[constant[temp_input = input__%s] <ast.Mod object at 0x7da2590d6920> name[i].id]]] variable[pg_id] assign[=] call[name[temp_input].add_to_nml_doc, parameter[name[nml_doc], constant[None]]] variable[pg] assign[=] call[name[nml_doc].get_by_id, parameter[name[pg_id]]] name[pg].id assign[=] name[i].id return[name[nml_doc]]
keyword[def] identifier[_extract_pynn_components_to_neuroml] ( identifier[nl_model] , identifier[nml_doc] = keyword[None] ): literal[string] keyword[if] identifier[nml_doc] == keyword[None] : keyword[from] identifier[neuroml] keyword[import] identifier[NeuroMLDocument] identifier[nml_doc] = identifier[NeuroMLDocument] ( identifier[id] = literal[string] ) keyword[for] identifier[c] keyword[in] identifier[nl_model] . identifier[cells] : keyword[if] identifier[c] . identifier[pynn_cell] : keyword[if] identifier[nml_doc] . identifier[get_by_id] ( identifier[c] . identifier[id] )== keyword[None] : keyword[import] identifier[pyNN] . identifier[neuroml] identifier[cell_params] = identifier[c] . identifier[parameters] keyword[if] identifier[c] . identifier[parameters] keyword[else] {} keyword[for] identifier[p] keyword[in] identifier[cell_params] : identifier[cell_params] [ identifier[p] ]= identifier[evaluate] ( identifier[cell_params] [ identifier[p] ], identifier[nl_model] . identifier[parameters] ) keyword[for] identifier[proj] keyword[in] identifier[nl_model] . identifier[projections] : identifier[synapse] = identifier[nl_model] . identifier[get_child] ( identifier[proj] . identifier[synapse] , literal[string] ) identifier[post_pop] = identifier[nl_model] . identifier[get_child] ( identifier[proj] . identifier[postsynaptic] , literal[string] ) keyword[if] identifier[post_pop] . identifier[component] == identifier[c] . identifier[id] : keyword[if] identifier[synapse] . identifier[pynn_receptor_type] == literal[string] : identifier[post] = literal[string] keyword[elif] identifier[synapse] . identifier[pynn_receptor_type] == literal[string] : identifier[post] = literal[string] keyword[for] identifier[p] keyword[in] identifier[synapse] . identifier[parameters] : identifier[cell_params] [ literal[string] %( identifier[p] , identifier[post] )]= identifier[synapse] . identifier[parameters] [ identifier[p] ] identifier[temp_cell] = identifier[eval] ( literal[string] % identifier[c] . identifier[pynn_cell] ) keyword[if] identifier[c] . identifier[pynn_cell] != literal[string] : identifier[temp_cell] . identifier[default_initial_values] [ literal[string] ]= identifier[temp_cell] . identifier[parameter_space] [ literal[string] ]. identifier[base_value] identifier[cell_id] = identifier[temp_cell] . identifier[add_to_nml_doc] ( identifier[nml_doc] , keyword[None] ) identifier[cell] = identifier[nml_doc] . identifier[get_by_id] ( identifier[cell_id] ) identifier[cell] . identifier[id] = identifier[c] . identifier[id] keyword[for] identifier[s] keyword[in] identifier[nl_model] . identifier[synapses] : keyword[if] identifier[nml_doc] . identifier[get_by_id] ( identifier[s] . identifier[id] )== keyword[None] : keyword[if] identifier[s] . identifier[pynn_synapse_type] keyword[and] identifier[s] . identifier[pynn_receptor_type] : keyword[import] identifier[neuroml] keyword[if] identifier[s] . identifier[pynn_synapse_type] == literal[string] : identifier[syn] = identifier[neuroml] . identifier[ExpCondSynapse] ( identifier[id] = identifier[s] . identifier[id] , identifier[tau_syn] = identifier[s] . identifier[parameters] [ literal[string] ], identifier[e_rev] = identifier[s] . identifier[parameters] [ literal[string] ]) identifier[nml_doc] . identifier[exp_cond_synapses] . identifier[append] ( identifier[syn] ) keyword[elif] identifier[s] . identifier[pynn_synapse_type] == literal[string] : identifier[syn] = identifier[neuroml] . identifier[AlphaCondSynapse] ( identifier[id] = identifier[s] . identifier[id] , identifier[tau_syn] = identifier[s] . identifier[parameters] [ literal[string] ], identifier[e_rev] = identifier[s] . identifier[parameters] [ literal[string] ]) identifier[nml_doc] . identifier[alpha_cond_synapses] . identifier[append] ( identifier[syn] ) keyword[elif] identifier[s] . identifier[pynn_synapse_type] == literal[string] : identifier[syn] = identifier[neuroml] . identifier[ExpCurrSynapse] ( identifier[id] = identifier[s] . identifier[id] , identifier[tau_syn] = identifier[s] . identifier[parameters] [ literal[string] ]) identifier[nml_doc] . identifier[exp_curr_synapses] . identifier[append] ( identifier[syn] ) keyword[elif] identifier[s] . identifier[pynn_synapse_type] == literal[string] : identifier[syn] = identifier[neuroml] . identifier[AlphaCurrSynapse] ( identifier[id] = identifier[s] . identifier[id] , identifier[tau_syn] = identifier[s] . identifier[parameters] [ literal[string] ]) identifier[nml_doc] . identifier[alpha_curr_synapses] . identifier[append] ( identifier[syn] ) keyword[for] identifier[i] keyword[in] identifier[nl_model] . identifier[input_sources] : keyword[if] identifier[i] . identifier[pynn_input] : keyword[import] identifier[pyNN] . identifier[neuroml] identifier[input_params] = identifier[i] . identifier[parameters] keyword[if] identifier[i] . identifier[parameters] keyword[else] {} identifier[exec] ( literal[string] %( identifier[i] . identifier[id] , identifier[i] . identifier[pynn_input] )) identifier[exec] ( literal[string] % identifier[i] . identifier[id] ) identifier[pg_id] = identifier[temp_input] . identifier[add_to_nml_doc] ( identifier[nml_doc] , keyword[None] ) identifier[pg] = identifier[nml_doc] . identifier[get_by_id] ( identifier[pg_id] ) identifier[pg] . identifier[id] = identifier[i] . identifier[id] keyword[return] identifier[nml_doc]
def _extract_pynn_components_to_neuroml(nl_model, nml_doc=None): """ Parse the NeuroMLlite description for cell, synapses and inputs described as PyNN elements (e.g. IF_cond_alpha, DCSource) and parameters, and convert these to the equivalent elements in a NeuroMLDocument """ if nml_doc == None: from neuroml import NeuroMLDocument nml_doc = NeuroMLDocument(id='temp') # depends on [control=['if'], data=['nml_doc']] for c in nl_model.cells: if c.pynn_cell: if nml_doc.get_by_id(c.id) == None: import pyNN.neuroml cell_params = c.parameters if c.parameters else {} #print('------- %s: %s' % (c, cell_params)) for p in cell_params: cell_params[p] = evaluate(cell_params[p], nl_model.parameters) # depends on [control=['for'], data=['p']] #print('====== %s: %s' % (c, cell_params)) for proj in nl_model.projections: synapse = nl_model.get_child(proj.synapse, 'synapses') post_pop = nl_model.get_child(proj.postsynaptic, 'populations') if post_pop.component == c.id: #print("--------- Cell %s in post pop %s of %s uses %s"%(c.id,post_pop.id, proj.id, synapse)) if synapse.pynn_receptor_type == 'excitatory': post = '_E' # depends on [control=['if'], data=[]] elif synapse.pynn_receptor_type == 'inhibitory': post = '_I' # depends on [control=['if'], data=[]] for p in synapse.parameters: cell_params['%s%s' % (p, post)] = synapse.parameters[p] # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['proj']] temp_cell = eval('pyNN.neuroml.%s(**cell_params)' % c.pynn_cell) if c.pynn_cell != 'SpikeSourcePoisson': temp_cell.default_initial_values['v'] = temp_cell.parameter_space['v_rest'].base_value # depends on [control=['if'], data=[]] cell_id = temp_cell.add_to_nml_doc(nml_doc, None) cell = nml_doc.get_by_id(cell_id) cell.id = c.id # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']] for s in nl_model.synapses: if nml_doc.get_by_id(s.id) == None: if s.pynn_synapse_type and s.pynn_receptor_type: import neuroml if s.pynn_synapse_type == 'cond_exp': syn = neuroml.ExpCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev']) nml_doc.exp_cond_synapses.append(syn) # depends on [control=['if'], data=[]] elif s.pynn_synapse_type == 'cond_alpha': syn = neuroml.AlphaCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev']) nml_doc.alpha_cond_synapses.append(syn) # depends on [control=['if'], data=[]] elif s.pynn_synapse_type == 'curr_exp': syn = neuroml.ExpCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn']) nml_doc.exp_curr_synapses.append(syn) # depends on [control=['if'], data=[]] elif s.pynn_synapse_type == 'curr_alpha': syn = neuroml.AlphaCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn']) nml_doc.alpha_curr_synapses.append(syn) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] for i in nl_model.input_sources: #if nml_doc.get_by_id(i.id) == None: if i.pynn_input: import pyNN.neuroml input_params = i.parameters if i.parameters else {} exec('input__%s = pyNN.neuroml.%s(**input_params)' % (i.id, i.pynn_input)) exec('temp_input = input__%s' % i.id) pg_id = temp_input.add_to_nml_doc(nml_doc, None) #for pp in nml_doc.pulse_generators: # print('PG: %s: %s'%(pp,pp.id)) pg = nml_doc.get_by_id(pg_id) pg.id = i.id # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return nml_doc
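The synapse branch above dispatches on the PyNN synapse-type string; a reduced, self-contained sketch of just that dispatch, using the same libNeuroML constructors as the original (the id and parameter values here are illustrative):

import neuroml

# Reduced dispatch table mirroring the cond_exp/cond_alpha/curr_* branches.
def pynn_synapse_to_nml(sid, pynn_type, params):
    if pynn_type == 'cond_exp':
        return neuroml.ExpCondSynapse(id=sid, tau_syn=params['tau_syn'],
                                      e_rev=params['e_rev'])
    if pynn_type == 'cond_alpha':
        return neuroml.AlphaCondSynapse(id=sid, tau_syn=params['tau_syn'],
                                        e_rev=params['e_rev'])
    if pynn_type == 'curr_exp':
        return neuroml.ExpCurrSynapse(id=sid, tau_syn=params['tau_syn'])
    if pynn_type == 'curr_alpha':
        return neuroml.AlphaCurrSynapse(id=sid, tau_syn=params['tau_syn'])
    raise ValueError('unsupported PyNN synapse type: %s' % pynn_type)

syn = pynn_synapse_to_nml('syn0', 'cond_exp',
                          {'tau_syn': 5.0, 'e_rev': 0.0})  # values illustrative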
def calc_arguments(args): ''' calc_arguments is a calculator that parses the command-line arguments for the registration command and produces the subject, the model, the log function, and the additional options. ''' (args, opts) = _retinotopy_parser(args) # We do some of the options right here... if opts['help']: print(info, file=sys.stdout) sys.exit(1) # and if we are verbose, lets setup a note function verbose = opts['verbose'] def note(s): if verbose: print(s, file=sys.stdout) sys.stdout.flush() return verbose def error(s): print(s, file=sys.stderr) sys.stderr.flush() sys.exit(1) if len(args) < 1: error('subject argument is required') # Add the subjects directory, if there is one if 'subjects_dir' in opts and opts['subjects_dir'] is not None: add_subject_path(opts['subjects_dir']) # Get the subject now try: sub = subject(args[0]) except Exception: error('Failed to load subject %s' % args[0]) # and the model if len(args) > 1: mdl_name = args[1] elif opts['model_sym']: mdl_name = 'schira' else: mdl_name = 'benson17' try: if opts['model_sym']: model = {h:retinotopy_model(mdl_name).persist() for h in ['lh', 'rh']} else: model = {h:retinotopy_model(mdl_name, hemi=h).persist() for h in ['lh', 'rh']} except Exception: error('Could not load retinotopy model %s' % mdl_name) # Now, we want to run a few filters on the options # Parse the simple numbers for o in ['weight_min', 'scale', 'max_step_size', 'max_out_eccen', 'max_in_eccen', 'min_in_eccen', 'field_sign_weight', 'radius_weight']: opts[o] = float(opts[o]) opts['max_steps'] = int(opts['max_steps']) # Make a note: note('Processing subject: %s' % sub.name) del opts['help'] del opts['verbose'] del opts['subjects_dir'] # That's all we need! return pimms.merge(opts, {'subject': sub.persist(), 'model': pyr.pmap(model), 'options': pyr.pmap(opts), 'note': note, 'error': error})
def function[calc_arguments, parameter[args]]: constant[ calc_arguments is a calculator that parses the command-line arguments for the registration command and produces the subject, the model, the log function, and the additional options. ] <ast.Tuple object at 0x7da1b0ebf4c0> assign[=] call[name[_retinotopy_parser], parameter[name[args]]] if call[name[opts]][constant[help]] begin[:] call[name[print], parameter[name[info]]] call[name[sys].exit, parameter[constant[1]]] variable[verbose] assign[=] call[name[opts]][constant[verbose]] def function[note, parameter[s]]: if name[verbose] begin[:] call[name[print], parameter[name[s]]] call[name[sys].stdout.flush, parameter[]] return[name[verbose]] def function[error, parameter[s]]: call[name[print], parameter[name[s]]] call[name[sys].stderr.flush, parameter[]] call[name[sys].exit, parameter[constant[1]]] if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:] call[name[error], parameter[constant[subject argument is required]]] if <ast.BoolOp object at 0x7da20e9b27d0> begin[:] call[name[add_subject_path], parameter[call[name[opts]][constant[subjects_dir]]]] <ast.Try object at 0x7da20e9b12a0> if compare[call[name[len], parameter[name[args]]] greater[>] constant[1]] begin[:] variable[mdl_name] assign[=] call[name[args]][constant[1]] <ast.Try object at 0x7da20e9b0970> for taget[name[o]] in starred[list[[<ast.Constant object at 0x7da20e9b23e0>, <ast.Constant object at 0x7da20e9b0160>, <ast.Constant object at 0x7da20e9b3fd0>, <ast.Constant object at 0x7da20e9b0b50>, <ast.Constant object at 0x7da20e9b3610>, <ast.Constant object at 0x7da20e9b0220>, <ast.Constant object at 0x7da20e9b2590>, <ast.Constant object at 0x7da20e9b0430>]]] begin[:] call[name[opts]][name[o]] assign[=] call[name[float], parameter[call[name[opts]][name[o]]]] call[name[opts]][constant[max_steps]] assign[=] call[name[int], parameter[call[name[opts]][constant[max_steps]]]] call[name[note], parameter[binary_operation[constant[Processing subject: %s] <ast.Mod object at 0x7da2590d6920> name[sub].name]]] <ast.Delete object at 0x7da204564040> <ast.Delete object at 0x7da204566350> <ast.Delete object at 0x7da204567190> return[call[name[pimms].merge, parameter[name[opts], dictionary[[<ast.Constant object at 0x7da2045646d0>, <ast.Constant object at 0x7da204566c20>, <ast.Constant object at 0x7da204565fc0>, <ast.Constant object at 0x7da204565060>, <ast.Constant object at 0x7da204565f30>], [<ast.Call object at 0x7da204565a50>, <ast.Call object at 0x7da2045657b0>, <ast.Call object at 0x7da204565180>, <ast.Name object at 0x7da204567e80>, <ast.Name object at 0x7da204564fa0>]]]]]
keyword[def] identifier[calc_arguments] ( identifier[args] ): literal[string] ( identifier[args] , identifier[opts] )= identifier[_retinotopy_parser] ( identifier[args] ) keyword[if] identifier[opts] [ literal[string] ]: identifier[print] ( identifier[info] , identifier[file] = identifier[sys] . identifier[stdout] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[verbose] = identifier[opts] [ literal[string] ] keyword[def] identifier[note] ( identifier[s] ): keyword[if] identifier[verbose] : identifier[print] ( identifier[s] , identifier[file] = identifier[sys] . identifier[stdout] ) identifier[sys] . identifier[stdout] . identifier[flush] () keyword[return] identifier[verbose] keyword[def] identifier[error] ( identifier[s] ): identifier[print] ( identifier[s] , identifier[file] = identifier[sys] . identifier[stderr] ) identifier[sys] . identifier[stderr] . identifier[flush] () identifier[sys] . identifier[exit] ( literal[int] ) keyword[if] identifier[len] ( identifier[args] )< literal[int] : identifier[error] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[opts] keyword[and] identifier[opts] [ literal[string] ] keyword[is] keyword[not] keyword[None] : identifier[add_subject_path] ( identifier[opts] [ literal[string] ]) keyword[try] : identifier[sub] = identifier[subject] ( identifier[args] [ literal[int] ]) keyword[except] identifier[Exception] : identifier[error] ( literal[string] % identifier[args] [ literal[int] ]) keyword[if] identifier[len] ( identifier[args] )> literal[int] : identifier[mdl_name] = identifier[args] [ literal[int] ] keyword[elif] identifier[opts] [ literal[string] ]: identifier[mdl_name] = literal[string] keyword[else] : identifier[mdl_name] = literal[string] keyword[try] : keyword[if] identifier[opts] [ literal[string] ]: identifier[model] ={ identifier[h] : identifier[retinotopy_model] ( identifier[mdl_name] ). identifier[persist] () keyword[for] identifier[h] keyword[in] [ literal[string] , literal[string] ]} keyword[else] : identifier[model] ={ identifier[h] : identifier[retinotopy_model] ( identifier[mdl_name] , identifier[hemi] = identifier[h] ). identifier[persist] () keyword[for] identifier[h] keyword[in] [ literal[string] , literal[string] ]} keyword[except] identifier[Exception] : identifier[error] ( literal[string] % identifier[mdl_name] ) keyword[for] identifier[o] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: identifier[opts] [ identifier[o] ]= identifier[float] ( identifier[opts] [ identifier[o] ]) identifier[opts] [ literal[string] ]= identifier[int] ( identifier[opts] [ literal[string] ]) identifier[note] ( literal[string] % identifier[sub] . identifier[name] ) keyword[del] identifier[opts] [ literal[string] ] keyword[del] identifier[opts] [ literal[string] ] keyword[del] identifier[opts] [ literal[string] ] keyword[return] identifier[pimms] . identifier[merge] ( identifier[opts] , { literal[string] : identifier[sub] . identifier[persist] (), literal[string] : identifier[pyr] . identifier[pmap] ( identifier[model] ), literal[string] : identifier[pyr] . identifier[pmap] ( identifier[opts] ), literal[string] : identifier[note] , literal[string] : identifier[error] })
def calc_arguments(args): """ calc_arguments is a calculator that parses the command-line arguments for the registration command and produces the subject, the model, the log function, and the additional options. """ (args, opts) = _retinotopy_parser(args) # We do some of the options right here... if opts['help']: print(info, file=sys.stdout) sys.exit(1) # depends on [control=['if'], data=[]] # and if we are verbose, lets setup a note function verbose = opts['verbose'] def note(s): if verbose: print(s, file=sys.stdout) sys.stdout.flush() # depends on [control=['if'], data=[]] return verbose def error(s): print(s, file=sys.stderr) sys.stderr.flush() sys.exit(1) if len(args) < 1: error('subject argument is required') # depends on [control=['if'], data=[]] # Add the subjects directory, if there is one if 'subjects_dir' in opts and opts['subjects_dir'] is not None: add_subject_path(opts['subjects_dir']) # depends on [control=['if'], data=[]] # Get the subject now try: sub = subject(args[0]) # depends on [control=['try'], data=[]] except Exception: error('Failed to load subject %s' % args[0]) # depends on [control=['except'], data=[]] # and the model if len(args) > 1: mdl_name = args[1] # depends on [control=['if'], data=[]] elif opts['model_sym']: mdl_name = 'schira' # depends on [control=['if'], data=[]] else: mdl_name = 'benson17' try: if opts['model_sym']: model = {h: retinotopy_model(mdl_name).persist() for h in ['lh', 'rh']} # depends on [control=['if'], data=[]] else: model = {h: retinotopy_model(mdl_name, hemi=h).persist() for h in ['lh', 'rh']} # depends on [control=['try'], data=[]] except Exception: error('Could not load retinotopy model %s' % mdl_name) # depends on [control=['except'], data=[]] # Now, we want to run a few filters on the options # Parse the simple numbers for o in ['weight_min', 'scale', 'max_step_size', 'max_out_eccen', 'max_in_eccen', 'min_in_eccen', 'field_sign_weight', 'radius_weight']: opts[o] = float(opts[o]) # depends on [control=['for'], data=['o']] opts['max_steps'] = int(opts['max_steps']) # Make a note: note('Processing subject: %s' % sub.name) del opts['help'] del opts['verbose'] del opts['subjects_dir'] # That's all we need! return pimms.merge(opts, {'subject': sub.persist(), 'model': pyr.pmap(model), 'options': pyr.pmap(opts), 'note': note, 'error': error})
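The note/error closures above are a small reusable CLI pattern (verbose-gated stdout notes, stderr errors that exit); a self-contained sketch of just that piece:

import sys

def make_loggers(verbose):
    """Build note()/error() closures like the ones in calc_arguments."""
    def note(s):
        if verbose:
            print(s, file=sys.stdout)
            sys.stdout.flush()
        return verbose
    def error(s):
        print(s, file=sys.stderr)
        sys.stderr.flush()
        sys.exit(1)  # error() never returns
    return note, error

note, error = make_loggers(verbose=True)
note('Processing subject: bert')  # printed only when verbose is True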
def discover_connectors( domain: str, loop=None, logger=logger): """ Discover all connection options for a domain, in descending order of preference. This coroutine returns options discovered from SRV records, or if none are found, the generic option using the domain name and the default XMPP client port. Each option is represented by a triple ``(host, port, connector)``. `connector` is a :class:`aioxmpp.connector.BaseConnector` instance which is suitable to connect to the given host and port. `logger` is the logger used by the function. The following sources are supported: * :rfc:`6120` SRV records. One option is returned per SRV record. If one of the SRV records points to the root name (``.``), :class:`ValueError` is raised (the domain specifically said that XMPP is not supported here). * :xep:`368` SRV records. One option is returned per SRV record. * :rfc:`6120` fallback process (only if no SRV records are found). One option is returned for the host name with the default XMPP client port. The options discovered from SRV records are mixed together, ordered by priority and then within priorities are shuffled according to their weight. Thus, if there are multiple records of equal priority, the result of the function is not deterministic. .. versionadded:: 0.6 """ domain_encoded = domain.encode("idna") + b"." starttls_srv_failed = False tls_srv_failed = False try: starttls_srv_records = yield from network.lookup_srv( domain_encoded, "xmpp-client", ) starttls_srv_disabled = False except dns.resolver.NoNameservers as exc: starttls_srv_records = [] starttls_srv_disabled = False starttls_srv_failed = True starttls_srv_exc = exc logger.debug("xmpp-client SRV lookup for domain %s failed " "(may not be fatal)", domain_encoded, exc_info=True) except ValueError: starttls_srv_records = [] starttls_srv_disabled = True try: tls_srv_records = yield from network.lookup_srv( domain_encoded, "xmpps-client", ) tls_srv_disabled = False except dns.resolver.NoNameservers: tls_srv_records = [] tls_srv_disabled = False tls_srv_failed = True logger.debug("xmpps-client SRV lookup for domain %s failed " "(may not be fatal)", domain_encoded, exc_info=True) except ValueError: tls_srv_records = [] tls_srv_disabled = True if starttls_srv_failed and (tls_srv_failed or tls_srv_records is None): # the failure is probably more useful as a diagnostic # if we find a good reason to allow this scenario, we might change it # later. raise starttls_srv_exc if starttls_srv_disabled and (tls_srv_disabled or tls_srv_records is None): raise ValueError( "XMPP not enabled on domain {!r}".format(domain), ) if starttls_srv_records is None and tls_srv_records is None: # no SRV records published, fall back logger.debug( "no SRV records found for %s, falling back", domain, ) return [ (domain, 5222, connector.STARTTLSConnector()), ] starttls_srv_records = starttls_srv_records or [] tls_srv_records = tls_srv_records or [] srv_records = [ (prio, weight, (host.decode("ascii"), port, connector.STARTTLSConnector())) for prio, weight, (host, port) in starttls_srv_records ] srv_records.extend( (prio, weight, (host.decode("ascii"), port, connector.XMPPOverTLSConnector())) for prio, weight, (host, port) in tls_srv_records ) options = list( network.group_and_order_srv_records(srv_records) ) logger.debug( "options for %s: %r", domain, options, ) return options
def function[discover_connectors, parameter[domain, loop, logger]]: constant[ Discover all connection options for a domain, in descending order of preference. This coroutine returns options discovered from SRV records, or if none are found, the generic option using the domain name and the default XMPP client port. Each option is represented by a triple ``(host, port, connector)``. `connector` is a :class:`aioxmpp.connector.BaseConnector` instance which is suitable to connect to the given host and port. `logger` is the logger used by the function. The following sources are supported: * :rfc:`6120` SRV records. One option is returned per SRV record. If one of the SRV records points to the root name (``.``), :class:`ValueError` is raised (the domain specifically said that XMPP is not supported here). * :xep:`368` SRV records. One option is returned per SRV record. * :rfc:`6120` fallback process (only if no SRV records are found). One option is returned for the host name with the default XMPP client port. The options discovered from SRV records are mixed together, ordered by priority and then within priorities are shuffled according to their weight. Thus, if there are multiple records of equal priority, the result of the function is not deterministic. .. versionadded:: 0.6 ] variable[domain_encoded] assign[=] binary_operation[call[name[domain].encode, parameter[constant[idna]]] + constant[b'.']] variable[starttls_srv_failed] assign[=] constant[False] variable[tls_srv_failed] assign[=] constant[False] <ast.Try object at 0x7da20e9b2740> <ast.Try object at 0x7da18c4cf1f0> if <ast.BoolOp object at 0x7da18c4cc5b0> begin[:] <ast.Raise object at 0x7da18c4cc2b0> if <ast.BoolOp object at 0x7da18c4cfdc0> begin[:] <ast.Raise object at 0x7da18f58cc10> if <ast.BoolOp object at 0x7da18f58fa90> begin[:] call[name[logger].debug, parameter[constant[no SRV records found for %s, falling back], name[domain]]] return[list[[<ast.Tuple object at 0x7da18f58e5f0>]]] variable[starttls_srv_records] assign[=] <ast.BoolOp object at 0x7da18f58ce80> variable[tls_srv_records] assign[=] <ast.BoolOp object at 0x7da18f58c940> variable[srv_records] assign[=] <ast.ListComp object at 0x7da18f58f580> call[name[srv_records].extend, parameter[<ast.GeneratorExp object at 0x7da20ed9bd30>]] variable[options] assign[=] call[name[list], parameter[call[name[network].group_and_order_srv_records, parameter[name[srv_records]]]]] call[name[logger].debug, parameter[constant[options for %s: %r], name[domain], name[options]]] return[name[options]]
keyword[def] identifier[discover_connectors] ( identifier[domain] : identifier[str] , identifier[loop] = keyword[None] , identifier[logger] = identifier[logger] ): literal[string] identifier[domain_encoded] = identifier[domain] . identifier[encode] ( literal[string] )+ literal[string] identifier[starttls_srv_failed] = keyword[False] identifier[tls_srv_failed] = keyword[False] keyword[try] : identifier[starttls_srv_records] = keyword[yield] keyword[from] identifier[network] . identifier[lookup_srv] ( identifier[domain_encoded] , literal[string] , ) identifier[starttls_srv_disabled] = keyword[False] keyword[except] identifier[dns] . identifier[resolver] . identifier[NoNameservers] keyword[as] identifier[exc] : identifier[starttls_srv_records] =[] identifier[starttls_srv_disabled] = keyword[False] identifier[starttls_srv_failed] = keyword[True] identifier[starttls_srv_exc] = identifier[exc] identifier[logger] . identifier[debug] ( literal[string] literal[string] , identifier[domain_encoded] , identifier[exc_info] = keyword[True] ) keyword[except] identifier[ValueError] : identifier[starttls_srv_records] =[] identifier[starttls_srv_disabled] = keyword[True] keyword[try] : identifier[tls_srv_records] = keyword[yield] keyword[from] identifier[network] . identifier[lookup_srv] ( identifier[domain_encoded] , literal[string] , ) identifier[tls_srv_disabled] = keyword[False] keyword[except] identifier[dns] . identifier[resolver] . identifier[NoNameservers] : identifier[tls_srv_records] =[] identifier[tls_srv_disabled] = keyword[False] identifier[tls_srv_failed] = keyword[True] identifier[logger] . identifier[debug] ( literal[string] literal[string] , identifier[domain_encoded] , identifier[exc_info] = keyword[True] ) keyword[except] identifier[ValueError] : identifier[tls_srv_records] =[] identifier[tls_srv_disabled] = keyword[True] keyword[if] identifier[starttls_srv_failed] keyword[and] ( identifier[tls_srv_failed] keyword[or] identifier[tls_srv_records] keyword[is] keyword[None] ): keyword[raise] identifier[starttls_srv_exc] keyword[if] identifier[starttls_srv_disabled] keyword[and] ( identifier[tls_srv_disabled] keyword[or] identifier[tls_srv_records] keyword[is] keyword[None] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[domain] ), ) keyword[if] identifier[starttls_srv_records] keyword[is] keyword[None] keyword[and] identifier[tls_srv_records] keyword[is] keyword[None] : identifier[logger] . identifier[debug] ( literal[string] , identifier[domain] , ) keyword[return] [ ( identifier[domain] , literal[int] , identifier[connector] . identifier[STARTTLSConnector] ()), ] identifier[starttls_srv_records] = identifier[starttls_srv_records] keyword[or] [] identifier[tls_srv_records] = identifier[tls_srv_records] keyword[or] [] identifier[srv_records] =[ ( identifier[prio] , identifier[weight] ,( identifier[host] . identifier[decode] ( literal[string] ), identifier[port] , identifier[connector] . identifier[STARTTLSConnector] ())) keyword[for] identifier[prio] , identifier[weight] ,( identifier[host] , identifier[port] ) keyword[in] identifier[starttls_srv_records] ] identifier[srv_records] . identifier[extend] ( ( identifier[prio] , identifier[weight] ,( identifier[host] . identifier[decode] ( literal[string] ), identifier[port] , identifier[connector] . 
identifier[XMPPOverTLSConnector] ())) keyword[for] identifier[prio] , identifier[weight] ,( identifier[host] , identifier[port] ) keyword[in] identifier[tls_srv_records] ) identifier[options] = identifier[list] ( identifier[network] . identifier[group_and_order_srv_records] ( identifier[srv_records] ) ) identifier[logger] . identifier[debug] ( literal[string] , identifier[domain] , identifier[options] , ) keyword[return] identifier[options]
def discover_connectors(domain: str, loop=None, logger=logger): """ Discover all connection options for a domain, in descending order of preference. This coroutine returns options discovered from SRV records, or if none are found, the generic option using the domain name and the default XMPP client port. Each option is represented by a triple ``(host, port, connector)``. `connector` is a :class:`aioxmpp.connector.BaseConnector` instance which is suitable to connect to the given host and port. `logger` is the logger used by the function. The following sources are supported: * :rfc:`6120` SRV records. One option is returned per SRV record. If one of the SRV records points to the root name (``.``), :class:`ValueError` is raised (the domain specifically said that XMPP is not supported here). * :xep:`368` SRV records. One option is returned per SRV record. * :rfc:`6120` fallback process (only if no SRV records are found). One option is returned for the host name with the default XMPP client port. The options discovered from SRV records are mixed together, ordered by priority and then within priorities are shuffled according to their weight. Thus, if there are multiple records of equal priority, the result of the function is not deterministic. .. versionadded:: 0.6 """ domain_encoded = domain.encode('idna') + b'.' starttls_srv_failed = False tls_srv_failed = False try: starttls_srv_records = (yield from network.lookup_srv(domain_encoded, 'xmpp-client')) starttls_srv_disabled = False # depends on [control=['try'], data=[]] except dns.resolver.NoNameservers as exc: starttls_srv_records = [] starttls_srv_disabled = False starttls_srv_failed = True starttls_srv_exc = exc logger.debug('xmpp-client SRV lookup for domain %s failed (may not be fatal)', domain_encoded, exc_info=True) # depends on [control=['except'], data=['exc']] except ValueError: starttls_srv_records = [] starttls_srv_disabled = True # depends on [control=['except'], data=[]] try: tls_srv_records = (yield from network.lookup_srv(domain_encoded, 'xmpps-client')) tls_srv_disabled = False # depends on [control=['try'], data=[]] except dns.resolver.NoNameservers: tls_srv_records = [] tls_srv_disabled = False tls_srv_failed = True logger.debug('xmpps-client SRV lookup for domain %s failed (may not be fatal)', domain_encoded, exc_info=True) # depends on [control=['except'], data=[]] except ValueError: tls_srv_records = [] tls_srv_disabled = True # depends on [control=['except'], data=[]] if starttls_srv_failed and (tls_srv_failed or tls_srv_records is None): # the failure is probably more useful as a diagnostic # if we find a good reason to allow this scenario, we might change it # later. 
raise starttls_srv_exc # depends on [control=['if'], data=[]] if starttls_srv_disabled and (tls_srv_disabled or tls_srv_records is None): raise ValueError('XMPP not enabled on domain {!r}'.format(domain)) # depends on [control=['if'], data=[]] if starttls_srv_records is None and tls_srv_records is None: # no SRV records published, fall back logger.debug('no SRV records found for %s, falling back', domain) return [(domain, 5222, connector.STARTTLSConnector())] # depends on [control=['if'], data=[]] starttls_srv_records = starttls_srv_records or [] tls_srv_records = tls_srv_records or [] srv_records = [(prio, weight, (host.decode('ascii'), port, connector.STARTTLSConnector())) for (prio, weight, (host, port)) in starttls_srv_records] srv_records.extend(((prio, weight, (host.decode('ascii'), port, connector.XMPPOverTLSConnector())) for (prio, weight, (host, port)) in tls_srv_records)) options = list(network.group_and_order_srv_records(srv_records)) logger.debug('options for %s: %r', domain, options) return options
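The ordering that `network.group_and_order_srv_records` performs on the collected triples is the standard RFC 2782 scheme: ascending priority, with a weighted random selection inside each priority group (which is why the result is not deterministic for equal priorities). A minimal self-contained sketch of that idea (the real aioxmpp helper is more involved; names below are illustrative):

import itertools
import random

def order_srv_records(records):
    """Order (priority, weight, payload) triples: ascending priority,
    weighted shuffle inside each priority group (RFC 2782 style)."""
    ordered = []
    records = sorted(records, key=lambda r: r[0])
    for _, group in itertools.groupby(records, key=lambda r: r[0]):
        group = list(group)
        while group:
            total = sum(w for _, w, _ in group)
            # pick proportionally to weight; all-zero weights fall back to uniform
            pick = random.uniform(0, total) if total else random.random() * len(group)
            acc = 0.0
            for i, (_, w, payload) in enumerate(group):
                acc += w if total else 1
                if pick <= acc:
                    ordered.append(payload)
                    del group[i]
                    break
    return ordered

print(order_srv_records([(10, 60, "a"), (10, 20, "b"), (20, 0, "c")]))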
def LogGamma(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Returns the log of the gamma of the inputVertex :param input_vertex: the vertex """ return Double(context.jvm_view().LogGammaVertex, label, cast_to_double_vertex(input_vertex))
def function[LogGamma, parameter[input_vertex, label]]: constant[ Returns the log of the gamma of the inputVertex :param input_vertex: the vertex ] return[call[name[Double], parameter[call[name[context].jvm_view, parameter[]].LogGammaVertex, name[label], call[name[cast_to_double_vertex], parameter[name[input_vertex]]]]]]
keyword[def] identifier[LogGamma] ( identifier[input_vertex] : identifier[vertex_constructor_param_types] , identifier[label] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[Vertex] : literal[string] keyword[return] identifier[Double] ( identifier[context] . identifier[jvm_view] (). identifier[LogGammaVertex] , identifier[label] , identifier[cast_to_double_vertex] ( identifier[input_vertex] ))
def LogGamma(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Returns the log of the gamma of the inputVertex :param input_vertex: the vertex """ return Double(context.jvm_view().LogGammaVertex, label, cast_to_double_vertex(input_vertex))
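For intuition, the quantity this vertex represents is the natural log of the gamma function of its input. Outside of keanu the same value is available from the standard library (a sanity check, not part of this API):

import math

# Gamma(5) = 4! = 24, so lgamma(5) should equal log(24)
assert math.isclose(math.lgamma(5.0), math.log(24.0))
print(math.lgamma(5.0))  # ~3.178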
def has_true(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
    """
    Should return True if `e` can possibly be True.

    :param e: The AST.
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver, for backends that require it.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A boolean
    """
    #if self._solver_required and solver is None:
    #    raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

    return self._has_true(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
def function[has_true, parameter[self, e, extra_constraints, solver, model_callback]]: constant[ Should return True if `e` can possibly be True. :param e: The AST. :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve. :param solver: A solver, for backends that require it. :param model_callback: a function that will be executed with recovered models (if any) :return: A boolean ] return[call[name[self]._has_true, parameter[call[name[self].convert, parameter[name[e]]]]]]
keyword[def] identifier[has_true] ( identifier[self] , identifier[e] , identifier[extra_constraints] =(), identifier[solver] = keyword[None] , identifier[model_callback] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[_has_true] ( identifier[self] . identifier[convert] ( identifier[e] ), identifier[extra_constraints] = identifier[extra_constraints] , identifier[solver] = identifier[solver] , identifier[model_callback] = identifier[model_callback] )
def has_true(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
    '\n        Should return True if `e` can possibly be True.\n\n        :param e: The AST.\n        :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.\n        :param solver: A solver, for backends that require it.\n        :param model_callback: a function that will be executed with recovered models (if any)\n        :return: A boolean\n        '
    #if self._solver_required and solver is None:
    #    raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
    return self._has_true(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
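The shape here is a template method: the public `has_true` normalizes the expression with `convert` and defers to the backend-specific `_has_true`. A toy concrete backend showing the same call chain (illustrative only, not claripy's real class hierarchy):

class ToyBackend:
    def convert(self, e):
        # toy "conversion": coerce the expression to a plain bool
        return bool(e)

    def _has_true(self, converted, extra_constraints=(), solver=None, model_callback=None):
        return converted is True

    def has_true(self, e, extra_constraints=(), solver=None, model_callback=None):
        return self._has_true(self.convert(e), extra_constraints=extra_constraints,
                              solver=solver, model_callback=model_callback)

backend = ToyBackend()
print(backend.has_true(1))  # True
print(backend.has_true(0))  # False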
def compute_transformed(context):
    """Compute transformed key for opening database"""
    if context._._.transformed_key is not None:
        transformed_key = context._._.transformed_key
    else:
        transformed_key = aes_kdf(
            context._.header.value.dynamic_header.transform_seed.data,
            context._.header.value.dynamic_header.transform_rounds.data,
            password=context._._.password,
            keyfile=context._._.keyfile
        )

    return transformed_key
def function[compute_transformed, parameter[context]]: constant[Compute transformed key for opening database] if compare[name[context]._._.transformed_key is_not constant[None]] begin[:] variable[transformed_key] assign[=] name[context]._._.transformed_key return[name[transformed_key]]
keyword[def] identifier[compute_transformed] ( identifier[context] ): literal[string] keyword[if] identifier[context] . identifier[_] . identifier[_] . identifier[transformed_key] keyword[is] keyword[not] keyword[None] : identifier[transformed_key] = identifier[context] . identifier[_] . identifier[_] . identifier[transformed_key] keyword[else] : identifier[transformed_key] = identifier[aes_kdf] ( identifier[context] . identifier[_] . identifier[header] . identifier[value] . identifier[dynamic_header] . identifier[transform_seed] . identifier[data] , identifier[context] . identifier[_] . identifier[header] . identifier[value] . identifier[dynamic_header] . identifier[transform_rounds] . identifier[data] , identifier[password] = identifier[context] . identifier[_] . identifier[_] . identifier[password] , identifier[keyfile] = identifier[context] . identifier[_] . identifier[_] . identifier[keyfile] ) keyword[return] identifier[transformed_key]
def compute_transformed(context): """Compute transformed key for opening database""" if context._._.transformed_key is not None: transformed_key = context._._.transformed_key # depends on [control=['if'], data=[]] else: transformed_key = aes_kdf(context._.header.value.dynamic_header.transform_seed.data, context._.header.value.dynamic_header.transform_rounds.data, password=context._._.password, keyfile=context._._.keyfile) return transformed_key
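The `aes_kdf` call above comes from pykeepass and implements the KDBX3 key transform: the composite key is AES-ECB-encrypted with the transform seed for the configured number of rounds and then hashed. A rough sketch of that transform with pycryptodome (simplified; the real KeePass derivation has more steps around it):

import hashlib
from Crypto.Cipher import AES  # pycryptodome

def aes_kdf_sketch(transform_seed: bytes, rounds: int, composite_key: bytes) -> bytes:
    """KDBX3-style key transform: repeated AES-ECB, then SHA-256."""
    cipher = AES.new(transform_seed, AES.MODE_ECB)
    key = composite_key
    for _ in range(rounds):
        key = cipher.encrypt(key)  # 32-byte key = two ECB blocks
    return hashlib.sha256(key).digest()

seed = bytes(32)  # placeholder seed; real seeds come from the file header
composite = hashlib.sha256(b"password").digest()
print(aes_kdf_sketch(seed, 6000, composite).hex()[:16], "...")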
def get_app(self, reference_app=None): """Helper method that implements the logic to look up an application.""" if reference_app is not None: return reference_app if self.app is not None: return self.app ctx = stack.top if ctx is not None: return ctx.app raise RuntimeError('Application not registered on Bouncer' ' instance and no application bound' ' to current context')
def function[get_app, parameter[self, reference_app]]: constant[Helper method that implements the logic to look up an application.] if compare[name[reference_app] is_not constant[None]] begin[:] return[name[reference_app]] if compare[name[self].app is_not constant[None]] begin[:] return[name[self].app] variable[ctx] assign[=] name[stack].top if compare[name[ctx] is_not constant[None]] begin[:] return[name[ctx].app] <ast.Raise object at 0x7da1b2344370>
keyword[def] identifier[get_app] ( identifier[self] , identifier[reference_app] = keyword[None] ): literal[string] keyword[if] identifier[reference_app] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[reference_app] keyword[if] identifier[self] . identifier[app] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[app] identifier[ctx] = identifier[stack] . identifier[top] keyword[if] identifier[ctx] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[ctx] . identifier[app] keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] literal[string] )
def get_app(self, reference_app=None): """Helper method that implements the logic to look up an application.""" if reference_app is not None: return reference_app # depends on [control=['if'], data=['reference_app']] if self.app is not None: return self.app # depends on [control=['if'], data=[]] ctx = stack.top if ctx is not None: return ctx.app # depends on [control=['if'], data=['ctx']] raise RuntimeError('Application not registered on Bouncer instance and no application bound to current context')
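This is the standard Flask-extension lookup order: an explicitly passed app wins, then the app bound at construction, then the active application context. On modern Flask the context branch is usually written with `current_app` instead of the deprecated context stack (a sketch, not Flask-Bouncer's actual code):

from flask import Flask, current_app

class ToyExtension:
    def __init__(self, app=None):
        self.app = app

    def get_app(self, reference_app=None):
        if reference_app is not None:
            return reference_app
        if self.app is not None:
            return self.app
        # current_app raises RuntimeError itself outside an app context
        return current_app._get_current_object()

app = Flask(__name__)
ext = ToyExtension()
with app.app_context():
    assert ext.get_app() is app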
def schedule(self):
    """Initiate distribution of the test collection.

    Initiate scheduling of the items across the nodes. If this gets called
    again later it behaves the same as calling ``._reschedule()`` on all
    nodes so that newly added nodes will start to be used.

    If ``.collection_is_completed`` is True, this is called by the hook:

    - ``DSession.worker_collectionfinish``.
    """
    assert self.collection_is_completed

    # Initial distribution already happened, reschedule on all nodes
    if self.collection is not None:
        for node in self.nodes:
            self._reschedule(node)
        return

    # Check that all nodes collected the same tests
    if not self._check_nodes_have_same_collection():
        self.log("**Different tests collected, aborting run**")
        return

    # Collections are identical, create the final list of items
    self.collection = list(next(iter(self.registered_collections.values())))
    if not self.collection:
        return

    # Determine chunks of work (scopes)
    for nodeid in self.collection:
        scope = self._split_scope(nodeid)
        work_unit = self.workqueue.setdefault(scope, default=OrderedDict())
        work_unit[nodeid] = False

    # Avoid having more workers than work
    extra_nodes = len(self.nodes) - len(self.workqueue)
    if extra_nodes > 0:
        self.log("Shutting down {0} nodes".format(extra_nodes))
        for _ in range(extra_nodes):
            unused_node, assigned = self.assigned_work.popitem(last=True)
            self.log("Shutting down unused node {0}".format(unused_node))
            unused_node.shutdown()

    # Assign initial workload
    for node in self.nodes:
        self._assign_work_unit(node)

    # Ensure nodes start with at least two work units if possible (#277)
    for node in self.nodes:
        self._reschedule(node)

    # Initial distribution sent all tests, start node shutdown
    if not self.workqueue:
        for node in self.nodes:
            node.shutdown()
def function[schedule, parameter[self]]: constant[Initiate distribution of the test collection. Initiate scheduling of the items across the nodes. If this gets called again later it behaves the same as calling ``._reschedule()`` on all nodes so that newly added nodes will start to be used. If ``.collection_is_completed`` is True, this is called by the hook: - ``DSession.worker_collectionfinish``. ] assert[name[self].collection_is_completed] if compare[name[self].collection is_not constant[None]] begin[:] for taget[name[node]] in starred[name[self].nodes] begin[:] call[name[self]._reschedule, parameter[name[node]]] return[None] if <ast.UnaryOp object at 0x7da1b17a48b0> begin[:] call[name[self].log, parameter[constant[**Different tests collected, aborting run**]]] return[None] name[self].collection assign[=] call[name[list], parameter[call[name[next], parameter[call[name[iter], parameter[call[name[self].registered_collections.values, parameter[]]]]]]]] if <ast.UnaryOp object at 0x7da1b1737370> begin[:] return[None] for taget[name[nodeid]] in starred[name[self].collection] begin[:] variable[scope] assign[=] call[name[self]._split_scope, parameter[name[nodeid]]] variable[work_unit] assign[=] call[name[self].workqueue.setdefault, parameter[name[scope]]] call[name[work_unit]][name[nodeid]] assign[=] constant[False] variable[extra_nodes] assign[=] binary_operation[call[name[len], parameter[name[self].nodes]] - call[name[len], parameter[name[self].workqueue]]] if compare[name[extra_nodes] greater[>] constant[0]] begin[:] call[name[self].log, parameter[call[constant[Shutting down {0} nodes].format, parameter[name[extra_nodes]]]]] for taget[name[_]] in starred[call[name[range], parameter[name[extra_nodes]]]] begin[:] <ast.Tuple object at 0x7da1b1737310> assign[=] call[name[self].assigned_work.popitem, parameter[]] call[name[self].log, parameter[call[constant[Shutting down unused node {0}].format, parameter[name[unused_node]]]]] call[name[unused_node].shutdown, parameter[]] for taget[name[node]] in starred[name[self].nodes] begin[:] call[name[self]._assign_work_unit, parameter[name[node]]] for taget[name[node]] in starred[name[self].nodes] begin[:] call[name[self]._reschedule, parameter[name[node]]] if <ast.UnaryOp object at 0x7da1b17363b0> begin[:] for taget[name[node]] in starred[name[self].nodes] begin[:] call[name[node].shutdown, parameter[]]
keyword[def] identifier[schedule] ( identifier[self] ): literal[string] keyword[assert] identifier[self] . identifier[collection_is_completed] keyword[if] identifier[self] . identifier[collection] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] : identifier[self] . identifier[_reschedule] ( identifier[node] ) keyword[return] keyword[if] keyword[not] identifier[self] . identifier[_check_nodes_have_same_collection] (): identifier[self] . identifier[log] ( literal[string] ) keyword[return] identifier[self] . identifier[collection] = identifier[list] ( identifier[next] ( identifier[iter] ( identifier[self] . identifier[registered_collections] . identifier[values] ()))) keyword[if] keyword[not] identifier[self] . identifier[collection] : keyword[return] keyword[for] identifier[nodeid] keyword[in] identifier[self] . identifier[collection] : identifier[scope] = identifier[self] . identifier[_split_scope] ( identifier[nodeid] ) identifier[work_unit] = identifier[self] . identifier[workqueue] . identifier[setdefault] ( identifier[scope] , identifier[default] = identifier[OrderedDict] ()) identifier[work_unit] [ identifier[nodeid] ]= keyword[False] identifier[extra_nodes] = identifier[len] ( identifier[self] . identifier[nodes] )- identifier[len] ( identifier[self] . identifier[workqueue] ) keyword[if] identifier[extra_nodes] > literal[int] : identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[extra_nodes] )) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[extra_nodes] ): identifier[unused_node] , identifier[assigned] = identifier[self] . identifier[assigned_work] . identifier[popitem] ( identifier[last] = keyword[True] ) identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[unused_node] )) identifier[unused_node] . identifier[shutdown] () keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] : identifier[self] . identifier[_assign_work_unit] ( identifier[node] ) keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] : identifier[self] . identifier[_reschedule] ( identifier[node] ) keyword[if] keyword[not] identifier[self] . identifier[workqueue] : keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] : identifier[node] . identifier[shutdown] ()
def schedule(self):
    """Initiate distribution of the test collection.

    Initiate scheduling of the items across the nodes. If this gets called
    again later it behaves the same as calling ``._reschedule()`` on all
    nodes so that newly added nodes will start to be used.

    If ``.collection_is_completed`` is True, this is called by the hook:

    - ``DSession.worker_collectionfinish``.
    """
    assert self.collection_is_completed
    # Initial distribution already happened, reschedule on all nodes
    if self.collection is not None:
        for node in self.nodes:
            self._reschedule(node) # depends on [control=['for'], data=['node']]
        return # depends on [control=['if'], data=[]]
    # Check that all nodes collected the same tests
    if not self._check_nodes_have_same_collection():
        self.log('**Different tests collected, aborting run**')
        return # depends on [control=['if'], data=[]]
    # Collections are identical, create the final list of items
    self.collection = list(next(iter(self.registered_collections.values())))
    if not self.collection:
        return # depends on [control=['if'], data=[]]
    # Determine chunks of work (scopes)
    for nodeid in self.collection:
        scope = self._split_scope(nodeid)
        work_unit = self.workqueue.setdefault(scope, default=OrderedDict())
        work_unit[nodeid] = False # depends on [control=['for'], data=['nodeid']]
    # Avoid having more workers than work
    extra_nodes = len(self.nodes) - len(self.workqueue)
    if extra_nodes > 0:
        self.log('Shutting down {0} nodes'.format(extra_nodes))
        for _ in range(extra_nodes):
            (unused_node, assigned) = self.assigned_work.popitem(last=True)
            self.log('Shutting down unused node {0}'.format(unused_node))
            unused_node.shutdown() # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['extra_nodes']]
    # Assign initial workload
    for node in self.nodes:
        self._assign_work_unit(node) # depends on [control=['for'], data=['node']]
    # Ensure nodes start with at least two work units if possible (#277)
    for node in self.nodes:
        self._reschedule(node) # depends on [control=['for'], data=['node']]
    # Initial distribution sent all tests, start node shutdown
    if not self.workqueue:
        for node in self.nodes:
            node.shutdown() # depends on [control=['for'], data=['node']] # depends on [control=['if'], data=[]]
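The core data structure in this scheduler is the work queue: every collected test id is bucketed by scope into an ordered dict of ordered dicts, with a bool marking completion. A self-contained sketch of that bucketing step (here scope = test file, in the spirit of xdist's loadscope distribution):

from collections import OrderedDict

def build_workqueue(collection):
    """Bucket nodeids by scope (the part of the id before '::')."""
    workqueue = OrderedDict()
    for nodeid in collection:
        scope = nodeid.split("::", 1)[0]
        workqueue.setdefault(scope, OrderedDict())[nodeid] = False
    return workqueue

q = build_workqueue([
    "test_a.py::test_one",
    "test_a.py::test_two",
    "test_b.py::test_three",
])
print(list(q))         # ['test_a.py', 'test_b.py']
print(q["test_a.py"])  # two pending (False) items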
def p_recipe(self, t): """recipe : RECIPE_LINE | RECIPE_LINE recipe""" if len(t) == 3: t[0] = t[1] + t[2] else: t[0] = t[1]
def function[p_recipe, parameter[self, t]]: constant[recipe : RECIPE_LINE | RECIPE_LINE recipe] if compare[call[name[len], parameter[name[t]]] equal[==] constant[3]] begin[:] call[name[t]][constant[0]] assign[=] binary_operation[call[name[t]][constant[1]] + call[name[t]][constant[2]]]
keyword[def] identifier[p_recipe] ( identifier[self] , identifier[t] ): literal[string] keyword[if] identifier[len] ( identifier[t] )== literal[int] : identifier[t] [ literal[int] ]= identifier[t] [ literal[int] ]+ identifier[t] [ literal[int] ] keyword[else] : identifier[t] [ literal[int] ]= identifier[t] [ literal[int] ]
def p_recipe(self, t): """recipe : RECIPE_LINE | RECIPE_LINE recipe""" if len(t) == 3: t[0] = t[1] + t[2] # depends on [control=['if'], data=[]] else: t[0] = t[1]
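The rule is right-recursive: a `recipe` is either a single RECIPE_LINE or a RECIPE_LINE followed by another `recipe`, and the action concatenates the pieces, so a multi-line recipe reduces to one joined string. The reduction can be exercised outside PLY by treating `t` as the value list yacc passes in (`t[0]` is the slot for the left-hand side's synthesized value):

def reduce_recipe(t):
    if len(t) == 3:          # recipe : RECIPE_LINE recipe
        t[0] = t[1] + t[2]
    else:                    # recipe : RECIPE_LINE
        t[0] = t[1]
    return t[0]

print(repr(reduce_recipe([None, "echo hi\n", "echo bye\n"])))
# 'echo hi\necho bye\n'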
def moment(self, axis, channel=0, moment=1, *, resultant=None): """Take the nth moment the dataset along one axis, adding lower rank channels. New channels have names ``<channel name>_<axis name>_moment_<moment num>``. Moment 0 is the integral of the slice. Moment 1 is the weighted average or "Center of Mass", normalized by the integral Moment 2 is the variance, the central moment about the center of mass, normalized by the integral Moments 3+ are central moments about the center of mass, normalized by the integral and by the standard deviation to the power of the moment. Moments, especially higher order moments, are susceptible to noise and baseline. It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip` in conjunction with moments to reduce effects of noise. Parameters ---------- axis : int or str The axis to take the moment along. If given as an integer, the axis with that index is used. If given as a string, the axis with that name is used. The axis must exist, and be a 1D array-aligned axis. (i.e. have a shape with a single value which is not ``1``) The collapsed axis must be monotonic to produce correct results. The axis to collapse along is inferred from the shape of the axis. channel : int or str The channel to take the moment. If given as an integer, the channel with that index is used. If given as a string, the channel with that name is used. The channel must have values along the axis (i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``) Default is 0, the first channel. moment : int or tuple of int The moments to take. One channel will be created for each number given. Default is 1, the center of mass. resultant : tuple of int The resultant shape after the moment operation. By default, it is intuited by the axis along which the moment is being taken. This default only works if that axis is 1D, so resultant is required if a multidimensional axis is passed as the first argument. The requirement of monotonicity applies on a per pixel basis. See Also -------- collapse Reduce dimensionality by some mathematical operation clip Set values above/below a threshold to a particular value WrightTools.kit.joint_shape Useful for setting `resultant` kwarg based off of axes not collapsed. """ # get axis index -------------------------------------------------------------------------- axis_index = None if resultant is not None: for i, (s, r) in enumerate(zip(self.shape, resultant)): if s != r and r == 1 and axis_index is None: axis_index = i elif s == r: continue else: raise wt_exceptions.ValueError( f"Invalid resultant shape '{resultant}' for shape {self.shape}. " + "Consider using `wt.kit.joint_shape` to join non-collapsed axes." 
) index = wt_kit.get_index(self.axis_names, axis) if axis_index is None: axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1] if len(axes) > 1: raise wt_exceptions.MultidimensionalAxisError(axis, "moment") elif len(axes) == 0: raise wt_exceptions.ValueError( "Axis {} is a single point, cannot compute moment".format(axis) ) axis_index = axes[0] warnings.warn("moment", category=wt_exceptions.EntireDatasetInMemoryWarning) channel_index = wt_kit.get_index(self.channel_names, channel) channel = self.channel_names[channel_index] if self[channel].shape[axis_index] == 1: raise wt_exceptions.ValueError( "Channel '{}' has a single point along Axis '{}', cannot compute moment".format( channel, axis ) ) new_shape = list(self[channel].shape) new_shape[axis_index] = 1 channel = self[channel] axis_inp = axis axis = self.axes[index] x = axis[:] if np.any(np.isnan(x)): raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp)) y = np.nan_to_num(channel[:]) try: moments = tuple(moment) except TypeError: moments = (moment,) multiplier = 1 if 0 in moments: # May be possible to optimize, probably doesn't need the sum # only matters for integral, all others normalize by integral multiplier = np.sign( np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True) ) for moment in moments: about = 0 norm = 1 if moment > 0: norm = np.trapz(y, x, axis=axis_index) norm = np.array(norm) norm.shape = new_shape if moment > 1: about = np.trapz(x * y, x, axis=axis_index) about = np.array(about) about.shape = new_shape about /= norm if moment > 2: sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index) sigma = np.array(sigma) sigma.shape = new_shape sigma /= norm sigma **= 0.5 norm *= sigma ** moment values = np.trapz((x - about) ** moment * y, x, axis=axis_index) values = np.array(values) values.shape = new_shape values /= norm if moment == 0: values *= multiplier self.create_channel( "{}_{}_{}_{}".format(channel.natural_name, axis_inp, "moment", moment), values=values, )
def function[moment, parameter[self, axis, channel, moment]]: constant[Take the nth moment the dataset along one axis, adding lower rank channels. New channels have names ``<channel name>_<axis name>_moment_<moment num>``. Moment 0 is the integral of the slice. Moment 1 is the weighted average or "Center of Mass", normalized by the integral Moment 2 is the variance, the central moment about the center of mass, normalized by the integral Moments 3+ are central moments about the center of mass, normalized by the integral and by the standard deviation to the power of the moment. Moments, especially higher order moments, are susceptible to noise and baseline. It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip` in conjunction with moments to reduce effects of noise. Parameters ---------- axis : int or str The axis to take the moment along. If given as an integer, the axis with that index is used. If given as a string, the axis with that name is used. The axis must exist, and be a 1D array-aligned axis. (i.e. have a shape with a single value which is not ``1``) The collapsed axis must be monotonic to produce correct results. The axis to collapse along is inferred from the shape of the axis. channel : int or str The channel to take the moment. If given as an integer, the channel with that index is used. If given as a string, the channel with that name is used. The channel must have values along the axis (i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``) Default is 0, the first channel. moment : int or tuple of int The moments to take. One channel will be created for each number given. Default is 1, the center of mass. resultant : tuple of int The resultant shape after the moment operation. By default, it is intuited by the axis along which the moment is being taken. This default only works if that axis is 1D, so resultant is required if a multidimensional axis is passed as the first argument. The requirement of monotonicity applies on a per pixel basis. See Also -------- collapse Reduce dimensionality by some mathematical operation clip Set values above/below a threshold to a particular value WrightTools.kit.joint_shape Useful for setting `resultant` kwarg based off of axes not collapsed. 
] variable[axis_index] assign[=] constant[None] if compare[name[resultant] is_not constant[None]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b0b7e2f0>, <ast.Tuple object at 0x7da1b0b7c070>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[self].shape, name[resultant]]]]]] begin[:] if <ast.BoolOp object at 0x7da1b0b7d300> begin[:] variable[axis_index] assign[=] name[i] variable[index] assign[=] call[name[wt_kit].get_index, parameter[name[self].axis_names, name[axis]]] if compare[name[axis_index] is constant[None]] begin[:] variable[axes] assign[=] <ast.ListComp object at 0x7da1b0b7ce80> if compare[call[name[len], parameter[name[axes]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b0b7f640> variable[axis_index] assign[=] call[name[axes]][constant[0]] call[name[warnings].warn, parameter[constant[moment]]] variable[channel_index] assign[=] call[name[wt_kit].get_index, parameter[name[self].channel_names, name[channel]]] variable[channel] assign[=] call[name[self].channel_names][name[channel_index]] if compare[call[call[name[self]][name[channel]].shape][name[axis_index]] equal[==] constant[1]] begin[:] <ast.Raise object at 0x7da1b0b7e620> variable[new_shape] assign[=] call[name[list], parameter[call[name[self]][name[channel]].shape]] call[name[new_shape]][name[axis_index]] assign[=] constant[1] variable[channel] assign[=] call[name[self]][name[channel]] variable[axis_inp] assign[=] name[axis] variable[axis] assign[=] call[name[self].axes][name[index]] variable[x] assign[=] call[name[axis]][<ast.Slice object at 0x7da1b0b7f430>] if call[name[np].any, parameter[call[name[np].isnan, parameter[name[x]]]]] begin[:] <ast.Raise object at 0x7da1b0b7e500> variable[y] assign[=] call[name[np].nan_to_num, parameter[call[name[channel]][<ast.Slice object at 0x7da1b0b7c0a0>]]] <ast.Try object at 0x7da1b0b7cb50> variable[multiplier] assign[=] constant[1] if compare[constant[0] in name[moments]] begin[:] variable[multiplier] assign[=] call[name[np].sign, parameter[call[name[np].sum, parameter[call[name[np].diff, parameter[name[x]]]]]]] for taget[name[moment]] in starred[name[moments]] begin[:] variable[about] assign[=] constant[0] variable[norm] assign[=] constant[1] if compare[name[moment] greater[>] constant[0]] begin[:] variable[norm] assign[=] call[name[np].trapz, parameter[name[y], name[x]]] variable[norm] assign[=] call[name[np].array, parameter[name[norm]]] name[norm].shape assign[=] name[new_shape] if compare[name[moment] greater[>] constant[1]] begin[:] variable[about] assign[=] call[name[np].trapz, parameter[binary_operation[name[x] * name[y]], name[x]]] variable[about] assign[=] call[name[np].array, parameter[name[about]]] name[about].shape assign[=] name[new_shape] <ast.AugAssign object at 0x7da204345d20> if compare[name[moment] greater[>] constant[2]] begin[:] variable[sigma] assign[=] call[name[np].trapz, parameter[binary_operation[binary_operation[binary_operation[name[x] - name[about]] ** constant[2]] * name[y]], name[x]]] variable[sigma] assign[=] call[name[np].array, parameter[name[sigma]]] name[sigma].shape assign[=] name[new_shape] <ast.AugAssign object at 0x7da2043465f0> <ast.AugAssign object at 0x7da204346980> <ast.AugAssign object at 0x7da204346da0> variable[values] assign[=] call[name[np].trapz, parameter[binary_operation[binary_operation[binary_operation[name[x] - name[about]] ** name[moment]] * name[y]], name[x]]] variable[values] assign[=] call[name[np].array, parameter[name[values]]] name[values].shape assign[=] name[new_shape] 
<ast.AugAssign object at 0x7da2043472b0> if compare[name[moment] equal[==] constant[0]] begin[:] <ast.AugAssign object at 0x7da2043446a0> call[name[self].create_channel, parameter[call[constant[{}_{}_{}_{}].format, parameter[name[channel].natural_name, name[axis_inp], constant[moment], name[moment]]]]]
keyword[def] identifier[moment] ( identifier[self] , identifier[axis] , identifier[channel] = literal[int] , identifier[moment] = literal[int] ,*, identifier[resultant] = keyword[None] ): literal[string] identifier[axis_index] = keyword[None] keyword[if] identifier[resultant] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[i] ,( identifier[s] , identifier[r] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[self] . identifier[shape] , identifier[resultant] )): keyword[if] identifier[s] != identifier[r] keyword[and] identifier[r] == literal[int] keyword[and] identifier[axis_index] keyword[is] keyword[None] : identifier[axis_index] = identifier[i] keyword[elif] identifier[s] == identifier[r] : keyword[continue] keyword[else] : keyword[raise] identifier[wt_exceptions] . identifier[ValueError] ( literal[string] + literal[string] ) identifier[index] = identifier[wt_kit] . identifier[get_index] ( identifier[self] . identifier[axis_names] , identifier[axis] ) keyword[if] identifier[axis_index] keyword[is] keyword[None] : identifier[axes] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[ndim] ) keyword[if] identifier[self] . identifier[axes] [ identifier[index] ]. identifier[shape] [ identifier[i] ]> literal[int] ] keyword[if] identifier[len] ( identifier[axes] )> literal[int] : keyword[raise] identifier[wt_exceptions] . identifier[MultidimensionalAxisError] ( identifier[axis] , literal[string] ) keyword[elif] identifier[len] ( identifier[axes] )== literal[int] : keyword[raise] identifier[wt_exceptions] . identifier[ValueError] ( literal[string] . identifier[format] ( identifier[axis] ) ) identifier[axis_index] = identifier[axes] [ literal[int] ] identifier[warnings] . identifier[warn] ( literal[string] , identifier[category] = identifier[wt_exceptions] . identifier[EntireDatasetInMemoryWarning] ) identifier[channel_index] = identifier[wt_kit] . identifier[get_index] ( identifier[self] . identifier[channel_names] , identifier[channel] ) identifier[channel] = identifier[self] . identifier[channel_names] [ identifier[channel_index] ] keyword[if] identifier[self] [ identifier[channel] ]. identifier[shape] [ identifier[axis_index] ]== literal[int] : keyword[raise] identifier[wt_exceptions] . identifier[ValueError] ( literal[string] . identifier[format] ( identifier[channel] , identifier[axis] ) ) identifier[new_shape] = identifier[list] ( identifier[self] [ identifier[channel] ]. identifier[shape] ) identifier[new_shape] [ identifier[axis_index] ]= literal[int] identifier[channel] = identifier[self] [ identifier[channel] ] identifier[axis_inp] = identifier[axis] identifier[axis] = identifier[self] . identifier[axes] [ identifier[index] ] identifier[x] = identifier[axis] [:] keyword[if] identifier[np] . identifier[any] ( identifier[np] . identifier[isnan] ( identifier[x] )): keyword[raise] identifier[wt_exceptions] . identifier[ValueError] ( literal[string] . identifier[format] ( identifier[axis_inp] )) identifier[y] = identifier[np] . identifier[nan_to_num] ( identifier[channel] [:]) keyword[try] : identifier[moments] = identifier[tuple] ( identifier[moment] ) keyword[except] identifier[TypeError] : identifier[moments] =( identifier[moment] ,) identifier[multiplier] = literal[int] keyword[if] literal[int] keyword[in] identifier[moments] : identifier[multiplier] = identifier[np] . identifier[sign] ( identifier[np] . identifier[sum] ( identifier[np] . 
identifier[diff] ( identifier[x] , identifier[axis] = identifier[axis_index] ), identifier[axis] = identifier[axis_index] , identifier[keepdims] = keyword[True] ) ) keyword[for] identifier[moment] keyword[in] identifier[moments] : identifier[about] = literal[int] identifier[norm] = literal[int] keyword[if] identifier[moment] > literal[int] : identifier[norm] = identifier[np] . identifier[trapz] ( identifier[y] , identifier[x] , identifier[axis] = identifier[axis_index] ) identifier[norm] = identifier[np] . identifier[array] ( identifier[norm] ) identifier[norm] . identifier[shape] = identifier[new_shape] keyword[if] identifier[moment] > literal[int] : identifier[about] = identifier[np] . identifier[trapz] ( identifier[x] * identifier[y] , identifier[x] , identifier[axis] = identifier[axis_index] ) identifier[about] = identifier[np] . identifier[array] ( identifier[about] ) identifier[about] . identifier[shape] = identifier[new_shape] identifier[about] /= identifier[norm] keyword[if] identifier[moment] > literal[int] : identifier[sigma] = identifier[np] . identifier[trapz] (( identifier[x] - identifier[about] )** literal[int] * identifier[y] , identifier[x] , identifier[axis] = identifier[axis_index] ) identifier[sigma] = identifier[np] . identifier[array] ( identifier[sigma] ) identifier[sigma] . identifier[shape] = identifier[new_shape] identifier[sigma] /= identifier[norm] identifier[sigma] **= literal[int] identifier[norm] *= identifier[sigma] ** identifier[moment] identifier[values] = identifier[np] . identifier[trapz] (( identifier[x] - identifier[about] )** identifier[moment] * identifier[y] , identifier[x] , identifier[axis] = identifier[axis_index] ) identifier[values] = identifier[np] . identifier[array] ( identifier[values] ) identifier[values] . identifier[shape] = identifier[new_shape] identifier[values] /= identifier[norm] keyword[if] identifier[moment] == literal[int] : identifier[values] *= identifier[multiplier] identifier[self] . identifier[create_channel] ( literal[string] . identifier[format] ( identifier[channel] . identifier[natural_name] , identifier[axis_inp] , literal[string] , identifier[moment] ), identifier[values] = identifier[values] , )
def moment(self, axis, channel=0, moment=1, *, resultant=None): """Take the nth moment the dataset along one axis, adding lower rank channels. New channels have names ``<channel name>_<axis name>_moment_<moment num>``. Moment 0 is the integral of the slice. Moment 1 is the weighted average or "Center of Mass", normalized by the integral Moment 2 is the variance, the central moment about the center of mass, normalized by the integral Moments 3+ are central moments about the center of mass, normalized by the integral and by the standard deviation to the power of the moment. Moments, especially higher order moments, are susceptible to noise and baseline. It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip` in conjunction with moments to reduce effects of noise. Parameters ---------- axis : int or str The axis to take the moment along. If given as an integer, the axis with that index is used. If given as a string, the axis with that name is used. The axis must exist, and be a 1D array-aligned axis. (i.e. have a shape with a single value which is not ``1``) The collapsed axis must be monotonic to produce correct results. The axis to collapse along is inferred from the shape of the axis. channel : int or str The channel to take the moment. If given as an integer, the channel with that index is used. If given as a string, the channel with that name is used. The channel must have values along the axis (i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``) Default is 0, the first channel. moment : int or tuple of int The moments to take. One channel will be created for each number given. Default is 1, the center of mass. resultant : tuple of int The resultant shape after the moment operation. By default, it is intuited by the axis along which the moment is being taken. This default only works if that axis is 1D, so resultant is required if a multidimensional axis is passed as the first argument. The requirement of monotonicity applies on a per pixel basis. See Also -------- collapse Reduce dimensionality by some mathematical operation clip Set values above/below a threshold to a particular value WrightTools.kit.joint_shape Useful for setting `resultant` kwarg based off of axes not collapsed. """ # get axis index -------------------------------------------------------------------------- axis_index = None if resultant is not None: for (i, (s, r)) in enumerate(zip(self.shape, resultant)): if s != r and r == 1 and (axis_index is None): axis_index = i # depends on [control=['if'], data=[]] elif s == r: continue # depends on [control=['if'], data=[]] else: raise wt_exceptions.ValueError(f"Invalid resultant shape '{resultant}' for shape {self.shape}. 
" + 'Consider using `wt.kit.joint_shape` to join non-collapsed axes.') # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['resultant']] index = wt_kit.get_index(self.axis_names, axis) if axis_index is None: axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1] if len(axes) > 1: raise wt_exceptions.MultidimensionalAxisError(axis, 'moment') # depends on [control=['if'], data=[]] elif len(axes) == 0: raise wt_exceptions.ValueError('Axis {} is a single point, cannot compute moment'.format(axis)) # depends on [control=['if'], data=[]] axis_index = axes[0] # depends on [control=['if'], data=['axis_index']] warnings.warn('moment', category=wt_exceptions.EntireDatasetInMemoryWarning) channel_index = wt_kit.get_index(self.channel_names, channel) channel = self.channel_names[channel_index] if self[channel].shape[axis_index] == 1: raise wt_exceptions.ValueError("Channel '{}' has a single point along Axis '{}', cannot compute moment".format(channel, axis)) # depends on [control=['if'], data=[]] new_shape = list(self[channel].shape) new_shape[axis_index] = 1 channel = self[channel] axis_inp = axis axis = self.axes[index] x = axis[:] if np.any(np.isnan(x)): raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp)) # depends on [control=['if'], data=[]] y = np.nan_to_num(channel[:]) try: moments = tuple(moment) # depends on [control=['try'], data=[]] except TypeError: moments = (moment,) # depends on [control=['except'], data=[]] multiplier = 1 if 0 in moments: # May be possible to optimize, probably doesn't need the sum # only matters for integral, all others normalize by integral multiplier = np.sign(np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)) # depends on [control=['if'], data=[]] for moment in moments: about = 0 norm = 1 if moment > 0: norm = np.trapz(y, x, axis=axis_index) norm = np.array(norm) norm.shape = new_shape # depends on [control=['if'], data=[]] if moment > 1: about = np.trapz(x * y, x, axis=axis_index) about = np.array(about) about.shape = new_shape about /= norm # depends on [control=['if'], data=[]] if moment > 2: sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index) sigma = np.array(sigma) sigma.shape = new_shape sigma /= norm sigma **= 0.5 norm *= sigma ** moment # depends on [control=['if'], data=['moment']] values = np.trapz((x - about) ** moment * y, x, axis=axis_index) values = np.array(values) values.shape = new_shape values /= norm if moment == 0: values *= multiplier # depends on [control=['if'], data=[]] self.create_channel('{}_{}_{}_{}'.format(channel.natural_name, axis_inp, 'moment', moment), values=values) # depends on [control=['for'], data=['moment']]
def _handle_posix(self, i, result, end_range): """Handle posix classes.""" last_posix = False m = i.match(RE_POSIX) if m: last_posix = True # Cannot do range with posix class # so escape last `-` if we think this # is the end of a range. if end_range and i.index - 1 >= end_range: result[-1] = '\\' + result[-1] posix_type = uniprops.POSIX_BYTES if self.is_bytes else uniprops.POSIX result.append(uniprops.get_posix_property(m.group(1), posix_type)) return last_posix
def function[_handle_posix, parameter[self, i, result, end_range]]: constant[Handle posix classes.] variable[last_posix] assign[=] constant[False] variable[m] assign[=] call[name[i].match, parameter[name[RE_POSIX]]] if name[m] begin[:] variable[last_posix] assign[=] constant[True] if <ast.BoolOp object at 0x7da18ede61d0> begin[:] call[name[result]][<ast.UnaryOp object at 0x7da18ede70a0>] assign[=] binary_operation[constant[\] + call[name[result]][<ast.UnaryOp object at 0x7da18ede6950>]] variable[posix_type] assign[=] <ast.IfExp object at 0x7da18ede6ec0> call[name[result].append, parameter[call[name[uniprops].get_posix_property, parameter[call[name[m].group, parameter[constant[1]]], name[posix_type]]]]] return[name[last_posix]]
keyword[def] identifier[_handle_posix] ( identifier[self] , identifier[i] , identifier[result] , identifier[end_range] ): literal[string] identifier[last_posix] = keyword[False] identifier[m] = identifier[i] . identifier[match] ( identifier[RE_POSIX] ) keyword[if] identifier[m] : identifier[last_posix] = keyword[True] keyword[if] identifier[end_range] keyword[and] identifier[i] . identifier[index] - literal[int] >= identifier[end_range] : identifier[result] [- literal[int] ]= literal[string] + identifier[result] [- literal[int] ] identifier[posix_type] = identifier[uniprops] . identifier[POSIX_BYTES] keyword[if] identifier[self] . identifier[is_bytes] keyword[else] identifier[uniprops] . identifier[POSIX] identifier[result] . identifier[append] ( identifier[uniprops] . identifier[get_posix_property] ( identifier[m] . identifier[group] ( literal[int] ), identifier[posix_type] )) keyword[return] identifier[last_posix]
def _handle_posix(self, i, result, end_range): """Handle posix classes.""" last_posix = False m = i.match(RE_POSIX) if m: last_posix = True # Cannot do range with posix class # so escape last `-` if we think this # is the end of a range. if end_range and i.index - 1 >= end_range: result[-1] = '\\' + result[-1] # depends on [control=['if'], data=[]] posix_type = uniprops.POSIX_BYTES if self.is_bytes else uniprops.POSIX result.append(uniprops.get_posix_property(m.group(1), posix_type)) # depends on [control=['if'], data=[]] return last_posix
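What this helper does is swap a POSIX class token such as `[:alnum:]` for the members of that class inside a bracket expression. A toy version of the substitution with a small lookup table (the real `uniprops` tables cover full Unicode and byte variants):

import re

POSIX_SKETCH = {
    "alnum": "a-zA-Z0-9",
    "digit": "0-9",
    "space": r" \t\r\n\v\f",
}

def expand_posix(pattern: str) -> str:
    """Replace [:name:] inside a bracket expression with its members."""
    return re.sub(r"\[:(\w+):\]", lambda m: POSIX_SKETCH[m.group(1)], pattern)

print(expand_posix("[[:alnum:]_]"))   # [a-zA-Z0-9_]
assert re.fullmatch(expand_posix("[[:digit:]]+"), "12345")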
def require_component_access(view_func, component):
    """Perform component can_access check to access the view.

    :param component containing the view (panel or dashboard).

    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the user
    cannot access the component containing the view.
    For example, the check of component policy rules will be applied to
    its views.
    """
    from horizon.exceptions import NotAuthorized

    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if not component.can_access({'request': request}):
            raise NotAuthorized(_("You are not authorized to access %s")
                                % request.path)
        return view_func(request, *args, **kwargs)

    return dec
def function[require_component_access, parameter[view_func, component]]: constant[Perform component can_access check to access the view. :param component containing the view (panel or dashboard). Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the user cannot access the component containing the view. For example, the check of component policy rules will be applied to its views. ] from relative_module[horizon.exceptions] import module[NotAuthorized] def function[dec, parameter[request]]: if <ast.UnaryOp object at 0x7da1b1987f70> begin[:] <ast.Raise object at 0x7da1b1985b10> return[call[name[view_func], parameter[name[request], <ast.Starred object at 0x7da1b1986500>]]] return[name[dec]]
keyword[def] identifier[require_component_access] ( identifier[view_func] , identifier[component] ): literal[string] keyword[from] identifier[horizon] . identifier[exceptions] keyword[import] identifier[NotAuthorized] @ identifier[functools] . identifier[wraps] ( identifier[view_func] , identifier[assigned] = identifier[available_attrs] ( identifier[view_func] )) keyword[def] identifier[dec] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ): keyword[if] keyword[not] identifier[component] . identifier[can_access] ({ literal[string] : identifier[request] }): keyword[raise] identifier[NotAuthorized] ( identifier[_] ( literal[string] ) % identifier[request] . identifier[path] ) keyword[return] identifier[view_func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[dec]
def require_component_access(view_func, component):
    """Perform component can_access check to access the view.

    :param component containing the view (panel or dashboard).

    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the user
    cannot access the component containing the view.
    For example, the check of component policy rules will be applied to
    its views.
    """
    from horizon.exceptions import NotAuthorized

    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if not component.can_access({'request': request}):
            raise NotAuthorized(_('You are not authorized to access %s') % request.path) # depends on [control=['if'], data=[]]
        return view_func(request, *args, **kwargs)
    return dec
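In use this is a plain view decorator: the wrapped view runs only if the owning panel or dashboard grants access. A minimal stand-in with a stubbed component and exception (not Horizon's real objects) showing the same flow:

import functools

class NotAuthorized(Exception):
    pass

class StubComponent:
    def can_access(self, context):
        return context["request"].get("user") == "admin"

def require_access(view_func, component):
    @functools.wraps(view_func)
    def dec(request, *args, **kwargs):
        if not component.can_access({"request": request}):
            raise NotAuthorized("You are not authorized to access %s" % request["path"])
        return view_func(request, *args, **kwargs)
    return dec

view = require_access(lambda request: "ok", StubComponent())
print(view({"user": "admin", "path": "/panel"}))  # ok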
def show_details(item_data: Dict[Any, Any]) -> str:
    """Format catalog item output

    Parameters:
        item_data: item's attributes values

    Returns:
        txt: the formatted details as a string
    """
    txt = ""
    for key, value in item_data.items():
        txt += "**" + str(key) + "**" + ': ' + str(value) + " \n"
    return txt
def function[show_details, parameter[item_data]]: constant[Format catalog item output Parameters: item_data: item's attributes values Returns: txt: the formatted details as a string ] variable[txt] assign[=] constant[] for taget[tuple[[<ast.Name object at 0x7da20e9b0bb0>, <ast.Name object at 0x7da20e9b3550>]]] in starred[call[name[item_data].items, parameter[]]] begin[:] <ast.AugAssign object at 0x7da20e9b3f10> return[name[txt]]
keyword[def] identifier[show_details] ( identifier[item_data] : identifier[Dict] [ identifier[Any] , identifier[Any] ])-> identifier[str] : literal[string] identifier[txt] = literal[string] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[item_data] . identifier[items] (): identifier[txt] += literal[string] + identifier[str] ( identifier[key] )+ literal[string] + literal[string] + identifier[str] ( identifier[value] )+ literal[string] keyword[return] identifier[txt]
def show_details(item_data: Dict[Any, Any]) -> str:
    """Format catalog item output

    Parameters:
        item_data: item's attributes values

    Returns:
        txt: the formatted details as a string
    """
    txt = ''
    for (key, value) in item_data.items():
        txt += '**' + str(key) + '**' + ': ' + str(value) + ' \n' # depends on [control=['for'], data=[]]
    return txt
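Because the formatter is pure string manipulation it is easy to exercise on its own; a self-contained run (function body repeated here so the snippet stands alone):

from typing import Any, Dict

def show_details(item_data: Dict[Any, Any]) -> str:
    txt = ""
    for key, value in item_data.items():
        txt += "**" + str(key) + "**" + ': ' + str(value) + " \n"
    return txt

print(show_details({"name": "Laptop", "price": 999}))
# **name**: Laptop
# **price**: 999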
def jsonresolver_loader(url_map): """Resolve the OpenAIRE grant.""" from flask import current_app url_map.add(Rule( '/grants/10.13039/<path:doi_grant_code>', endpoint=resolve_grant_endpoint, host=current_app.config['OPENAIRE_JSONRESOLVER_GRANTS_HOST']))
def function[jsonresolver_loader, parameter[url_map]]: constant[Resolve the OpenAIRE grant.] from relative_module[flask] import module[current_app] call[name[url_map].add, parameter[call[name[Rule], parameter[constant[/grants/10.13039/<path:doi_grant_code>]]]]]
keyword[def] identifier[jsonresolver_loader] ( identifier[url_map] ): literal[string] keyword[from] identifier[flask] keyword[import] identifier[current_app] identifier[url_map] . identifier[add] ( identifier[Rule] ( literal[string] , identifier[endpoint] = identifier[resolve_grant_endpoint] , identifier[host] = identifier[current_app] . identifier[config] [ literal[string] ]))
def jsonresolver_loader(url_map): """Resolve the OpenAIRE grant.""" from flask import current_app url_map.add(Rule('/grants/10.13039/<path:doi_grant_code>', endpoint=resolve_grant_endpoint, host=current_app.config['OPENAIRE_JSONRESOLVER_GRANTS_HOST']))
def tryload_cache_list(dpath, fname, cfgstr_list, verbose=False):
    """
    Loads a list of similar cached data items.
    Returns flags marking the entries that still need to be computed.
    """
    data_list = [tryload_cache(dpath, fname, cfgstr, verbose)
                 for cfgstr in cfgstr_list]
    ismiss_list = [data is None for data in data_list]
    return data_list, ismiss_list
def function[tryload_cache_list, parameter[dpath, fname, cfgstr_list, verbose]]:
    constant[
    Loads a list of similar cached data items.
    Returns flags marking the entries that still need to be computed.
    ]
    variable[data_list] assign[=] <ast.ListComp object at 0x7da1b24e5060>
    variable[ismiss_list] assign[=] <ast.ListComp object at 0x7da1b24befe0>
    return[tuple[[<ast.Name object at 0x7da1b24bd750>, <ast.Name object at 0x7da1b24bd060>]]]
keyword[def] identifier[tryload_cache_list] ( identifier[dpath] , identifier[fname] , identifier[cfgstr_list] , identifier[verbose] = keyword[False] ): literal[string] identifier[data_list] =[ identifier[tryload_cache] ( identifier[dpath] , identifier[fname] , identifier[cfgstr] , identifier[verbose] ) keyword[for] identifier[cfgstr] keyword[in] identifier[cfgstr_list] ] identifier[ismiss_list] =[ identifier[data] keyword[is] keyword[None] keyword[for] identifier[data] keyword[in] identifier[data_list] ] keyword[return] identifier[data_list] , identifier[ismiss_list]
def tryload_cache_list(dpath, fname, cfgstr_list, verbose=False):
    """
    Loads a list of similar cached data items.
    Returns flags marking the entries that still need to be computed.
    """
    data_list = [tryload_cache(dpath, fname, cfgstr, verbose) for cfgstr in cfgstr_list]
    ismiss_list = [data is None for data in data_list]
    return (data_list, ismiss_list)
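A hedged sketch of the intended recompute-on-miss loop around tryload_cache_list; compute_data and save_cache are hypothetical helpers (save_cache mirrors the tryload_cache counterpart this module presumably provides):

dpath, fname = '/tmp/cache', 'results'
cfgstr_list = ['cfg_a', 'cfg_b', 'cfg_c']
data_list, ismiss_list = tryload_cache_list(dpath, fname, cfgstr_list)
for i, ismiss in enumerate(ismiss_list):
    if ismiss:
        # recompute only the configurations that missed the cache
        data_list[i] = compute_data(cfgstr_list[i])             # hypothetical
        save_cache(dpath, fname, cfgstr_list[i], data_list[i])  # assumed counterpart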
def set_interval(self, interval):
        """Set the polling interval for the process thread.

        :param interval: How often to poll the Hottop
        :type interval: int or float
        :returns: None
        :raises: InvalidInput
        """
        if not isinstance(interval, (int, float)):
            raise InvalidInput("Interval value must be of float or int")
        self._config['interval'] = interval
def function[set_interval, parameter[self, interval]]:
    constant[Set the polling interval for the process thread.

        :param interval: How often to poll the Hottop
        :type interval: int or float
        :returns: None
        :raises: InvalidInput
        ]
    if <ast.UnaryOp object at 0x7da204565ea0> begin[:]
        <ast.Raise object at 0x7da204565180>
    call[name[self]._config][constant[interval]] assign[=] name[interval]
keyword[def] identifier[set_interval] ( identifier[self] , identifier[interval] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[interval] ,( identifier[int] , identifier[float] )): keyword[raise] identifier[InvalidInput] ( literal[string] ) identifier[self] . identifier[_config] [ literal[string] ]= identifier[interval]
def set_interval(self, interval): """Set the polling interval for the process thread. :param interval: How often to poll the Hottop :type interval: int or float :returns: None :raises: InvalidInput """ if not isinstance(interval, (int, float)): raise InvalidInput('Interval value must be of float or int') # depends on [control=['if'], data=[]] self._config['interval'] = interval
def fetch(self, **kwargs) -> 'FetchContextManager': ''' Sends the request to the server and reads the response. You may use this method either with plain synchronous Session or AsyncSession. Both the followings patterns are valid: .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import Session with Session() as sess: rqst = Request(sess, 'GET', ...) with rqst.fetch() as resp: print(resp.text()) .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import AsyncSession async with AsyncSession() as sess: rqst = Request(sess, 'GET', ...) async with rqst.fetch() as resp: print(await resp.text()) ''' assert self.method in self._allowed_methods, \ 'Disallowed HTTP method: {}'.format(self.method) self.date = datetime.now(tzutc()) self.headers['Date'] = self.date.isoformat() if self.content_type is not None: self.headers['Content-Type'] = self.content_type full_url = self._build_url() self._sign(full_url.relative()) rqst_ctx = self.session.aiohttp_session.request( self.method, str(full_url), data=self._pack_content(), timeout=_default_request_timeout, headers=self.headers) return FetchContextManager(self.session, rqst_ctx, **kwargs)
def function[fetch, parameter[self]]: constant[ Sends the request to the server and reads the response. You may use this method either with plain synchronous Session or AsyncSession. Both the followings patterns are valid: .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import Session with Session() as sess: rqst = Request(sess, 'GET', ...) with rqst.fetch() as resp: print(resp.text()) .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import AsyncSession async with AsyncSession() as sess: rqst = Request(sess, 'GET', ...) async with rqst.fetch() as resp: print(await resp.text()) ] assert[compare[name[self].method in name[self]._allowed_methods]] name[self].date assign[=] call[name[datetime].now, parameter[call[name[tzutc], parameter[]]]] call[name[self].headers][constant[Date]] assign[=] call[name[self].date.isoformat, parameter[]] if compare[name[self].content_type is_not constant[None]] begin[:] call[name[self].headers][constant[Content-Type]] assign[=] name[self].content_type variable[full_url] assign[=] call[name[self]._build_url, parameter[]] call[name[self]._sign, parameter[call[name[full_url].relative, parameter[]]]] variable[rqst_ctx] assign[=] call[name[self].session.aiohttp_session.request, parameter[name[self].method, call[name[str], parameter[name[full_url]]]]] return[call[name[FetchContextManager], parameter[name[self].session, name[rqst_ctx]]]]
keyword[def] identifier[fetch] ( identifier[self] ,** identifier[kwargs] )-> literal[string] : literal[string] keyword[assert] identifier[self] . identifier[method] keyword[in] identifier[self] . identifier[_allowed_methods] , literal[string] . identifier[format] ( identifier[self] . identifier[method] ) identifier[self] . identifier[date] = identifier[datetime] . identifier[now] ( identifier[tzutc] ()) identifier[self] . identifier[headers] [ literal[string] ]= identifier[self] . identifier[date] . identifier[isoformat] () keyword[if] identifier[self] . identifier[content_type] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[headers] [ literal[string] ]= identifier[self] . identifier[content_type] identifier[full_url] = identifier[self] . identifier[_build_url] () identifier[self] . identifier[_sign] ( identifier[full_url] . identifier[relative] ()) identifier[rqst_ctx] = identifier[self] . identifier[session] . identifier[aiohttp_session] . identifier[request] ( identifier[self] . identifier[method] , identifier[str] ( identifier[full_url] ), identifier[data] = identifier[self] . identifier[_pack_content] (), identifier[timeout] = identifier[_default_request_timeout] , identifier[headers] = identifier[self] . identifier[headers] ) keyword[return] identifier[FetchContextManager] ( identifier[self] . identifier[session] , identifier[rqst_ctx] ,** identifier[kwargs] )
def fetch(self, **kwargs) -> 'FetchContextManager': """ Sends the request to the server and reads the response. You may use this method either with plain synchronous Session or AsyncSession. Both the followings patterns are valid: .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import Session with Session() as sess: rqst = Request(sess, 'GET', ...) with rqst.fetch() as resp: print(resp.text()) .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import AsyncSession async with AsyncSession() as sess: rqst = Request(sess, 'GET', ...) async with rqst.fetch() as resp: print(await resp.text()) """ assert self.method in self._allowed_methods, 'Disallowed HTTP method: {}'.format(self.method) self.date = datetime.now(tzutc()) self.headers['Date'] = self.date.isoformat() if self.content_type is not None: self.headers['Content-Type'] = self.content_type # depends on [control=['if'], data=[]] full_url = self._build_url() self._sign(full_url.relative()) rqst_ctx = self.session.aiohttp_session.request(self.method, str(full_url), data=self._pack_content(), timeout=_default_request_timeout, headers=self.headers) return FetchContextManager(self.session, rqst_ctx, **kwargs)
def dpar(self, cl=1): """Return dpar-style executable assignment for parameter Default is to write CL version of code; if cl parameter is false, writes Python executable code instead. Note that dpar doesn't even work for arrays in the CL, so we just use Python syntax here. """ sval = list(map(self.toString, self.value, len(self.value)*[1])) for i in range(len(sval)): if sval[i] == "": sval[i] = "None" s = "%s = [%s]" % (self.name, ', '.join(sval)) return s
def function[dpar, parameter[self, cl]]: constant[Return dpar-style executable assignment for parameter Default is to write CL version of code; if cl parameter is false, writes Python executable code instead. Note that dpar doesn't even work for arrays in the CL, so we just use Python syntax here. ] variable[sval] assign[=] call[name[list], parameter[call[name[map], parameter[name[self].toString, name[self].value, binary_operation[call[name[len], parameter[name[self].value]] * list[[<ast.Constant object at 0x7da1b0faed40>]]]]]]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[sval]]]]]] begin[:] if compare[call[name[sval]][name[i]] equal[==] constant[]] begin[:] call[name[sval]][name[i]] assign[=] constant[None] variable[s] assign[=] binary_operation[constant[%s = [%s]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0faecb0>, <ast.Call object at 0x7da1b0fac5e0>]]] return[name[s]]
keyword[def] identifier[dpar] ( identifier[self] , identifier[cl] = literal[int] ): literal[string] identifier[sval] = identifier[list] ( identifier[map] ( identifier[self] . identifier[toString] , identifier[self] . identifier[value] , identifier[len] ( identifier[self] . identifier[value] )*[ literal[int] ])) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sval] )): keyword[if] identifier[sval] [ identifier[i] ]== literal[string] : identifier[sval] [ identifier[i] ]= literal[string] identifier[s] = literal[string] %( identifier[self] . identifier[name] , literal[string] . identifier[join] ( identifier[sval] )) keyword[return] identifier[s]
def dpar(self, cl=1): """Return dpar-style executable assignment for parameter Default is to write CL version of code; if cl parameter is false, writes Python executable code instead. Note that dpar doesn't even work for arrays in the CL, so we just use Python syntax here. """ sval = list(map(self.toString, self.value, len(self.value) * [1])) for i in range(len(sval)): if sval[i] == '': sval[i] = 'None' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] s = '%s = [%s]' % (self.name, ', '.join(sval)) return s
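A standalone sketch of the same join logic outside the parameter class; dpar_like is a hypothetical helper for illustration only:

def dpar_like(name, svals):
    # empty strings from toString() are replaced by "None" before joining
    svals = [s if s != "" else "None" for s in svals]
    return "%s = [%s]" % (name, ', '.join(svals))

print(dpar_like("coords", ["1.0", "", "3.0"]))  # coords = [1.0, None, 3.0]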
def table(self, data, header=None): """Example:: +----------+------------+ | CityName | Population | +----------+------------+ | Adelaide | 1158259 | +----------+------------+ | Darwin | 120900 | +----------+------------+ """ if header: x = PrettyTable(header) else: x = PrettyTable() # construct ascii text table, split header and body for row in data: x.add_row(row) s = x.get_string(border=True) lines = s.split("\n") header_ = lines[:2] body = lines[2:] n_body = len(body) ruler = body[0] # add more rulers between each rows new_body = list() counter = 0 for line in body: counter += 1 new_body.append(line) if (2 <= counter) and (counter < (n_body - 1)): new_body.append(ruler) if header: return "\n".join(header_ + new_body) else: return "\n".join(new_body)
def function[table, parameter[self, data, header]]: constant[Example:: +----------+------------+ | CityName | Population | +----------+------------+ | Adelaide | 1158259 | +----------+------------+ | Darwin | 120900 | +----------+------------+ ] if name[header] begin[:] variable[x] assign[=] call[name[PrettyTable], parameter[name[header]]] for taget[name[row]] in starred[name[data]] begin[:] call[name[x].add_row, parameter[name[row]]] variable[s] assign[=] call[name[x].get_string, parameter[]] variable[lines] assign[=] call[name[s].split, parameter[constant[ ]]] variable[header_] assign[=] call[name[lines]][<ast.Slice object at 0x7da1b1629930>] variable[body] assign[=] call[name[lines]][<ast.Slice object at 0x7da1b162ad10>] variable[n_body] assign[=] call[name[len], parameter[name[body]]] variable[ruler] assign[=] call[name[body]][constant[0]] variable[new_body] assign[=] call[name[list], parameter[]] variable[counter] assign[=] constant[0] for taget[name[line]] in starred[name[body]] begin[:] <ast.AugAssign object at 0x7da1b162a560> call[name[new_body].append, parameter[name[line]]] if <ast.BoolOp object at 0x7da1b162a1a0> begin[:] call[name[new_body].append, parameter[name[ruler]]] if name[header] begin[:] return[call[constant[ ].join, parameter[binary_operation[name[header_] + name[new_body]]]]]
keyword[def] identifier[table] ( identifier[self] , identifier[data] , identifier[header] = keyword[None] ): literal[string] keyword[if] identifier[header] : identifier[x] = identifier[PrettyTable] ( identifier[header] ) keyword[else] : identifier[x] = identifier[PrettyTable] () keyword[for] identifier[row] keyword[in] identifier[data] : identifier[x] . identifier[add_row] ( identifier[row] ) identifier[s] = identifier[x] . identifier[get_string] ( identifier[border] = keyword[True] ) identifier[lines] = identifier[s] . identifier[split] ( literal[string] ) identifier[header_] = identifier[lines] [: literal[int] ] identifier[body] = identifier[lines] [ literal[int] :] identifier[n_body] = identifier[len] ( identifier[body] ) identifier[ruler] = identifier[body] [ literal[int] ] identifier[new_body] = identifier[list] () identifier[counter] = literal[int] keyword[for] identifier[line] keyword[in] identifier[body] : identifier[counter] += literal[int] identifier[new_body] . identifier[append] ( identifier[line] ) keyword[if] ( literal[int] <= identifier[counter] ) keyword[and] ( identifier[counter] <( identifier[n_body] - literal[int] )): identifier[new_body] . identifier[append] ( identifier[ruler] ) keyword[if] identifier[header] : keyword[return] literal[string] . identifier[join] ( identifier[header_] + identifier[new_body] ) keyword[else] : keyword[return] literal[string] . identifier[join] ( identifier[new_body] )
def table(self, data, header=None): """Example:: +----------+------------+ | CityName | Population | +----------+------------+ | Adelaide | 1158259 | +----------+------------+ | Darwin | 120900 | +----------+------------+ """ if header: x = PrettyTable(header) # depends on [control=['if'], data=[]] else: x = PrettyTable() # construct ascii text table, split header and body for row in data: x.add_row(row) # depends on [control=['for'], data=['row']] s = x.get_string(border=True) lines = s.split('\n') header_ = lines[:2] body = lines[2:] n_body = len(body) ruler = body[0] # add more rulers between each rows new_body = list() counter = 0 for line in body: counter += 1 new_body.append(line) if 2 <= counter and counter < n_body - 1: new_body.append(ruler) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] if header: return '\n'.join(header_ + new_body) # depends on [control=['if'], data=[]] else: return '\n'.join(new_body)
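A usage sketch, assuming the prettytable package is installed and `formatter` stands in for an instance of the class defining table():

header = ["CityName", "Population"]
data = [["Adelaide", 1158259], ["Darwin", 120900]]
print(formatter.table(data, header=header))
# prints the fully ruled layout shown in the docstring, with an extra
# horizontal ruler inserted between every pair of body rows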
def parse(self, data): """ Converts a CNML structure to a NetworkX Graph object which is then returned. """ graph = self._init_graph() # loop over links and create networkx graph # Add only working nodes with working links for link in data.get_inner_links(): if link.status != libcnml.libcnml.Status.WORKING: continue interface_a, interface_b = link.getLinkedInterfaces() source = interface_a.ipv4 dest = interface_b.ipv4 # add link to Graph graph.add_edge(source, dest, weight=1) return graph
def function[parse, parameter[self, data]]: constant[ Converts a CNML structure to a NetworkX Graph object which is then returned. ] variable[graph] assign[=] call[name[self]._init_graph, parameter[]] for taget[name[link]] in starred[call[name[data].get_inner_links, parameter[]]] begin[:] if compare[name[link].status not_equal[!=] name[libcnml].libcnml.Status.WORKING] begin[:] continue <ast.Tuple object at 0x7da18c4cecb0> assign[=] call[name[link].getLinkedInterfaces, parameter[]] variable[source] assign[=] name[interface_a].ipv4 variable[dest] assign[=] name[interface_b].ipv4 call[name[graph].add_edge, parameter[name[source], name[dest]]] return[name[graph]]
keyword[def] identifier[parse] ( identifier[self] , identifier[data] ): literal[string] identifier[graph] = identifier[self] . identifier[_init_graph] () keyword[for] identifier[link] keyword[in] identifier[data] . identifier[get_inner_links] (): keyword[if] identifier[link] . identifier[status] != identifier[libcnml] . identifier[libcnml] . identifier[Status] . identifier[WORKING] : keyword[continue] identifier[interface_a] , identifier[interface_b] = identifier[link] . identifier[getLinkedInterfaces] () identifier[source] = identifier[interface_a] . identifier[ipv4] identifier[dest] = identifier[interface_b] . identifier[ipv4] identifier[graph] . identifier[add_edge] ( identifier[source] , identifier[dest] , identifier[weight] = literal[int] ) keyword[return] identifier[graph]
def parse(self, data): """ Converts a CNML structure to a NetworkX Graph object which is then returned. """ graph = self._init_graph() # loop over links and create networkx graph # Add only working nodes with working links for link in data.get_inner_links(): if link.status != libcnml.libcnml.Status.WORKING: continue # depends on [control=['if'], data=[]] (interface_a, interface_b) = link.getLinkedInterfaces() source = interface_a.ipv4 dest = interface_b.ipv4 # add link to Graph graph.add_edge(source, dest, weight=1) # depends on [control=['for'], data=['link']] return graph
def delete_event(self, calendar_id, event_id): """Delete an event from the specified calendar. :param string calendar_id: ID of calendar to delete from. :param string event_id: ID of event to delete. """ self.request_handler.delete(endpoint='calendars/%s/events' % calendar_id, data={'event_id': event_id})
def function[delete_event, parameter[self, calendar_id, event_id]]: constant[Delete an event from the specified calendar. :param string calendar_id: ID of calendar to delete from. :param string event_id: ID of event to delete. ] call[name[self].request_handler.delete, parameter[]]
keyword[def] identifier[delete_event] ( identifier[self] , identifier[calendar_id] , identifier[event_id] ): literal[string] identifier[self] . identifier[request_handler] . identifier[delete] ( identifier[endpoint] = literal[string] % identifier[calendar_id] , identifier[data] ={ literal[string] : identifier[event_id] })
def delete_event(self, calendar_id, event_id): """Delete an event from the specified calendar. :param string calendar_id: ID of calendar to delete from. :param string event_id: ID of event to delete. """ self.request_handler.delete(endpoint='calendars/%s/events' % calendar_id, data={'event_id': event_id})
def equilibrium_transition_matrix(Xi, omega, sigma, reversible=True, return_lcc=True): """ Compute equilibrium transition matrix from OOM components: Parameters ---------- Xi : ndarray(M, N, M) matrix of set-observable operators omega: ndarray(M,) information state vector of OOM sigma : ndarray(M,) evaluator of OOM reversible : bool, optional, default=True symmetrize corrected count matrix in order to obtain a reversible transition matrix. return_lcc: bool, optional, default=True return indices of largest connected set. Returns ------- Tt_Eq : ndarray(N, N) equilibrium transition matrix lcc : ndarray(M,) the largest connected set of the transition matrix. """ import msmtools.estimation as me # Compute equilibrium transition matrix: Ct_Eq = np.einsum('j,jkl,lmn,n->km', omega, Xi, Xi, sigma) # Remove negative entries: Ct_Eq[Ct_Eq < 0.0] = 0.0 # Compute transition matrix after symmetrization: pi_r = np.sum(Ct_Eq, axis=1) if reversible: pi_c = np.sum(Ct_Eq, axis=0) pi_sym = pi_r + pi_c # Avoid zero row-sums. States with zero row-sums will be eliminated by active set update. ind0 = np.where(pi_sym == 0.0)[0] pi_sym[ind0] = 1.0 Tt_Eq = (Ct_Eq + Ct_Eq.T) / pi_sym[:, None] else: # Avoid zero row-sums. States with zero row-sums will be eliminated by active set update. ind0 = np.where(pi_r == 0.0)[0] pi_r[ind0] = 1.0 Tt_Eq = Ct_Eq / pi_r[:, None] # Perform active set update: lcc = me.largest_connected_set(Tt_Eq) Tt_Eq = me.largest_connected_submatrix(Tt_Eq, lcc=lcc) if return_lcc: return Tt_Eq, lcc else: return Tt_Eq
def function[equilibrium_transition_matrix, parameter[Xi, omega, sigma, reversible, return_lcc]]: constant[ Compute equilibrium transition matrix from OOM components: Parameters ---------- Xi : ndarray(M, N, M) matrix of set-observable operators omega: ndarray(M,) information state vector of OOM sigma : ndarray(M,) evaluator of OOM reversible : bool, optional, default=True symmetrize corrected count matrix in order to obtain a reversible transition matrix. return_lcc: bool, optional, default=True return indices of largest connected set. Returns ------- Tt_Eq : ndarray(N, N) equilibrium transition matrix lcc : ndarray(M,) the largest connected set of the transition matrix. ] import module[msmtools.estimation] as alias[me] variable[Ct_Eq] assign[=] call[name[np].einsum, parameter[constant[j,jkl,lmn,n->km], name[omega], name[Xi], name[Xi], name[sigma]]] call[name[Ct_Eq]][compare[name[Ct_Eq] less[<] constant[0.0]]] assign[=] constant[0.0] variable[pi_r] assign[=] call[name[np].sum, parameter[name[Ct_Eq]]] if name[reversible] begin[:] variable[pi_c] assign[=] call[name[np].sum, parameter[name[Ct_Eq]]] variable[pi_sym] assign[=] binary_operation[name[pi_r] + name[pi_c]] variable[ind0] assign[=] call[call[name[np].where, parameter[compare[name[pi_sym] equal[==] constant[0.0]]]]][constant[0]] call[name[pi_sym]][name[ind0]] assign[=] constant[1.0] variable[Tt_Eq] assign[=] binary_operation[binary_operation[name[Ct_Eq] + name[Ct_Eq].T] / call[name[pi_sym]][tuple[[<ast.Slice object at 0x7da18bcc8c40>, <ast.Constant object at 0x7da18bcc8700>]]]] variable[lcc] assign[=] call[name[me].largest_connected_set, parameter[name[Tt_Eq]]] variable[Tt_Eq] assign[=] call[name[me].largest_connected_submatrix, parameter[name[Tt_Eq]]] if name[return_lcc] begin[:] return[tuple[[<ast.Name object at 0x7da20c6a8430>, <ast.Name object at 0x7da20c6a89a0>]]]
keyword[def] identifier[equilibrium_transition_matrix] ( identifier[Xi] , identifier[omega] , identifier[sigma] , identifier[reversible] = keyword[True] , identifier[return_lcc] = keyword[True] ): literal[string] keyword[import] identifier[msmtools] . identifier[estimation] keyword[as] identifier[me] identifier[Ct_Eq] = identifier[np] . identifier[einsum] ( literal[string] , identifier[omega] , identifier[Xi] , identifier[Xi] , identifier[sigma] ) identifier[Ct_Eq] [ identifier[Ct_Eq] < literal[int] ]= literal[int] identifier[pi_r] = identifier[np] . identifier[sum] ( identifier[Ct_Eq] , identifier[axis] = literal[int] ) keyword[if] identifier[reversible] : identifier[pi_c] = identifier[np] . identifier[sum] ( identifier[Ct_Eq] , identifier[axis] = literal[int] ) identifier[pi_sym] = identifier[pi_r] + identifier[pi_c] identifier[ind0] = identifier[np] . identifier[where] ( identifier[pi_sym] == literal[int] )[ literal[int] ] identifier[pi_sym] [ identifier[ind0] ]= literal[int] identifier[Tt_Eq] =( identifier[Ct_Eq] + identifier[Ct_Eq] . identifier[T] )/ identifier[pi_sym] [:, keyword[None] ] keyword[else] : identifier[ind0] = identifier[np] . identifier[where] ( identifier[pi_r] == literal[int] )[ literal[int] ] identifier[pi_r] [ identifier[ind0] ]= literal[int] identifier[Tt_Eq] = identifier[Ct_Eq] / identifier[pi_r] [:, keyword[None] ] identifier[lcc] = identifier[me] . identifier[largest_connected_set] ( identifier[Tt_Eq] ) identifier[Tt_Eq] = identifier[me] . identifier[largest_connected_submatrix] ( identifier[Tt_Eq] , identifier[lcc] = identifier[lcc] ) keyword[if] identifier[return_lcc] : keyword[return] identifier[Tt_Eq] , identifier[lcc] keyword[else] : keyword[return] identifier[Tt_Eq]
def equilibrium_transition_matrix(Xi, omega, sigma, reversible=True, return_lcc=True): """ Compute equilibrium transition matrix from OOM components: Parameters ---------- Xi : ndarray(M, N, M) matrix of set-observable operators omega: ndarray(M,) information state vector of OOM sigma : ndarray(M,) evaluator of OOM reversible : bool, optional, default=True symmetrize corrected count matrix in order to obtain a reversible transition matrix. return_lcc: bool, optional, default=True return indices of largest connected set. Returns ------- Tt_Eq : ndarray(N, N) equilibrium transition matrix lcc : ndarray(M,) the largest connected set of the transition matrix. """ import msmtools.estimation as me # Compute equilibrium transition matrix: Ct_Eq = np.einsum('j,jkl,lmn,n->km', omega, Xi, Xi, sigma) # Remove negative entries: Ct_Eq[Ct_Eq < 0.0] = 0.0 # Compute transition matrix after symmetrization: pi_r = np.sum(Ct_Eq, axis=1) if reversible: pi_c = np.sum(Ct_Eq, axis=0) pi_sym = pi_r + pi_c # Avoid zero row-sums. States with zero row-sums will be eliminated by active set update. ind0 = np.where(pi_sym == 0.0)[0] pi_sym[ind0] = 1.0 Tt_Eq = (Ct_Eq + Ct_Eq.T) / pi_sym[:, None] # depends on [control=['if'], data=[]] else: # Avoid zero row-sums. States with zero row-sums will be eliminated by active set update. ind0 = np.where(pi_r == 0.0)[0] pi_r[ind0] = 1.0 Tt_Eq = Ct_Eq / pi_r[:, None] # Perform active set update: lcc = me.largest_connected_set(Tt_Eq) Tt_Eq = me.largest_connected_submatrix(Tt_Eq, lcc=lcc) if return_lcc: return (Tt_Eq, lcc) # depends on [control=['if'], data=[]] else: return Tt_Eq
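A quick sanity check, assuming numpy and msmtools are installed and using random positive OOM components (so the largest connected set covers all states); the result should be row-stochastic:

import numpy as np

M, N = 3, 4                       # shapes per the docstring: Xi is (M, N, M)
rng = np.random.RandomState(0)
Xi = rng.rand(M, N, M)
omega = rng.rand(M)
sigma = rng.rand(M)
Tt, lcc = equilibrium_transition_matrix(Xi, omega, sigma)
assert np.allclose(Tt.sum(axis=1), 1.0)  # rows sum to one after symmetrization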
def shift_image(im, shift, borderValue=0): """shift the image Parameters ---------- im: 2d array The image shift: 2 numbers (y,x) the shift in y and x direction borderValue: number, default 0 The value for the pixels outside the border (default 0) Returns ------- im: 2d array The shifted image Notes ----- The output image has the same size as the input. Therefore the image will be cropped in the process. """ im = np.asarray(im, dtype=np.float32) rows, cols = im.shape M = np.asarray([[1, 0, shift[1]], [0, 1, shift[0]]], dtype=np.float32) return cv2.warpAffine(im, M, (cols, rows), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_CUBIC, borderValue=borderValue)
def function[shift_image, parameter[im, shift, borderValue]]: constant[shift the image Parameters ---------- im: 2d array The image shift: 2 numbers (y,x) the shift in y and x direction borderValue: number, default 0 The value for the pixels outside the border (default 0) Returns ------- im: 2d array The shifted image Notes ----- The output image has the same size as the input. Therefore the image will be cropped in the process. ] variable[im] assign[=] call[name[np].asarray, parameter[name[im]]] <ast.Tuple object at 0x7da18c4cefe0> assign[=] name[im].shape variable[M] assign[=] call[name[np].asarray, parameter[list[[<ast.List object at 0x7da18c4cd4e0>, <ast.List object at 0x7da18c4cd300>]]]] return[call[name[cv2].warpAffine, parameter[name[im], name[M], tuple[[<ast.Name object at 0x7da18c4cdde0>, <ast.Name object at 0x7da18c4cc9a0>]]]]]
keyword[def] identifier[shift_image] ( identifier[im] , identifier[shift] , identifier[borderValue] = literal[int] ): literal[string] identifier[im] = identifier[np] . identifier[asarray] ( identifier[im] , identifier[dtype] = identifier[np] . identifier[float32] ) identifier[rows] , identifier[cols] = identifier[im] . identifier[shape] identifier[M] = identifier[np] . identifier[asarray] ([[ literal[int] , literal[int] , identifier[shift] [ literal[int] ]],[ literal[int] , literal[int] , identifier[shift] [ literal[int] ]]], identifier[dtype] = identifier[np] . identifier[float32] ) keyword[return] identifier[cv2] . identifier[warpAffine] ( identifier[im] , identifier[M] ,( identifier[cols] , identifier[rows] ), identifier[borderMode] = identifier[cv2] . identifier[BORDER_CONSTANT] , identifier[flags] = identifier[cv2] . identifier[INTER_CUBIC] , identifier[borderValue] = identifier[borderValue] )
def shift_image(im, shift, borderValue=0): """shift the image Parameters ---------- im: 2d array The image shift: 2 numbers (y,x) the shift in y and x direction borderValue: number, default 0 The value for the pixels outside the border (default 0) Returns ------- im: 2d array The shifted image Notes ----- The output image has the same size as the input. Therefore the image will be cropped in the process. """ im = np.asarray(im, dtype=np.float32) (rows, cols) = im.shape M = np.asarray([[1, 0, shift[1]], [0, 1, shift[0]]], dtype=np.float32) return cv2.warpAffine(im, M, (cols, rows), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_CUBIC, borderValue=borderValue)
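A usage sketch (requires numpy and opencv-python) shifting a synthetic image 3 pixels down and 5 pixels right:

import numpy as np

im = np.zeros((100, 100), dtype=np.float32)
im[40:60, 40:60] = 1.0                    # a bright square
shifted = shift_image(im, shift=(3, 5))   # shift is (y, x)
# the square's top-left corner moves from (40, 40) to roughly (43, 45);
# pixels shifted in from outside the frame take borderValue (default 0)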
def customchain(**kwargsChain): """ This decorator allows you to access ``ctx.peerplays`` which is an instance of Peerplays. But in contrast to @chain, this is a decorator that expects parameters that are directed right to ``PeerPlays()``. ... code-block::python @main.command() @click.option("--worker", default=None) @click.pass_context @customchain(foo="bar") @unlock def list(ctx, worker): print(ctx.obj) """ def wrap(f): @click.pass_context @verbose def new_func(ctx, *args, **kwargs): newoptions = ctx.obj newoptions.update(kwargsChain) ctx.peerplays = PeerPlays(**newoptions) ctx.blockchain = ctx.peerplays set_shared_peerplays_instance(ctx.peerplays) return ctx.invoke(f, *args, **kwargs) return update_wrapper(new_func, f) return wrap
def function[customchain, parameter[]]: constant[ This decorator allows you to access ``ctx.peerplays`` which is an instance of Peerplays. But in contrast to @chain, this is a decorator that expects parameters that are directed right to ``PeerPlays()``. ... code-block::python @main.command() @click.option("--worker", default=None) @click.pass_context @customchain(foo="bar") @unlock def list(ctx, worker): print(ctx.obj) ] def function[wrap, parameter[f]]: def function[new_func, parameter[ctx]]: variable[newoptions] assign[=] name[ctx].obj call[name[newoptions].update, parameter[name[kwargsChain]]] name[ctx].peerplays assign[=] call[name[PeerPlays], parameter[]] name[ctx].blockchain assign[=] name[ctx].peerplays call[name[set_shared_peerplays_instance], parameter[name[ctx].peerplays]] return[call[name[ctx].invoke, parameter[name[f], <ast.Starred object at 0x7da1b103b9a0>]]] return[call[name[update_wrapper], parameter[name[new_func], name[f]]]] return[name[wrap]]
keyword[def] identifier[customchain] (** identifier[kwargsChain] ): literal[string] keyword[def] identifier[wrap] ( identifier[f] ): @ identifier[click] . identifier[pass_context] @ identifier[verbose] keyword[def] identifier[new_func] ( identifier[ctx] ,* identifier[args] ,** identifier[kwargs] ): identifier[newoptions] = identifier[ctx] . identifier[obj] identifier[newoptions] . identifier[update] ( identifier[kwargsChain] ) identifier[ctx] . identifier[peerplays] = identifier[PeerPlays] (** identifier[newoptions] ) identifier[ctx] . identifier[blockchain] = identifier[ctx] . identifier[peerplays] identifier[set_shared_peerplays_instance] ( identifier[ctx] . identifier[peerplays] ) keyword[return] identifier[ctx] . identifier[invoke] ( identifier[f] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[update_wrapper] ( identifier[new_func] , identifier[f] ) keyword[return] identifier[wrap]
def customchain(**kwargsChain): """ This decorator allows you to access ``ctx.peerplays`` which is an instance of Peerplays. But in contrast to @chain, this is a decorator that expects parameters that are directed right to ``PeerPlays()``. ... code-block::python @main.command() @click.option("--worker", default=None) @click.pass_context @customchain(foo="bar") @unlock def list(ctx, worker): print(ctx.obj) """ def wrap(f): @click.pass_context @verbose def new_func(ctx, *args, **kwargs): newoptions = ctx.obj newoptions.update(kwargsChain) ctx.peerplays = PeerPlays(**newoptions) ctx.blockchain = ctx.peerplays set_shared_peerplays_instance(ctx.peerplays) return ctx.invoke(f, *args, **kwargs) return update_wrapper(new_func, f) return wrap
def writeCleanup(self, varBind, **context): """Finalize Managed Object Instance modification. Implements the successful third step of the multi-step workflow of the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the third (successful) phase is to seal the new state of the requested Managed Object Instance. Once the system transition into the *cleanup* state, no roll back to the previous Managed Object Instance state is possible. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to set Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object. """ name, val = varBind (debug.logger & debug.FLAG_INS and debug.logger('%s: writeCleanup(%s, %r)' % (self, name, val))) cbFun = context['cbFun'] self.branchVersionId += 1 instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}}) idx = context['idx'] if idx in instances[self.ST_CREATE]: self.createCleanup(varBind, **context) return if idx in instances[self.ST_DESTROY]: self.destroyCleanup(varBind, **context) return try: node = self.getBranch(name, **context) except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc: cbFun(varBind, **dict(context, error=exc)) else: node.writeCleanup(varBind, **context)
def function[writeCleanup, parameter[self, varBind]]: constant[Finalize Managed Object Instance modification. Implements the successful third step of the multi-step workflow of the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the third (successful) phase is to seal the new state of the requested Managed Object Instance. Once the system transition into the *cleanup* state, no roll back to the previous Managed Object Instance state is possible. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to set Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object. ] <ast.Tuple object at 0x7da1b155d3f0> assign[=] name[varBind] <ast.BoolOp object at 0x7da1b16beb90> variable[cbFun] assign[=] call[name[context]][constant[cbFun]] <ast.AugAssign object at 0x7da1b16bdc90> variable[instances] assign[=] call[call[name[context]][constant[instances]].setdefault, parameter[name[self].name, dictionary[[<ast.Attribute object at 0x7da1b16bdc30>, <ast.Attribute object at 0x7da1b16bf2e0>], [<ast.Dict object at 0x7da1b16bdc60>, <ast.Dict object at 0x7da1b16bccd0>]]]] variable[idx] assign[=] call[name[context]][constant[idx]] if compare[name[idx] in call[name[instances]][name[self].ST_CREATE]] begin[:] call[name[self].createCleanup, parameter[name[varBind]]] return[None] if compare[name[idx] in call[name[instances]][name[self].ST_DESTROY]] begin[:] call[name[self].destroyCleanup, parameter[name[varBind]]] return[None] <ast.Try object at 0x7da1b16bcd90>
keyword[def] identifier[writeCleanup] ( identifier[self] , identifier[varBind] ,** identifier[context] ): literal[string] identifier[name] , identifier[val] = identifier[varBind] ( identifier[debug] . identifier[logger] & identifier[debug] . identifier[FLAG_INS] keyword[and] identifier[debug] . identifier[logger] ( literal[string] %( identifier[self] , identifier[name] , identifier[val] ))) identifier[cbFun] = identifier[context] [ literal[string] ] identifier[self] . identifier[branchVersionId] += literal[int] identifier[instances] = identifier[context] [ literal[string] ]. identifier[setdefault] ( identifier[self] . identifier[name] ,{ identifier[self] . identifier[ST_CREATE] :{}, identifier[self] . identifier[ST_DESTROY] :{}}) identifier[idx] = identifier[context] [ literal[string] ] keyword[if] identifier[idx] keyword[in] identifier[instances] [ identifier[self] . identifier[ST_CREATE] ]: identifier[self] . identifier[createCleanup] ( identifier[varBind] ,** identifier[context] ) keyword[return] keyword[if] identifier[idx] keyword[in] identifier[instances] [ identifier[self] . identifier[ST_DESTROY] ]: identifier[self] . identifier[destroyCleanup] ( identifier[varBind] ,** identifier[context] ) keyword[return] keyword[try] : identifier[node] = identifier[self] . identifier[getBranch] ( identifier[name] ,** identifier[context] ) keyword[except] ( identifier[error] . identifier[NoSuchInstanceError] , identifier[error] . identifier[NoSuchObjectError] ) keyword[as] identifier[exc] : identifier[cbFun] ( identifier[varBind] ,** identifier[dict] ( identifier[context] , identifier[error] = identifier[exc] )) keyword[else] : identifier[node] . identifier[writeCleanup] ( identifier[varBind] ,** identifier[context] )
def writeCleanup(self, varBind, **context): """Finalize Managed Object Instance modification. Implements the successful third step of the multi-step workflow of the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the third (successful) phase is to seal the new state of the requested Managed Object Instance. Once the system transition into the *cleanup* state, no roll back to the previous Managed Object Instance state is possible. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to set Other Parameters ---------------- \\*\\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object. """ (name, val) = varBind debug.logger & debug.FLAG_INS and debug.logger('%s: writeCleanup(%s, %r)' % (self, name, val)) cbFun = context['cbFun'] self.branchVersionId += 1 instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}}) idx = context['idx'] if idx in instances[self.ST_CREATE]: self.createCleanup(varBind, **context) return # depends on [control=['if'], data=[]] if idx in instances[self.ST_DESTROY]: self.destroyCleanup(varBind, **context) return # depends on [control=['if'], data=[]] try: node = self.getBranch(name, **context) # depends on [control=['try'], data=[]] except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc: cbFun(varBind, **dict(context, error=exc)) # depends on [control=['except'], data=['exc']] else: node.writeCleanup(varBind, **context)
def _dumps(self, obj): """ If :prop:serialized is True, @obj will be serialized using :prop:serializer """ if not self.serialized: return obj return self.serializer.dumps(obj)
def function[_dumps, parameter[self, obj]]: constant[ If :prop:serialized is True, @obj will be serialized using :prop:serializer ] if <ast.UnaryOp object at 0x7da1b28fae00> begin[:] return[name[obj]] return[call[name[self].serializer.dumps, parameter[name[obj]]]]
keyword[def] identifier[_dumps] ( identifier[self] , identifier[obj] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[serialized] : keyword[return] identifier[obj] keyword[return] identifier[self] . identifier[serializer] . identifier[dumps] ( identifier[obj] )
def _dumps(self, obj): """ If :prop:serialized is True, @obj will be serialized using :prop:serializer """ if not self.serialized: return obj # depends on [control=['if'], data=[]] return self.serializer.dumps(obj)
def get_repository_ids_by_asset(self, asset_id): """Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``. arg: asset_id (osid.id.Id): ``Id`` of an ``Asset`` return: (osid.id.IdList) - list of repository ``Ids`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bin_ids_by_resource mgr = self._get_provider_manager('REPOSITORY', local=True) lookup_session = mgr.get_asset_lookup_session(proxy=self._proxy) lookup_session.use_federated_repository_view() asset = lookup_session.get_asset(asset_id) id_list = [] for idstr in asset._my_map['assignedRepositoryIds']: id_list.append(Id(idstr)) return IdList(id_list)
def function[get_repository_ids_by_asset, parameter[self, asset_id]]: constant[Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``. arg: asset_id (osid.id.Id): ``Id`` of an ``Asset`` return: (osid.id.IdList) - list of repository ``Ids`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ] variable[mgr] assign[=] call[name[self]._get_provider_manager, parameter[constant[REPOSITORY]]] variable[lookup_session] assign[=] call[name[mgr].get_asset_lookup_session, parameter[]] call[name[lookup_session].use_federated_repository_view, parameter[]] variable[asset] assign[=] call[name[lookup_session].get_asset, parameter[name[asset_id]]] variable[id_list] assign[=] list[[]] for taget[name[idstr]] in starred[call[name[asset]._my_map][constant[assignedRepositoryIds]]] begin[:] call[name[id_list].append, parameter[call[name[Id], parameter[name[idstr]]]]] return[call[name[IdList], parameter[name[id_list]]]]
keyword[def] identifier[get_repository_ids_by_asset] ( identifier[self] , identifier[asset_id] ): literal[string] identifier[mgr] = identifier[self] . identifier[_get_provider_manager] ( literal[string] , identifier[local] = keyword[True] ) identifier[lookup_session] = identifier[mgr] . identifier[get_asset_lookup_session] ( identifier[proxy] = identifier[self] . identifier[_proxy] ) identifier[lookup_session] . identifier[use_federated_repository_view] () identifier[asset] = identifier[lookup_session] . identifier[get_asset] ( identifier[asset_id] ) identifier[id_list] =[] keyword[for] identifier[idstr] keyword[in] identifier[asset] . identifier[_my_map] [ literal[string] ]: identifier[id_list] . identifier[append] ( identifier[Id] ( identifier[idstr] )) keyword[return] identifier[IdList] ( identifier[id_list] )
def get_repository_ids_by_asset(self, asset_id): """Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``. arg: asset_id (osid.id.Id): ``Id`` of an ``Asset`` return: (osid.id.IdList) - list of repository ``Ids`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bin_ids_by_resource mgr = self._get_provider_manager('REPOSITORY', local=True) lookup_session = mgr.get_asset_lookup_session(proxy=self._proxy) lookup_session.use_federated_repository_view() asset = lookup_session.get_asset(asset_id) id_list = [] for idstr in asset._my_map['assignedRepositoryIds']: id_list.append(Id(idstr)) # depends on [control=['for'], data=['idstr']] return IdList(id_list)
async def create_app_collections(db): ''' load all models in app and create collections in db with specified indices''' futures = [] for model_class in MongoCollectionMixin.__subclasses__(): if model_class._meta.concrete is True: futures.append(create_collection(db, model_class)) await asyncio.gather(*futures)
<ast.AsyncFunctionDef object at 0x7da18eb57700>
keyword[async] keyword[def] identifier[create_app_collections] ( identifier[db] ): literal[string] identifier[futures] =[] keyword[for] identifier[model_class] keyword[in] identifier[MongoCollectionMixin] . identifier[__subclasses__] (): keyword[if] identifier[model_class] . identifier[_meta] . identifier[concrete] keyword[is] keyword[True] : identifier[futures] . identifier[append] ( identifier[create_collection] ( identifier[db] , identifier[model_class] )) keyword[await] identifier[asyncio] . identifier[gather] (* identifier[futures] )
async def create_app_collections(db): """ load all models in app and create collections in db with specified indices""" futures = [] for model_class in MongoCollectionMixin.__subclasses__(): if model_class._meta.concrete is True: futures.append(create_collection(db, model_class)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['model_class']] await asyncio.gather(*futures)
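A hedged startup sketch: `db` is assumed to be an async (e.g. Motor) database handle, and only model classes whose _meta.concrete is True get a collection:

import asyncio

async def init_db(db):
    # one create_collection() task per concrete MongoCollectionMixin subclass
    await create_app_collections(db)

# asyncio.get_event_loop().run_until_complete(init_db(db))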
def listen_ttf(self, target, timeout): """Listen as Type F Target is supported for either 212 or 424 kbps.""" if target.brty not in ('212F', '424F'): info = "unsupported target bitrate: %r" % target.brty raise nfc.clf.UnsupportedTargetError(info) if target.sensf_res is None: raise ValueError("sensf_res is required") if len(target.sensf_res) != 19: raise ValueError("sensf_res must be 19 byte") self.chipset.tg_set_rf(target.brty) self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults) self.chipset.tg_set_protocol(rf_off_error=False) recv_timeout = min(int(1000 * timeout), 0xFFFF) time_to_return = time.time() + timeout transmit_data = sensf_req = sensf_res = None while recv_timeout > 0: if transmit_data: log.debug("%s send %s", target.brty, hexlify(transmit_data)) log.debug("%s wait recv %d ms", target.brty, recv_timeout) try: data = self.chipset.tg_comm_rf(recv_timeout=recv_timeout, transmit_data=transmit_data) except CommunicationError as error: log.debug(error) continue finally: recv_timeout = int((time_to_return - time.time()) * 1E3) transmit_data = None assert target.brty == ('106A', '212F', '424F')[data[0]-11] log.debug("%s rcvd %s", target.brty, hexlify(buffer(data, 7))) if len(data) > 7 and len(data)-7 == data[7]: if sensf_req and data[9:17] == target.sensf_res[1:9]: self.chipset.tg_set_protocol(rf_off_error=True) target = nfc.clf.LocalTarget(target.brty) target.sensf_req = sensf_req target.sensf_res = sensf_res target.tt3_cmd = data[8:] return target if len(data) == 13 and data[7] == 6 and data[8] == 0: (sensf_req, sensf_res) = (data[8:], target.sensf_res[:]) if (((sensf_req[1] == 255 or sensf_req[1] == sensf_res[17]) and (sensf_req[2] == 255 or sensf_req[2] == sensf_res[18]))): transmit_data = sensf_res[0:17] if sensf_req[3] == 1: transmit_data += sensf_res[17:19] if sensf_req[3] == 2: transmit_data += b"\x00" transmit_data += chr(1 << (target.brty == "424F")) transmit_data = chr(len(transmit_data)+1) + transmit_data
def function[listen_ttf, parameter[self, target, timeout]]: constant[Listen as Type F Target is supported for either 212 or 424 kbps.] if compare[name[target].brty <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c7968c0>, <ast.Constant object at 0x7da20c794dc0>]]] begin[:] variable[info] assign[=] binary_operation[constant[unsupported target bitrate: %r] <ast.Mod object at 0x7da2590d6920> name[target].brty] <ast.Raise object at 0x7da20c7944c0> if compare[name[target].sensf_res is constant[None]] begin[:] <ast.Raise object at 0x7da20c7955a0> if compare[call[name[len], parameter[name[target].sensf_res]] not_equal[!=] constant[19]] begin[:] <ast.Raise object at 0x7da20c7969b0> call[name[self].chipset.tg_set_rf, parameter[name[target].brty]] call[name[self].chipset.tg_set_protocol, parameter[name[self].chipset.tg_set_protocol_defaults]] call[name[self].chipset.tg_set_protocol, parameter[]] variable[recv_timeout] assign[=] call[name[min], parameter[call[name[int], parameter[binary_operation[constant[1000] * name[timeout]]]], constant[65535]]] variable[time_to_return] assign[=] binary_operation[call[name[time].time, parameter[]] + name[timeout]] variable[transmit_data] assign[=] constant[None] while compare[name[recv_timeout] greater[>] constant[0]] begin[:] if name[transmit_data] begin[:] call[name[log].debug, parameter[constant[%s send %s], name[target].brty, call[name[hexlify], parameter[name[transmit_data]]]]] call[name[log].debug, parameter[constant[%s wait recv %d ms], name[target].brty, name[recv_timeout]]] <ast.Try object at 0x7da20c6c5930> assert[compare[name[target].brty equal[==] call[tuple[[<ast.Constant object at 0x7da20c6c7c40>, <ast.Constant object at 0x7da20c6c7d60>, <ast.Constant object at 0x7da20c6c6140>]]][binary_operation[call[name[data]][constant[0]] - constant[11]]]]] call[name[log].debug, parameter[constant[%s rcvd %s], name[target].brty, call[name[hexlify], parameter[call[name[buffer], parameter[name[data], constant[7]]]]]]] if <ast.BoolOp object at 0x7da20c6c5090> begin[:] if <ast.BoolOp object at 0x7da20c6c60b0> begin[:] call[name[self].chipset.tg_set_protocol, parameter[]] variable[target] assign[=] call[name[nfc].clf.LocalTarget, parameter[name[target].brty]] name[target].sensf_req assign[=] name[sensf_req] name[target].sensf_res assign[=] name[sensf_res] name[target].tt3_cmd assign[=] call[name[data]][<ast.Slice object at 0x7da20c6c67d0>] return[name[target]] if <ast.BoolOp object at 0x7da20c6c4a30> begin[:] <ast.Tuple object at 0x7da20c6c7bb0> assign[=] tuple[[<ast.Subscript object at 0x7da20c6c6a40>, <ast.Subscript object at 0x7da20c6c6740>]] if <ast.BoolOp object at 0x7da20c6c5900> begin[:] variable[transmit_data] assign[=] call[name[sensf_res]][<ast.Slice object at 0x7da20c6c6ec0>] if compare[call[name[sensf_req]][constant[3]] equal[==] constant[1]] begin[:] <ast.AugAssign object at 0x7da20c6c5ea0> if compare[call[name[sensf_req]][constant[3]] equal[==] constant[2]] begin[:] <ast.AugAssign object at 0x7da20c6c5840> <ast.AugAssign object at 0x7da20c6c7cd0> variable[transmit_data] assign[=] binary_operation[call[name[chr], parameter[binary_operation[call[name[len], parameter[name[transmit_data]]] + constant[1]]]] + name[transmit_data]]
keyword[def] identifier[listen_ttf] ( identifier[self] , identifier[target] , identifier[timeout] ): literal[string] keyword[if] identifier[target] . identifier[brty] keyword[not] keyword[in] ( literal[string] , literal[string] ): identifier[info] = literal[string] % identifier[target] . identifier[brty] keyword[raise] identifier[nfc] . identifier[clf] . identifier[UnsupportedTargetError] ( identifier[info] ) keyword[if] identifier[target] . identifier[sensf_res] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[target] . identifier[sensf_res] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[chipset] . identifier[tg_set_rf] ( identifier[target] . identifier[brty] ) identifier[self] . identifier[chipset] . identifier[tg_set_protocol] ( identifier[self] . identifier[chipset] . identifier[tg_set_protocol_defaults] ) identifier[self] . identifier[chipset] . identifier[tg_set_protocol] ( identifier[rf_off_error] = keyword[False] ) identifier[recv_timeout] = identifier[min] ( identifier[int] ( literal[int] * identifier[timeout] ), literal[int] ) identifier[time_to_return] = identifier[time] . identifier[time] ()+ identifier[timeout] identifier[transmit_data] = identifier[sensf_req] = identifier[sensf_res] = keyword[None] keyword[while] identifier[recv_timeout] > literal[int] : keyword[if] identifier[transmit_data] : identifier[log] . identifier[debug] ( literal[string] , identifier[target] . identifier[brty] , identifier[hexlify] ( identifier[transmit_data] )) identifier[log] . identifier[debug] ( literal[string] , identifier[target] . identifier[brty] , identifier[recv_timeout] ) keyword[try] : identifier[data] = identifier[self] . identifier[chipset] . identifier[tg_comm_rf] ( identifier[recv_timeout] = identifier[recv_timeout] , identifier[transmit_data] = identifier[transmit_data] ) keyword[except] identifier[CommunicationError] keyword[as] identifier[error] : identifier[log] . identifier[debug] ( identifier[error] ) keyword[continue] keyword[finally] : identifier[recv_timeout] = identifier[int] (( identifier[time_to_return] - identifier[time] . identifier[time] ())* literal[int] ) identifier[transmit_data] = keyword[None] keyword[assert] identifier[target] . identifier[brty] ==( literal[string] , literal[string] , literal[string] )[ identifier[data] [ literal[int] ]- literal[int] ] identifier[log] . identifier[debug] ( literal[string] , identifier[target] . identifier[brty] , identifier[hexlify] ( identifier[buffer] ( identifier[data] , literal[int] ))) keyword[if] identifier[len] ( identifier[data] )> literal[int] keyword[and] identifier[len] ( identifier[data] )- literal[int] == identifier[data] [ literal[int] ]: keyword[if] identifier[sensf_req] keyword[and] identifier[data] [ literal[int] : literal[int] ]== identifier[target] . identifier[sensf_res] [ literal[int] : literal[int] ]: identifier[self] . identifier[chipset] . identifier[tg_set_protocol] ( identifier[rf_off_error] = keyword[True] ) identifier[target] = identifier[nfc] . identifier[clf] . identifier[LocalTarget] ( identifier[target] . identifier[brty] ) identifier[target] . identifier[sensf_req] = identifier[sensf_req] identifier[target] . identifier[sensf_res] = identifier[sensf_res] identifier[target] . identifier[tt3_cmd] = identifier[data] [ literal[int] :] keyword[return] identifier[target] keyword[if] identifier[len] ( identifier[data] )== literal[int] keyword[and] identifier[data] [ literal[int] ]== literal[int] keyword[and] identifier[data] [ literal[int] ]== literal[int] : ( identifier[sensf_req] , identifier[sensf_res] )=( identifier[data] [ literal[int] :], identifier[target] . identifier[sensf_res] [:]) keyword[if] ((( identifier[sensf_req] [ literal[int] ]== literal[int] keyword[or] identifier[sensf_req] [ literal[int] ]== identifier[sensf_res] [ literal[int] ]) keyword[and] ( identifier[sensf_req] [ literal[int] ]== literal[int] keyword[or] identifier[sensf_req] [ literal[int] ]== identifier[sensf_res] [ literal[int] ]))): identifier[transmit_data] = identifier[sensf_res] [ literal[int] : literal[int] ] keyword[if] identifier[sensf_req] [ literal[int] ]== literal[int] : identifier[transmit_data] += identifier[sensf_res] [ literal[int] : literal[int] ] keyword[if] identifier[sensf_req] [ literal[int] ]== literal[int] : identifier[transmit_data] += literal[string] identifier[transmit_data] += identifier[chr] ( literal[int] <<( identifier[target] . identifier[brty] == literal[string] )) identifier[transmit_data] = identifier[chr] ( identifier[len] ( identifier[transmit_data] )+ literal[int] )+ identifier[transmit_data]
def listen_ttf(self, target, timeout): """Listen as Type F Target is supported for either 212 or 424 kbps.""" if target.brty not in ('212F', '424F'): info = 'unsupported target bitrate: %r' % target.brty raise nfc.clf.UnsupportedTargetError(info) # depends on [control=['if'], data=[]] if target.sensf_res is None: raise ValueError('sensf_res is required') # depends on [control=['if'], data=[]] if len(target.sensf_res) != 19: raise ValueError('sensf_res must be 19 byte') # depends on [control=['if'], data=[]] self.chipset.tg_set_rf(target.brty) self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults) self.chipset.tg_set_protocol(rf_off_error=False) recv_timeout = min(int(1000 * timeout), 65535) time_to_return = time.time() + timeout transmit_data = sensf_req = sensf_res = None while recv_timeout > 0: if transmit_data: log.debug('%s send %s', target.brty, hexlify(transmit_data)) # depends on [control=['if'], data=[]] log.debug('%s wait recv %d ms', target.brty, recv_timeout) try: data = self.chipset.tg_comm_rf(recv_timeout=recv_timeout, transmit_data=transmit_data) # depends on [control=['try'], data=[]] except CommunicationError as error: log.debug(error) continue # depends on [control=['except'], data=['error']] finally: recv_timeout = int((time_to_return - time.time()) * 1000.0) transmit_data = None assert target.brty == ('106A', '212F', '424F')[data[0] - 11] log.debug('%s rcvd %s', target.brty, hexlify(buffer(data, 7))) if len(data) > 7 and len(data) - 7 == data[7]: if sensf_req and data[9:17] == target.sensf_res[1:9]: self.chipset.tg_set_protocol(rf_off_error=True) target = nfc.clf.LocalTarget(target.brty) target.sensf_req = sensf_req target.sensf_res = sensf_res target.tt3_cmd = data[8:] return target # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if len(data) == 13 and data[7] == 6 and (data[8] == 0): (sensf_req, sensf_res) = (data[8:], target.sensf_res[:]) if (sensf_req[1] == 255 or sensf_req[1] == sensf_res[17]) and (sensf_req[2] == 255 or sensf_req[2] == sensf_res[18]): transmit_data = sensf_res[0:17] if sensf_req[3] == 1: transmit_data += sensf_res[17:19] # depends on [control=['if'], data=[]] if sensf_req[3] == 2: transmit_data += b'\x00' transmit_data += chr(1 << (target.brty == '424F')) # depends on [control=['if'], data=[]] transmit_data = chr(len(transmit_data) + 1) + transmit_data # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['recv_timeout']]
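A minimal sketch of how the listen_ttf driver method above is typically driven, assuming nfcpy's nfc.clf.LocalTarget API; the IDm/PMm bytes and the `device` driver instance are illustrative assumptions, not part of the record:

import nfc.clf

# Hypothetical 19-byte SENSF_RES: 0x01 response code, 8-byte IDm,
# 8-byte PMm, 2-byte system code -- all values illustrative.
sensf_res = bytearray.fromhex('01' + '02fe000000000000' + 'ffffffffffffffff' + '12fc')

target = nfc.clf.LocalTarget('212F')
target.sensf_res = sensf_res

# `device` is assumed to be a driver instance exposing listen_ttf(); the call
# blocks for up to `timeout` seconds waiting for a Type F initiator.
# activated = device.listen_ttf(target, timeout=2.5)
# if activated is not None:
#     print('activated with command', activated.tt3_cmd)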
def reserve(self, capacity): """ Set current capacity of the underlying array""" if capacity >= self._data.size: capacity = int(2 ** np.ceil(np.log2(capacity))) self._data = np.resize(self._data, capacity)
def function[reserve, parameter[self, capacity]]: constant[ Set current capacity of the underlying array] if compare[name[capacity] greater_or_equal[>=] name[self]._data.size] begin[:] variable[capacity] assign[=] call[name[int], parameter[binary_operation[constant[2] ** call[name[np].ceil, parameter[call[name[np].log2, parameter[name[capacity]]]]]]]] name[self]._data assign[=] call[name[np].resize, parameter[name[self]._data, name[capacity]]]
keyword[def] identifier[reserve] ( identifier[self] , identifier[capacity] ): literal[string] keyword[if] identifier[capacity] >= identifier[self] . identifier[_data] . identifier[size] : identifier[capacity] = identifier[int] ( literal[int] ** identifier[np] . identifier[ceil] ( identifier[np] . identifier[log2] ( identifier[capacity] ))) identifier[self] . identifier[_data] = identifier[np] . identifier[resize] ( identifier[self] . identifier[_data] , identifier[capacity] )
def reserve(self, capacity): """ Set current capacity of the underlying array""" if capacity >= self._data.size: capacity = int(2 ** np.ceil(np.log2(capacity))) self._data = np.resize(self._data, capacity) # depends on [control=['if'], data=['capacity']]
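A short worked example of the power-of-two growth rule in the reserve() record above; the Buffer class is a hypothetical stand-in for the host object, defined only so the method can be exercised in isolation:

import numpy as np

class Buffer:
    """Hypothetical host object carrying the _data array reserve() resizes."""
    def __init__(self, size):
        self._data = np.zeros(size)

    def reserve(self, capacity):
        # Same logic as the record above: round up to the next power of two.
        if capacity >= self._data.size:
            capacity = int(2 ** np.ceil(np.log2(capacity)))
            self._data = np.resize(self._data, capacity)

buf = Buffer(16)
buf.reserve(1000)        # 1000 >= 16, so grow to 2 ** ceil(log2(1000))
print(buf._data.size)    # -> 1024
buf.reserve(8)           # 8 < 1024: the request already fits, nothing happens
print(buf._data.size)    # -> 1024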
def get_unauthorized(self, msg, signature, timestamp, nonce):
        """
        Handle the authorization-cancelled notification

        :params msg: encrypted message content
        :params signature: message signature
        :params timestamp: timestamp
        :params nonce: random nonce string
        """
        warnings.warn('`get_unauthorized` method of `WeChatComponent` is deprecated, '
                      'use `parse_message` instead',
                      DeprecationWarning, stacklevel=2)
        content = self.crypto.decrypt_message(msg, signature, timestamp, nonce)
        message = xmltodict.parse(to_text(content))['xml']
        return ComponentUnauthorizedMessage(message)
keyword[def] identifier[get_unauthorized] ( identifier[self] , identifier[msg] , identifier[signature] , identifier[timestamp] , identifier[nonce] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] literal[string] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] ) identifier[content] = identifier[self] . identifier[crypto] . identifier[decrypt_message] ( identifier[msg] , identifier[signature] , identifier[timestamp] , identifier[nonce] ) identifier[message] = identifier[xmltodict] . identifier[parse] ( identifier[to_text] ( identifier[content] ))[ literal[string] ] keyword[return] identifier[ComponentUnauthorizedMessage] ( identifier[message] )
def get_unauthorized(self, msg, signature, timestamp, nonce):
    """
        Handle the authorization-cancelled notification

        :params msg: encrypted message content
        :params signature: message signature
        :params timestamp: timestamp
        :params nonce: random nonce string
        """
    warnings.warn('`get_unauthorized` method of `WeChatComponent` is deprecated, use `parse_message` instead', DeprecationWarning, stacklevel=2)
    content = self.crypto.decrypt_message(msg, signature, timestamp, nonce)
    message = xmltodict.parse(to_text(content))['xml']
    return ComponentUnauthorizedMessage(message)
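A hedged usage sketch for the handler above; `component` is assumed to be a configured WeChatComponent instance, the placeholder values stand in for what the WeChat server sends in its callback request, and parse_message is assumed to take the same four arguments the deprecation warning implies:

# Values as they would arrive from the WeChat server callback (illustrative).
msg = b'<xml>...</xml>'               # encrypted POST body (placeholder)
signature = 'msg_signature_from_query'
timestamp = '1409735669'
nonce = '1320562132'

# Deprecated spelling -- still works, but emits the DeprecationWarning above:
# event = component.get_unauthorized(msg, signature, timestamp, nonce)

# Preferred spelling suggested by the warning:
# event = component.parse_message(msg, signature, timestamp, nonce)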
def populateFromRow(self, referenceRecord): """ Populates this reference from the values in the specified DB row. """ self._length = referenceRecord.length self._isDerived = bool(referenceRecord.isderived) self._md5checksum = referenceRecord.md5checksum species = referenceRecord.species if species is not None and species != 'null': self.setSpeciesFromJson(species) self._sourceAccessions = json.loads(referenceRecord.sourceaccessions) self._sourceDivergence = referenceRecord.sourcedivergence self._sourceUri = referenceRecord.sourceuri
def function[populateFromRow, parameter[self, referenceRecord]]: constant[ Populates this reference from the values in the specified DB row. ] name[self]._length assign[=] name[referenceRecord].length name[self]._isDerived assign[=] call[name[bool], parameter[name[referenceRecord].isderived]] name[self]._md5checksum assign[=] name[referenceRecord].md5checksum variable[species] assign[=] name[referenceRecord].species if <ast.BoolOp object at 0x7da204565030> begin[:] call[name[self].setSpeciesFromJson, parameter[name[species]]] name[self]._sourceAccessions assign[=] call[name[json].loads, parameter[name[referenceRecord].sourceaccessions]] name[self]._sourceDivergence assign[=] name[referenceRecord].sourcedivergence name[self]._sourceUri assign[=] name[referenceRecord].sourceuri
keyword[def] identifier[populateFromRow] ( identifier[self] , identifier[referenceRecord] ): literal[string] identifier[self] . identifier[_length] = identifier[referenceRecord] . identifier[length] identifier[self] . identifier[_isDerived] = identifier[bool] ( identifier[referenceRecord] . identifier[isderived] ) identifier[self] . identifier[_md5checksum] = identifier[referenceRecord] . identifier[md5checksum] identifier[species] = identifier[referenceRecord] . identifier[species] keyword[if] identifier[species] keyword[is] keyword[not] keyword[None] keyword[and] identifier[species] != literal[string] : identifier[self] . identifier[setSpeciesFromJson] ( identifier[species] ) identifier[self] . identifier[_sourceAccessions] = identifier[json] . identifier[loads] ( identifier[referenceRecord] . identifier[sourceaccessions] ) identifier[self] . identifier[_sourceDivergence] = identifier[referenceRecord] . identifier[sourcedivergence] identifier[self] . identifier[_sourceUri] = identifier[referenceRecord] . identifier[sourceuri]
def populateFromRow(self, referenceRecord): """ Populates this reference from the values in the specified DB row. """ self._length = referenceRecord.length self._isDerived = bool(referenceRecord.isderived) self._md5checksum = referenceRecord.md5checksum species = referenceRecord.species if species is not None and species != 'null': self.setSpeciesFromJson(species) # depends on [control=['if'], data=[]] self._sourceAccessions = json.loads(referenceRecord.sourceaccessions) self._sourceDivergence = referenceRecord.sourcedivergence self._sourceUri = referenceRecord.sourceuri
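A minimal sketch of feeding populateFromRow() a row-like object; RefRecord is a hypothetical stand-in for the real DB record class, with field names mirroring the attributes the method reads (all values illustrative):

import json
from collections import namedtuple

RefRecord = namedtuple('RefRecord', [
    'length', 'isderived', 'md5checksum', 'species',
    'sourceaccessions', 'sourcedivergence', 'sourceuri'])

row = RefRecord(
    length=248956422,
    isderived=0,
    md5checksum='2648ae1bacce4ec4b6cf337dcae37816',
    species=json.dumps({'termId': 'NCBITaxon:9606', 'term': 'Homo sapiens'}),
    sourceaccessions=json.dumps(['CM000663.2']),
    sourcedivergence=None,
    sourceuri='ftp://example.org/chr1.fa')

# `reference` is assumed to be an existing Reference instance:
# reference.populateFromRow(row)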
def _extract_game_info(self, games): """ Parse game information from all boxscores. Find the major game information for all boxscores listed on a particular boxscores webpage and return the results in a list. Parameters ---------- games : generator A generator where each element points to a boxscore on the parsed boxscores webpage. Returns ------- list Returns a ``list`` of dictionaries where each dictionary contains the name and abbreviations for both the home and away teams, and a link to the game's boxscore. """ all_boxscores = [] for game in games: details = self._get_team_details(game) away_name, away_abbr, away_score, home_name, home_abbr, \ home_score = details boxscore_url = game('td[class="right gamelink"] a') boxscore_uri = self._get_boxscore_uri(boxscore_url) losers = [l for l in game('tr[class="loser"]').items()] winner = self._get_team_results(game('tr[class="winner"]')) loser = self._get_team_results(game('tr[class="loser"]')) # Occurs when the boxscore format is invalid and the game should be # skipped to avoid conflicts populating the game information. if (len(losers) != 2 and loser and not winner) or \ (len(losers) != 2 and winner and not loser): continue # Occurs when information couldn't be parsed from the boxscore or # the game hasn't occurred yet. In this case, the winner should be # None to avoid conflicts. if not winner or len(losers) == 2: winning_name = None winning_abbreviation = None else: winning_name, winning_abbreviation = winner # Occurs when information couldn't be parsed from the boxscore or # the game hasn't occurred yet. In this case, the winner should be # None to avoid conflicts. if not loser or len(losers) == 2: losing_name = None losing_abbreviation = None else: losing_name, losing_abbreviation = loser game_info = { 'boxscore': boxscore_uri, 'away_name': away_name, 'away_abbr': away_abbr, 'away_score': away_score, 'home_name': home_name, 'home_abbr': home_abbr, 'home_score': home_score, 'winning_name': winning_name, 'winning_abbr': winning_abbreviation, 'losing_name': losing_name, 'losing_abbr': losing_abbreviation } all_boxscores.append(game_info) return all_boxscores
def function[_extract_game_info, parameter[self, games]]: constant[ Parse game information from all boxscores. Find the major game information for all boxscores listed on a particular boxscores webpage and return the results in a list. Parameters ---------- games : generator A generator where each element points to a boxscore on the parsed boxscores webpage. Returns ------- list Returns a ``list`` of dictionaries where each dictionary contains the name and abbreviations for both the home and away teams, and a link to the game's boxscore. ] variable[all_boxscores] assign[=] list[[]] for taget[name[game]] in starred[name[games]] begin[:] variable[details] assign[=] call[name[self]._get_team_details, parameter[name[game]]] <ast.Tuple object at 0x7da1b0bf2c50> assign[=] name[details] variable[boxscore_url] assign[=] call[name[game], parameter[constant[td[class="right gamelink"] a]]] variable[boxscore_uri] assign[=] call[name[self]._get_boxscore_uri, parameter[name[boxscore_url]]] variable[losers] assign[=] <ast.ListComp object at 0x7da1b0bf3730> variable[winner] assign[=] call[name[self]._get_team_results, parameter[call[name[game], parameter[constant[tr[class="winner"]]]]]] variable[loser] assign[=] call[name[self]._get_team_results, parameter[call[name[game], parameter[constant[tr[class="loser"]]]]]] if <ast.BoolOp object at 0x7da1b0bf36a0> begin[:] continue if <ast.BoolOp object at 0x7da1b0b9f370> begin[:] variable[winning_name] assign[=] constant[None] variable[winning_abbreviation] assign[=] constant[None] if <ast.BoolOp object at 0x7da1b0b9f280> begin[:] variable[losing_name] assign[=] constant[None] variable[losing_abbreviation] assign[=] constant[None] variable[game_info] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b9c6a0>, <ast.Constant object at 0x7da1b0b9f6d0>, <ast.Constant object at 0x7da1b0b9fa00>, <ast.Constant object at 0x7da1b0b9ca60>, <ast.Constant object at 0x7da1b0b9c5e0>, <ast.Constant object at 0x7da1b0b9cd60>, <ast.Constant object at 0x7da1b0b9dd50>, <ast.Constant object at 0x7da1b0b9f4f0>, <ast.Constant object at 0x7da1b0b9f7f0>, <ast.Constant object at 0x7da1b0b9d060>, <ast.Constant object at 0x7da1b0b9d150>], [<ast.Name object at 0x7da1b0b9ece0>, <ast.Name object at 0x7da1b0b9e2f0>, <ast.Name object at 0x7da1b0b9ff70>, <ast.Name object at 0x7da1b0b9e560>, <ast.Name object at 0x7da1b0b9fe20>, <ast.Name object at 0x7da1b0b9f790>, <ast.Name object at 0x7da1b0b9f1f0>, <ast.Name object at 0x7da1b0b9e6b0>, <ast.Name object at 0x7da1b0b9d360>, <ast.Name object at 0x7da1b0b9cee0>, <ast.Name object at 0x7da1b0b9ca90>]] call[name[all_boxscores].append, parameter[name[game_info]]] return[name[all_boxscores]]
keyword[def] identifier[_extract_game_info] ( identifier[self] , identifier[games] ): literal[string] identifier[all_boxscores] =[] keyword[for] identifier[game] keyword[in] identifier[games] : identifier[details] = identifier[self] . identifier[_get_team_details] ( identifier[game] ) identifier[away_name] , identifier[away_abbr] , identifier[away_score] , identifier[home_name] , identifier[home_abbr] , identifier[home_score] = identifier[details] identifier[boxscore_url] = identifier[game] ( literal[string] ) identifier[boxscore_uri] = identifier[self] . identifier[_get_boxscore_uri] ( identifier[boxscore_url] ) identifier[losers] =[ identifier[l] keyword[for] identifier[l] keyword[in] identifier[game] ( literal[string] ). identifier[items] ()] identifier[winner] = identifier[self] . identifier[_get_team_results] ( identifier[game] ( literal[string] )) identifier[loser] = identifier[self] . identifier[_get_team_results] ( identifier[game] ( literal[string] )) keyword[if] ( identifier[len] ( identifier[losers] )!= literal[int] keyword[and] identifier[loser] keyword[and] keyword[not] identifier[winner] ) keyword[or] ( identifier[len] ( identifier[losers] )!= literal[int] keyword[and] identifier[winner] keyword[and] keyword[not] identifier[loser] ): keyword[continue] keyword[if] keyword[not] identifier[winner] keyword[or] identifier[len] ( identifier[losers] )== literal[int] : identifier[winning_name] = keyword[None] identifier[winning_abbreviation] = keyword[None] keyword[else] : identifier[winning_name] , identifier[winning_abbreviation] = identifier[winner] keyword[if] keyword[not] identifier[loser] keyword[or] identifier[len] ( identifier[losers] )== literal[int] : identifier[losing_name] = keyword[None] identifier[losing_abbreviation] = keyword[None] keyword[else] : identifier[losing_name] , identifier[losing_abbreviation] = identifier[loser] identifier[game_info] ={ literal[string] : identifier[boxscore_uri] , literal[string] : identifier[away_name] , literal[string] : identifier[away_abbr] , literal[string] : identifier[away_score] , literal[string] : identifier[home_name] , literal[string] : identifier[home_abbr] , literal[string] : identifier[home_score] , literal[string] : identifier[winning_name] , literal[string] : identifier[winning_abbreviation] , literal[string] : identifier[losing_name] , literal[string] : identifier[losing_abbreviation] } identifier[all_boxscores] . identifier[append] ( identifier[game_info] ) keyword[return] identifier[all_boxscores]
def _extract_game_info(self, games): """ Parse game information from all boxscores. Find the major game information for all boxscores listed on a particular boxscores webpage and return the results in a list. Parameters ---------- games : generator A generator where each element points to a boxscore on the parsed boxscores webpage. Returns ------- list Returns a ``list`` of dictionaries where each dictionary contains the name and abbreviations for both the home and away teams, and a link to the game's boxscore. """ all_boxscores = [] for game in games: details = self._get_team_details(game) (away_name, away_abbr, away_score, home_name, home_abbr, home_score) = details boxscore_url = game('td[class="right gamelink"] a') boxscore_uri = self._get_boxscore_uri(boxscore_url) losers = [l for l in game('tr[class="loser"]').items()] winner = self._get_team_results(game('tr[class="winner"]')) loser = self._get_team_results(game('tr[class="loser"]')) # Occurs when the boxscore format is invalid and the game should be # skipped to avoid conflicts populating the game information. if len(losers) != 2 and loser and (not winner) or (len(losers) != 2 and winner and (not loser)): continue # depends on [control=['if'], data=[]] # Occurs when information couldn't be parsed from the boxscore or # the game hasn't occurred yet. In this case, the winner should be # None to avoid conflicts. if not winner or len(losers) == 2: winning_name = None winning_abbreviation = None # depends on [control=['if'], data=[]] else: (winning_name, winning_abbreviation) = winner # Occurs when information couldn't be parsed from the boxscore or # the game hasn't occurred yet. In this case, the winner should be # None to avoid conflicts. if not loser or len(losers) == 2: losing_name = None losing_abbreviation = None # depends on [control=['if'], data=[]] else: (losing_name, losing_abbreviation) = loser game_info = {'boxscore': boxscore_uri, 'away_name': away_name, 'away_abbr': away_abbr, 'away_score': away_score, 'home_name': home_name, 'home_abbr': home_abbr, 'home_score': home_score, 'winning_name': winning_name, 'winning_abbr': winning_abbreviation, 'losing_name': losing_name, 'losing_abbr': losing_abbreviation} all_boxscores.append(game_info) # depends on [control=['for'], data=['game']] return all_boxscores
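An illustrative way to consume the list the method above returns: in-progress or unparseable games carry winning_name=None, so they can be filtered out before reporting scores. The sample dictionary below is shaped like the ones built above, with made-up values:

boxscores = [{
    'boxscore': '201802040nwe', 'away_name': 'Philadelphia Eagles',
    'away_abbr': 'phi', 'away_score': 41, 'home_name': 'New England Patriots',
    'home_abbr': 'nwe', 'home_score': 33, 'winning_name': 'Philadelphia Eagles',
    'winning_abbr': 'phi', 'losing_name': 'New England Patriots',
    'losing_abbr': 'nwe'}]

# Drop games without a recorded winner, then print a compact score line.
completed = [g for g in boxscores if g['winning_name'] is not None]
for g in completed:
    print('{away_abbr} {away_score} @ {home_abbr} {home_score}'.format(**g))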
def _equal_values(self, val1, val2): """Checks if the parameter considers two values as equal. This is important for the trajectory in case of merging. In case you want to delete duplicate parameter points, the trajectory needs to know when two parameters are equal. Since equality is not always implemented by values handled by parameters in the same way, the parameters need to judge whether their values are equal. The straightforward example here is a numpy array. Checking for equality of two numpy arrays yields a third numpy array containing truth values of a piecewise comparison. Accordingly, the parameter could judge two numpy arrays equal if ALL of the numpy array elements are equal. In this BaseParameter class values are considered to be equal if they obey the function :func:`~pypet.utils.comparisons.nested_equal`. You might consider implementing a different equality comparison in your subclass. :raises: TypeError: If both values are not supported by the parameter. """ if self.f_supports(val1) != self.f_supports(val2): return False if not self.f_supports(val1) and not self.f_supports(val2): raise TypeError('I do not support the types of both inputs (`%s` and `%s`), ' 'therefore I cannot judge whether ' 'the two are equal.' % (str(type(val1)), str(type(val2)))) if not self._values_of_same_type(val1, val2): return False return comparisons.nested_equal(val1, val2)
def function[_equal_values, parameter[self, val1, val2]]: constant[Checks if the parameter considers two values as equal. This is important for the trajectory in case of merging. In case you want to delete duplicate parameter points, the trajectory needs to know when two parameters are equal. Since equality is not always implemented by values handled by parameters in the same way, the parameters need to judge whether their values are equal. The straightforward example here is a numpy array. Checking for equality of two numpy arrays yields a third numpy array containing truth values of a piecewise comparison. Accordingly, the parameter could judge two numpy arrays equal if ALL of the numpy array elements are equal. In this BaseParameter class values are considered to be equal if they obey the function :func:`~pypet.utils.comparisons.nested_equal`. You might consider implementing a different equality comparison in your subclass. :raises: TypeError: If both values are not supported by the parameter. ] if compare[call[name[self].f_supports, parameter[name[val1]]] not_equal[!=] call[name[self].f_supports, parameter[name[val2]]]] begin[:] return[constant[False]] if <ast.BoolOp object at 0x7da1b033e1a0> begin[:] <ast.Raise object at 0x7da1b033ce20> if <ast.UnaryOp object at 0x7da1b033c4c0> begin[:] return[constant[False]] return[call[name[comparisons].nested_equal, parameter[name[val1], name[val2]]]]
keyword[def] identifier[_equal_values] ( identifier[self] , identifier[val1] , identifier[val2] ): literal[string] keyword[if] identifier[self] . identifier[f_supports] ( identifier[val1] )!= identifier[self] . identifier[f_supports] ( identifier[val2] ): keyword[return] keyword[False] keyword[if] keyword[not] identifier[self] . identifier[f_supports] ( identifier[val1] ) keyword[and] keyword[not] identifier[self] . identifier[f_supports] ( identifier[val2] ): keyword[raise] identifier[TypeError] ( literal[string] literal[string] literal[string] %( identifier[str] ( identifier[type] ( identifier[val1] )), identifier[str] ( identifier[type] ( identifier[val2] )))) keyword[if] keyword[not] identifier[self] . identifier[_values_of_same_type] ( identifier[val1] , identifier[val2] ): keyword[return] keyword[False] keyword[return] identifier[comparisons] . identifier[nested_equal] ( identifier[val1] , identifier[val2] )
def _equal_values(self, val1, val2): """Checks if the parameter considers two values as equal. This is important for the trajectory in case of merging. In case you want to delete duplicate parameter points, the trajectory needs to know when two parameters are equal. Since equality is not always implemented by values handled by parameters in the same way, the parameters need to judge whether their values are equal. The straightforward example here is a numpy array. Checking for equality of two numpy arrays yields a third numpy array containing truth values of a piecewise comparison. Accordingly, the parameter could judge two numpy arrays equal if ALL of the numpy array elements are equal. In this BaseParameter class values are considered to be equal if they obey the function :func:`~pypet.utils.comparisons.nested_equal`. You might consider implementing a different equality comparison in your subclass. :raises: TypeError: If both values are not supported by the parameter. """ if self.f_supports(val1) != self.f_supports(val2): return False # depends on [control=['if'], data=[]] if not self.f_supports(val1) and (not self.f_supports(val2)): raise TypeError('I do not support the types of both inputs (`%s` and `%s`), therefore I cannot judge whether the two are equal.' % (str(type(val1)), str(type(val2)))) # depends on [control=['if'], data=[]] if not self._values_of_same_type(val1, val2): return False # depends on [control=['if'], data=[]] return comparisons.nested_equal(val1, val2)
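A small demonstration of the numpy pitfall the docstring above describes, and the single-verdict collapse that a nested_equal-style comparison provides; np.array_equal stands in here for pypet's comparisons.nested_equal:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([1, 2, 3])

print(a == b)                 # elementwise: [ True  True  True]
# bool(a == b)                # would raise: truth value of an array is ambiguous
print(np.array_equal(a, b))   # single verdict: True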
def match(self, item): '''Return whether *item* matches this collection expression. If a match is successful return data about the match otherwise return None. ''' match = self._expression.match(item) if not match: return None index = match.group('index') padded = False if match.group('padding'): padded = True if self.padding == 0: if padded: return None elif len(index) != self.padding: return None return match
def function[match, parameter[self, item]]: constant[Return whether *item* matches this collection expression. If a match is successful return data about the match otherwise return None. ] variable[match] assign[=] call[name[self]._expression.match, parameter[name[item]]] if <ast.UnaryOp object at 0x7da1b0f38400> begin[:] return[constant[None]] variable[index] assign[=] call[name[match].group, parameter[constant[index]]] variable[padded] assign[=] constant[False] if call[name[match].group, parameter[constant[padding]]] begin[:] variable[padded] assign[=] constant[True] if compare[name[self].padding equal[==] constant[0]] begin[:] if name[padded] begin[:] return[constant[None]] return[name[match]]
keyword[def] identifier[match] ( identifier[self] , identifier[item] ): literal[string] identifier[match] = identifier[self] . identifier[_expression] . identifier[match] ( identifier[item] ) keyword[if] keyword[not] identifier[match] : keyword[return] keyword[None] identifier[index] = identifier[match] . identifier[group] ( literal[string] ) identifier[padded] = keyword[False] keyword[if] identifier[match] . identifier[group] ( literal[string] ): identifier[padded] = keyword[True] keyword[if] identifier[self] . identifier[padding] == literal[int] : keyword[if] identifier[padded] : keyword[return] keyword[None] keyword[elif] identifier[len] ( identifier[index] )!= identifier[self] . identifier[padding] : keyword[return] keyword[None] keyword[return] identifier[match]
def match(self, item): """Return whether *item* matches this collection expression. If a match is successful return data about the match otherwise return None. """ match = self._expression.match(item) if not match: return None # depends on [control=['if'], data=[]] index = match.group('index') padded = False if match.group('padding'): padded = True # depends on [control=['if'], data=[]] if self.padding == 0: if padded: return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif len(index) != self.padding: return None # depends on [control=['if'], data=[]] return match
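A sketch of the named groups match() relies on, using an illustrative file-sequence pattern (the expression itself is a hypothetical stand-in for the collection's compiled `_expression`); `index` captures the numeric part and `padding` its leading zeros:

import re

expression = re.compile(r'^file\.(?P<index>(?P<padding>0*)\d+)\.exr$')

m = expression.match('file.0010.exr')
index, padded = m.group('index'), bool(m.group('padding'))
print(index, padded)   # -> 0010 True

# With self.padding == 4: padded and len('0010') == 4, so the item matches.
# With self.padding == 0: padded items are rejected, so match() returns None.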
def get_v_distance(self, latlonalt1, latlonalt2):
        '''get the vertical distance between threat and vehicle'''
        (lat1, lon1, alt1) = latlonalt1
        (lat2, lon2, alt2) = latlonalt2
        return alt2 - alt1
keyword[def] identifier[get_v_distance] ( identifier[self] , identifier[latlonalt1] , identifier[latlonalt2] ): literal[string] ( identifier[lat1] , identifier[lon1] , identifier[alt1] )= identifier[latlonalt1] ( identifier[lat2] , identifier[lon2] , identifier[alt2] )= identifier[latlonalt2] keyword[return] identifier[alt2] - identifier[alt1]
def get_v_distance(self, latlonalt1, latlonalt2):
    """get the vertical distance between threat and vehicle"""
    (lat1, lon1, alt1) = latlonalt1
    (lat2, lon2, alt2) = latlonalt2
    return alt2 - alt1
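A one-line worked example: only the altitudes matter, since the latitude/longitude components are unpacked and ignored (coordinates illustrative):

threat = (-35.363261, 149.165230, 584.0)    # (lat, lon, alt)
vehicle = (-35.363000, 149.165900, 634.0)
print(vehicle[2] - threat[2])  # -> 50.0, what get_v_distance(threat, vehicle) returns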
def _summarize_accessible_fields(field_descriptions, width=40, section_title='Accessible fields'): """ Create a summary string for the accessible fields in a model. Unlike `_toolkit_repr_print`, this function does not look up the values of the fields, it just formats the names and descriptions. Parameters ---------- field_descriptions : dict{str: str} Name of each field and its description, in a dictionary. Keys and values should be strings. width : int, optional Width of the names. This is usually determined and passed by the calling `__repr__` method. section_title : str, optional Name of the accessible fields section in the summary string. Returns ------- out : str """ key_str = "{:<{}}: {}" items = [] items.append(section_title) items.append("-" * len(section_title)) for field_name, field_desc in field_descriptions.items(): items.append(key_str.format(field_name, width, field_desc)) return "\n".join(items)
def function[_summarize_accessible_fields, parameter[field_descriptions, width, section_title]]: constant[ Create a summary string for the accessible fields in a model. Unlike `_toolkit_repr_print`, this function does not look up the values of the fields, it just formats the names and descriptions. Parameters ---------- field_descriptions : dict{str: str} Name of each field and its description, in a dictionary. Keys and values should be strings. width : int, optional Width of the names. This is usually determined and passed by the calling `__repr__` method. section_title : str, optional Name of the accessible fields section in the summary string. Returns ------- out : str ] variable[key_str] assign[=] constant[{:<{}}: {}] variable[items] assign[=] list[[]] call[name[items].append, parameter[name[section_title]]] call[name[items].append, parameter[binary_operation[constant[-] * call[name[len], parameter[name[section_title]]]]]] for taget[tuple[[<ast.Name object at 0x7da1b1f8cb80>, <ast.Name object at 0x7da1b1f8e2f0>]]] in starred[call[name[field_descriptions].items, parameter[]]] begin[:] call[name[items].append, parameter[call[name[key_str].format, parameter[name[field_name], name[width], name[field_desc]]]]] return[call[constant[ ].join, parameter[name[items]]]]
keyword[def] identifier[_summarize_accessible_fields] ( identifier[field_descriptions] , identifier[width] = literal[int] , identifier[section_title] = literal[string] ): literal[string] identifier[key_str] = literal[string] identifier[items] =[] identifier[items] . identifier[append] ( identifier[section_title] ) identifier[items] . identifier[append] ( literal[string] * identifier[len] ( identifier[section_title] )) keyword[for] identifier[field_name] , identifier[field_desc] keyword[in] identifier[field_descriptions] . identifier[items] (): identifier[items] . identifier[append] ( identifier[key_str] . identifier[format] ( identifier[field_name] , identifier[width] , identifier[field_desc] )) keyword[return] literal[string] . identifier[join] ( identifier[items] )
def _summarize_accessible_fields(field_descriptions, width=40, section_title='Accessible fields'): """ Create a summary string for the accessible fields in a model. Unlike `_toolkit_repr_print`, this function does not look up the values of the fields, it just formats the names and descriptions. Parameters ---------- field_descriptions : dict{str: str} Name of each field and its description, in a dictionary. Keys and values should be strings. width : int, optional Width of the names. This is usually determined and passed by the calling `__repr__` method. section_title : str, optional Name of the accessible fields section in the summary string. Returns ------- out : str """ key_str = '{:<{}}: {}' items = [] items.append(section_title) items.append('-' * len(section_title)) for (field_name, field_desc) in field_descriptions.items(): items.append(key_str.format(field_name, width, field_desc)) # depends on [control=['for'], data=[]] return '\n'.join(items)
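A quick usage example for the formatter above; the field names and descriptions are made up:

fields = {
    'coefficients': 'Weights of the fitted model',
    'num_examples': 'Number of training rows',
}
print(_summarize_accessible_fields(fields, width=20))
# Accessible fields
# -----------------
# coefficients        : Weights of the fitted model
# num_examples        : Number of training rows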
def dict_find(in_dict, value): """ Helper function for looking up directory keys by their values. This isn't robust to repeated values Parameters ---------- in_dict : dictionary A dictionary containing `value` value : any type What we wish to find in the dictionary Returns ------- key: basestring The key at which the value can be found Examples -------- >>> dict_find({'Key1': 'A', 'Key2': 'B'}, 'B') 'Key2' """ # Todo: make this robust to repeated values # Todo: make this robust to missing values return list(in_dict.keys())[list(in_dict.values()).index(value)]
def function[dict_find, parameter[in_dict, value]]: constant[ Helper function for looking up directory keys by their values. This isn't robust to repeated values Parameters ---------- in_dict : dictionary A dictionary containing `value` value : any type What we wish to find in the dictionary Returns ------- key: basestring The key at which the value can be found Examples -------- >>> dict_find({'Key1': 'A', 'Key2': 'B'}, 'B') 'Key2' ] return[call[call[name[list], parameter[call[name[in_dict].keys, parameter[]]]]][call[call[name[list], parameter[call[name[in_dict].values, parameter[]]]].index, parameter[name[value]]]]]
keyword[def] identifier[dict_find] ( identifier[in_dict] , identifier[value] ): literal[string] keyword[return] identifier[list] ( identifier[in_dict] . identifier[keys] ())[ identifier[list] ( identifier[in_dict] . identifier[values] ()). identifier[index] ( identifier[value] )]
def dict_find(in_dict, value): """ Helper function for looking up directory keys by their values. This isn't robust to repeated values Parameters ---------- in_dict : dictionary A dictionary containing `value` value : any type What we wish to find in the dictionary Returns ------- key: basestring The key at which the value can be found Examples -------- >>> dict_find({'Key1': 'A', 'Key2': 'B'}, 'B') 'Key2' """ # Todo: make this robust to repeated values # Todo: make this robust to missing values return list(in_dict.keys())[list(in_dict.values()).index(value)]
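Two quick cases that show the caveats flagged in the Todo comments above: only the first matching key is returned, and a value missing from the dictionary raises:

d = {'Key1': 'A', 'Key2': 'B', 'Key3': 'A'}
print(dict_find(d, 'B'))   # -> 'Key2'
print(dict_find(d, 'A'))   # -> 'Key1' ('Key3' also holds 'A' but is never seen)
# dict_find(d, 'Z')        # raises ValueError: 'Z' is not in list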
def _separable_series2(h, N=1):
    """ finds separable approximations to the 2d function h

    returns res = (hx, hy)[N]
    s.t. h \\approx sum_i outer(res[i,0],res[i,1])
    """
    if min(h.shape)<N:
        raise ValueError("smallest dimension of h is smaller than approximation order! (%s < %s)"%(min(h.shape),N))

    U, S, V = linalg.svd(h)

    hx = [-U[:, n] * np.sqrt(S[n]) for n in range(N)]
    hy = [-V[n, :] * np.sqrt(S[n]) for n in range(N)]

    return np.array(list(zip(hx, hy)))
def function[_separable_series2, parameter[h, N]]: constant[ finds separable approximations to the 2d function h returns res = (hx, hy)[N] s.t. h \approx sum_i outer(res[i,0],res[i,1]) ] if compare[call[name[min], parameter[name[h].shape]] less[<] name[N]] begin[:] <ast.Raise object at 0x7da18f00e8f0> <ast.Tuple object at 0x7da18f00eb00> assign[=] call[name[linalg].svd, parameter[name[h]]] variable[hx] assign[=] <ast.ListComp object at 0x7da18f00c670> variable[hy] assign[=] <ast.ListComp object at 0x7da18f00e650> return[call[name[np].array, parameter[call[name[list], parameter[call[name[zip], parameter[name[hx], name[hy]]]]]]]]
keyword[def] identifier[_separable_series2] ( identifier[h] , identifier[N] = literal[int] ): literal[string] keyword[if] identifier[min] ( identifier[h] . identifier[shape] )< identifier[N] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[min] ( identifier[h] . identifier[shape] ), identifier[N] )) identifier[U] , identifier[S] , identifier[V] = identifier[linalg] . identifier[svd] ( identifier[h] ) identifier[hx] =[- identifier[U] [:, identifier[n] ]* identifier[np] . identifier[sqrt] ( identifier[S] [ identifier[n] ]) keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[N] )] identifier[hy] =[- identifier[V] [ identifier[n] ,:]* identifier[np] . identifier[sqrt] ( identifier[S] [ identifier[n] ]) keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[N] )] keyword[return] identifier[np] . identifier[array] ( identifier[list] ( identifier[zip] ( identifier[hx] , identifier[hy] )))
def _separable_series2(h, N=1):
    """ finds separable approximations to the 2d function h

    returns res = (hx, hy)[N]
    s.t. h \approx sum_i outer(res[i,0],res[i,1])
    """
    if min(h.shape) < N:
        raise ValueError('smallest dimension of h is smaller than approximation order! (%s < %s)' % (min(h.shape), N)) # depends on [control=['if'], data=['N']]
    (U, S, V) = linalg.svd(h)
    hx = [-U[:, n] * np.sqrt(S[n]) for n in range(N)]
    hy = [-V[n, :] * np.sqrt(S[n]) for n in range(N)]
    return np.array(list(zip(hx, hy)))
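A sanity check of the SVD construction above on a genuinely separable (rank-1) kernel; it assumes the function is importable with its module-level `np`/`linalg` dependencies in scope:

import numpy as np

x = np.linspace(-1.0, 1.0, 32)
h = np.exp(-np.add.outer(x ** 2, x ** 2))   # exp(-xi**2) * exp(-xj**2): rank 1

res = _separable_series2(h, N=1)
h_approx = sum(np.outer(hx, hy) for hx, hy in res)
print(np.allclose(h, h_approx))             # -> True, exact for a rank-1 h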
def _encode_batched_op_msg( operation, command, docs, check_keys, ack, opts, ctx): """Encode the next batched insert, update, or delete operation as OP_MSG. """ buf = StringIO() to_send, _ = _batched_op_msg_impl( operation, command, docs, check_keys, ack, opts, ctx, buf) return buf.getvalue(), to_send
def function[_encode_batched_op_msg, parameter[operation, command, docs, check_keys, ack, opts, ctx]]: constant[Encode the next batched insert, update, or delete operation as OP_MSG. ] variable[buf] assign[=] call[name[StringIO], parameter[]] <ast.Tuple object at 0x7da20c991ea0> assign[=] call[name[_batched_op_msg_impl], parameter[name[operation], name[command], name[docs], name[check_keys], name[ack], name[opts], name[ctx], name[buf]]] return[tuple[[<ast.Call object at 0x7da20c991030>, <ast.Name object at 0x7da20c992230>]]]
keyword[def] identifier[_encode_batched_op_msg] ( identifier[operation] , identifier[command] , identifier[docs] , identifier[check_keys] , identifier[ack] , identifier[opts] , identifier[ctx] ): literal[string] identifier[buf] = identifier[StringIO] () identifier[to_send] , identifier[_] = identifier[_batched_op_msg_impl] ( identifier[operation] , identifier[command] , identifier[docs] , identifier[check_keys] , identifier[ack] , identifier[opts] , identifier[ctx] , identifier[buf] ) keyword[return] identifier[buf] . identifier[getvalue] (), identifier[to_send]
def _encode_batched_op_msg(operation, command, docs, check_keys, ack, opts, ctx): """Encode the next batched insert, update, or delete operation as OP_MSG. """ buf = StringIO() (to_send, _) = _batched_op_msg_impl(operation, command, docs, check_keys, ack, opts, ctx, buf) return (buf.getvalue(), to_send)