Dataset columns (string lengths, min - max):

    code              75 - 104k
    code_sememe       47 - 309k
    token_type       215 - 214k
    code_dependency   75 - 155k
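Each row pairs a Python function (code) with three derived views: a linearized AST rendering (code_sememe), a token stream tagged as keyword/identifier/literal (token_type), and the source re-emitted with trailing control-dependency comments (code_dependency). As a rough sketch of how a token_type-style view could be produced with the standard library tokenize module; the tag names below mirror the column's format, but the tokenizer actually used to build the dataset is an assumption:

import io
import keyword
import tokenize

def token_type_view(source):
    # Hypothetical re-implementation for illustration; not the dataset's real tokenizer.
    out = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.NAME:
            # Python keywords and plain identifiers are both NAME tokens.
            tag = 'keyword' if keyword.iskeyword(tok.string) else 'identifier'
            out.append('%s[%s]' % (tag, tok.string))
        elif tok.type == tokenize.STRING:
            out.append('literal[string]')
        elif tok.type == tokenize.NUMBER:
            out.append('literal[int]')
        elif tok.type == tokenize.OP:
            out.append(tok.string)
    return ' '.join(out)

print(token_type_view('def f(x): return x + 1\n'))
# keyword[def] identifier[f] ( identifier[x] ) : keyword[return] identifier[x] + literal[int]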
def flush_buffer(self):
    ''' Flush the buffer of the tail '''
    if len(self.buffer) > 0:
        return_value = ''.join(self.buffer)
        self.buffer.clear()
        self.send_message(return_value)
        self.last_flush_date = datetime.datetime.now()
def function[flush_buffer, parameter[self]]: constant[ Flush the buffer of the tail ] if compare[call[name[len], parameter[name[self].buffer]] greater[>] constant[0]] begin[:] variable[return_value] assign[=] call[constant[].join, parameter[name[self].buffer]] call[name[self].buffer.clear, parameter[]] call[name[self].send_message, parameter[name[return_value]]] name[self].last_flush_date assign[=] call[name[datetime].datetime.now, parameter[]]
keyword[def] identifier[flush_buffer] ( identifier[self] ): literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[buffer] )> literal[int] : identifier[return_value] = literal[string] . identifier[join] ( identifier[self] . identifier[buffer] ) identifier[self] . identifier[buffer] . identifier[clear] () identifier[self] . identifier[send_message] ( identifier[return_value] ) identifier[self] . identifier[last_flush_date] = identifier[datetime] . identifier[datetime] . identifier[now] ()
def flush_buffer(self):
    ''' Flush the buffer of the tail '''
    if len(self.buffer) > 0:
        return_value = ''.join(self.buffer)
        self.buffer.clear()
        self.send_message(return_value)
        self.last_flush_date = datetime.datetime.now() # depends on [control=['if'], data=[]]
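The code_sememe view above reads like a linearized ast dump: functions become function[name, parameter[...]], assignments become assign[=], and unhandled nodes occasionally leak as raw <ast.... object at 0x...> reprs. A toy linearizer in that spirit, using the standard ast module (the dataset's actual serializer is unknown; only two node types are special-cased here):

import ast

def sememe_view(node):
    # Toy AST linearizer for illustration; not the serializer used by the dataset.
    if isinstance(node, ast.FunctionDef):
        args = ', '.join(a.arg for a in node.args.args)
        body = ' '.join(sememe_view(stmt) for stmt in node.body)
        return 'def function[%s, parameter[%s]]: %s' % (node.name, args, body)
    if isinstance(node, ast.Return) and node.value is not None:
        return 'return[%s]' % ast.dump(node.value)
    return ast.dump(node)

tree = ast.parse('def f(x): return x + 1')
print(sememe_view(tree.body[0]))
# def function[f, parameter[x]]: return[BinOp(left=Name(id='x', ctx=Load()), op=Add(), right=Constant(value=1))]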
def correct_dmdt(d, dmind, dtind, blrange):
    """ Dedisperses and resamples data *in place*.
    Drops edges, since it assumes that data is read with overlapping chunks in time.
    """
    data = numpyview(data_mem, 'complex64', datashape(d))
    data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
    bl0,bl1 = blrange
    data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
    rtlib.dedisperse_resample(data_resamp, d['freq'], d['inttime'], d['dmarr'][dmind], d['dtarr'][dtind], blrange, verbose=0)
def function[correct_dmdt, parameter[d, dmind, dtind, blrange]]: constant[ Dedisperses and resamples data *in place*. Drops edges, since it assumes that data is read with overlapping chunks in time. ] variable[data] assign[=] call[name[numpyview], parameter[name[data_mem], constant[complex64], call[name[datashape], parameter[name[d]]]]] variable[data_resamp] assign[=] call[name[numpyview], parameter[name[data_resamp_mem], constant[complex64], call[name[datashape], parameter[name[d]]]]] <ast.Tuple object at 0x7da1b24ad030> assign[=] name[blrange] call[name[data_resamp]][tuple[[<ast.Slice object at 0x7da1b24acc40>, <ast.Slice object at 0x7da1b24acd30>]]] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da1b24acc10>, <ast.Slice object at 0x7da1b24acb20>]]] call[name[rtlib].dedisperse_resample, parameter[name[data_resamp], call[name[d]][constant[freq]], call[name[d]][constant[inttime]], call[call[name[d]][constant[dmarr]]][name[dmind]], call[call[name[d]][constant[dtarr]]][name[dtind]], name[blrange]]]
keyword[def] identifier[correct_dmdt] ( identifier[d] , identifier[dmind] , identifier[dtind] , identifier[blrange] ): literal[string] identifier[data] = identifier[numpyview] ( identifier[data_mem] , literal[string] , identifier[datashape] ( identifier[d] )) identifier[data_resamp] = identifier[numpyview] ( identifier[data_resamp_mem] , literal[string] , identifier[datashape] ( identifier[d] )) identifier[bl0] , identifier[bl1] = identifier[blrange] identifier[data_resamp] [:, identifier[bl0] : identifier[bl1] ]= identifier[data] [:, identifier[bl0] : identifier[bl1] ] identifier[rtlib] . identifier[dedisperse_resample] ( identifier[data_resamp] , identifier[d] [ literal[string] ], identifier[d] [ literal[string] ], identifier[d] [ literal[string] ][ identifier[dmind] ], identifier[d] [ literal[string] ][ identifier[dtind] ], identifier[blrange] , identifier[verbose] = literal[int] )
def correct_dmdt(d, dmind, dtind, blrange):
    """ Dedisperses and resamples data *in place*.
    Drops edges, since it assumes that data is read with overlapping chunks in time.
    """
    data = numpyview(data_mem, 'complex64', datashape(d))
    data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
    (bl0, bl1) = blrange
    data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
    rtlib.dedisperse_resample(data_resamp, d['freq'], d['inttime'], d['dmarr'][dmind], d['dtarr'][dtind], blrange, verbose=0)
def _supports(self, data):
    """ Simply checks if data is supported """
    if isinstance(data, Quantity):
        return True
    elif super(Brian2Result, self)._supports(data):
        return True
    return False
def function[_supports, parameter[self, data]]: constant[ Simply checks if data is supported ] if call[name[isinstance], parameter[name[data], name[Quantity]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[_supports] ( identifier[self] , identifier[data] ): literal[string] keyword[if] identifier[isinstance] ( identifier[data] , identifier[Quantity] ): keyword[return] keyword[True] keyword[elif] identifier[super] ( identifier[Brian2Result] , identifier[self] ). identifier[_supports] ( identifier[data] ): keyword[return] keyword[True] keyword[return] keyword[False]
def _supports(self, data):
    """ Simply checks if data is supported """
    if isinstance(data, Quantity):
        return True # depends on [control=['if'], data=[]]
    elif super(Brian2Result, self)._supports(data):
        return True # depends on [control=['if'], data=[]]
    return False
def check_plan_id(self, plan_id) -> bool:
    """
    Checks that the plan_id exists in the catalog
    :return: boolean
    """
    for plan in self.catalog().plans:
        if plan.id == plan_id:
            return True
    return False
def function[check_plan_id, parameter[self, plan_id]]: constant[ Checks that the plan_id exists in the catalog :return: boolean ] for taget[name[plan]] in starred[call[name[self].catalog, parameter[]].plans] begin[:] if compare[name[plan].id equal[==] name[plan_id]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[check_plan_id] ( identifier[self] , identifier[plan_id] )-> identifier[bool] : literal[string] keyword[for] identifier[plan] keyword[in] identifier[self] . identifier[catalog] (). identifier[plans] : keyword[if] identifier[plan] . identifier[id] == identifier[plan_id] : keyword[return] keyword[True] keyword[return] keyword[False]
def check_plan_id(self, plan_id) -> bool:
    """
    Checks that the plan_id exists in the catalog
    :return: boolean
    """
    for plan in self.catalog().plans:
        if plan.id == plan_id:
            return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['plan']]
    return False
def link_blob_into_repository(self, session, digest, source_repo, target_repo):
    """
    Links ("mounts" in Docker Registry terminology) a blob from one repository
    in a registry into another repository in the same registry.
    """
    self.log.debug("%s: Linking blob %s from %s to %s",
                   session.registry, digest, source_repo, target_repo)

    # Check that it exists in the source repository
    url = "/v2/{}/blobs/{}".format(source_repo, digest)
    result = session.head(url)
    if result.status_code == requests.codes.NOT_FOUND:
        self.log.debug("%s: blob %s, not present in %s, skipping",
                       session.registry, digest, source_repo)
        # Assume we don't need to copy it - maybe it's a foreign layer
        return
    result.raise_for_status()

    url = "/v2/{}/blobs/uploads/?mount={}&from={}".format(target_repo, digest, source_repo)
    result = session.post(url, data='')
    result.raise_for_status()

    if result.status_code != requests.codes.CREATED:
        # A 202-Accepted would mean that the source blob didn't exist and
        # we're starting an upload - but we've checked that above
        raise RuntimeError("Blob mount had unexpected status {}".format(result.status_code))
def function[link_blob_into_repository, parameter[self, session, digest, source_repo, target_repo]]: constant[ Links ("mounts" in Docker Registry terminology) a blob from one repository in a registry into another repository in the same registry. ] call[name[self].log.debug, parameter[constant[%s: Linking blob %s from %s to %s], name[session].registry, name[digest], name[source_repo], name[target_repo]]] variable[url] assign[=] call[constant[/v2/{}/blobs/{}].format, parameter[name[source_repo], name[digest]]] variable[result] assign[=] call[name[session].head, parameter[name[url]]] if compare[name[result].status_code equal[==] name[requests].codes.NOT_FOUND] begin[:] call[name[self].log.debug, parameter[constant[%s: blob %s, not present in %s, skipping], name[session].registry, name[digest], name[source_repo]]] return[None] call[name[result].raise_for_status, parameter[]] variable[url] assign[=] call[constant[/v2/{}/blobs/uploads/?mount={}&from={}].format, parameter[name[target_repo], name[digest], name[source_repo]]] variable[result] assign[=] call[name[session].post, parameter[name[url]]] call[name[result].raise_for_status, parameter[]] if compare[name[result].status_code not_equal[!=] name[requests].codes.CREATED] begin[:] <ast.Raise object at 0x7da20c993730>
keyword[def] identifier[link_blob_into_repository] ( identifier[self] , identifier[session] , identifier[digest] , identifier[source_repo] , identifier[target_repo] ): literal[string] identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[session] . identifier[registry] , identifier[digest] , identifier[source_repo] , identifier[target_repo] ) identifier[url] = literal[string] . identifier[format] ( identifier[source_repo] , identifier[digest] ) identifier[result] = identifier[session] . identifier[head] ( identifier[url] ) keyword[if] identifier[result] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[NOT_FOUND] : identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[session] . identifier[registry] , identifier[digest] , identifier[source_repo] ) keyword[return] identifier[result] . identifier[raise_for_status] () identifier[url] = literal[string] . identifier[format] ( identifier[target_repo] , identifier[digest] , identifier[source_repo] ) identifier[result] = identifier[session] . identifier[post] ( identifier[url] , identifier[data] = literal[string] ) identifier[result] . identifier[raise_for_status] () keyword[if] identifier[result] . identifier[status_code] != identifier[requests] . identifier[codes] . identifier[CREATED] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[result] . identifier[status_code] ))
def link_blob_into_repository(self, session, digest, source_repo, target_repo):
    """
    Links ("mounts" in Docker Registry terminology) a blob from one repository
    in a registry into another repository in the same registry.
    """
    self.log.debug('%s: Linking blob %s from %s to %s', session.registry, digest, source_repo, target_repo)
    # Check that it exists in the source repository
    url = '/v2/{}/blobs/{}'.format(source_repo, digest)
    result = session.head(url)
    if result.status_code == requests.codes.NOT_FOUND:
        self.log.debug('%s: blob %s, not present in %s, skipping', session.registry, digest, source_repo)
        # Assume we don't need to copy it - maybe it's a foreign layer
        return # depends on [control=['if'], data=[]]
    result.raise_for_status()
    url = '/v2/{}/blobs/uploads/?mount={}&from={}'.format(target_repo, digest, source_repo)
    result = session.post(url, data='')
    result.raise_for_status()
    if result.status_code != requests.codes.CREATED:
        # A 202-Accepted would mean that the source blob didn't exist and
        # we're starting an upload - but we've checked that above
        raise RuntimeError('Blob mount had unexpected status {}'.format(result.status_code)) # depends on [control=['if'], data=[]]
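The code_dependency view tags each statement that executes under a control construct with a trailing # depends on [control=[...], data=[...]] comment, as in the row above. One plausible way to recover the control-nesting half of that annotation with the standard ast module (a sketch only; the semantics of the data=[...] field are not reproduced here):

import ast

def control_stack(source):
    # Map each statement's line number to the if/for/while constructs it is
    # nested under; roughly the 'control' part of the '# depends on' comments.
    deps = {}

    def walk(node, stack):
        for child in ast.iter_child_nodes(node):
            if isinstance(child, ast.stmt) and stack:
                deps[child.lineno] = list(stack)
            tag = {ast.If: 'if', ast.For: 'for', ast.While: 'while'}.get(type(child))
            walk(child, stack + [tag] if tag else stack)

    walk(ast.parse(source), [])
    return deps

print(control_stack('for x in xs:\n    if x:\n        y = x\n'))
# {2: ['for'], 3: ['for', 'if']}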
def p_reset(self, program):
    """
    reset : RESET primary
    """
    program[0] = node.Reset([program[2]])
    self.verify_reg(program[2], 'qreg')
def function[p_reset, parameter[self, program]]: constant[ reset : RESET primary ] call[name[program]][constant[0]] assign[=] call[name[node].Reset, parameter[list[[<ast.Subscript object at 0x7da1b0382440>]]]] call[name[self].verify_reg, parameter[call[name[program]][constant[2]], constant[qreg]]]
keyword[def] identifier[p_reset] ( identifier[self] , identifier[program] ): literal[string] identifier[program] [ literal[int] ]= identifier[node] . identifier[Reset] ([ identifier[program] [ literal[int] ]]) identifier[self] . identifier[verify_reg] ( identifier[program] [ literal[int] ], literal[string] )
def p_reset(self, program):
    """
    reset : RESET primary
    """
    program[0] = node.Reset([program[2]])
    self.verify_reg(program[2], 'qreg')
def Nu_cylinder_Perkins_Leppert_1964(Re, Pr, mu=None, muw=None):
    r'''Calculates Nusselt number for crossflow across a single tube as shown
    in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream
    temperature. Recommends a viscosity exponent correction of 0.25, which is
    applied only if provided. Also shown in [2]_.

    .. math::
        Nu = \left[0.31Re^{0.5} + 0.11Re^{0.67}\right]Pr^{0.4}
        \left(\frac{\mu}{\mu_w}\right)^{0.25}

    Parameters
    ----------
    Re : float
        Reynolds number with respect to cylinder diameter, [-]
    Pr : float
        Prandtl number at free stream temperature, [-]
    mu : float, optional
        Viscosity of fluid at the free stream temperature [Pa*s]
    muw : float, optional
        Viscosity of fluid at the wall temperature [Pa*s]

    Returns
    -------
    Nu : float
        Nusselt number with respect to cylinder diameter, [-]

    Notes
    -----
    Considers new data since `Nu_cylinder_Perkins_Leppert_1962`, Re from 2E3
    to 1.2E5, Pr from 1 to 7, and surface to bulk temperature differences of
    11 to 66.

    Examples
    --------
    >>> Nu_cylinder_Perkins_Leppert_1964(6071, 0.7)
    53.61767038619986

    References
    ----------
    .. [1] Perkins Jr., H. C., and G. Leppert. "Local Heat-Transfer
       Coefficients on a Uniformly Heated Cylinder." International Journal of
       Heat and Mass Transfer 7, no. 2 (February 1964): 143-158.
       doi:10.1016/0017-9310(64)90079-1.
    .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer
       from a Circular Cylinder in Crossflow to Air and Liquids." International
       Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805.
       doi:10.1016/j.ijheatmasstransfer.2004.05.012.
    '''
    Nu = (0.31*Re**0.5 + 0.11*Re**0.67)*Pr**0.4
    if mu and muw:
        Nu *= (mu/muw)**0.25
    return Nu
def function[Nu_cylinder_Perkins_Leppert_1964, parameter[Re, Pr, mu, muw]]: constant[Calculates Nusselt number for crossflow across a single tube as shown in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream temperature. Recommends a viscosity exponent correction of 0.25, which is applied only if provided. Also shown in [2]_. .. math:: Nu = \left[0.31Re^{0.5} + 0.11Re^{0.67}\right]Pr^{0.4} \left(\frac{\mu}{\mu_w}\right)^{0.25} Parameters ---------- Re : float Reynolds number with respect to cylinder diameter, [-] Pr : float Prandtl number at free stream temperature, [-] mu : float, optional Viscosity of fluid at the free stream temperature [Pa*s] muw : float, optional Viscosity of fluid at the wall temperature [Pa*s] Returns ------- Nu : float Nusselt number with respect to cylinder diameter, [-] Notes ----- Considers new data since `Nu_cylinder_Perkins_Leppert_1962`, Re from 2E3 to 1.2E5, Pr from 1 to 7, and surface to bulk temperature differences of 11 to 66. Examples -------- >>> Nu_cylinder_Perkins_Leppert_1964(6071, 0.7) 53.61767038619986 References ---------- .. [1] Perkins Jr., H. C., and G. Leppert. "Local Heat-Transfer Coefficients on a Uniformly Heated Cylinder." International Journal of Heat and Mass Transfer 7, no. 2 (February 1964): 143-158. doi:10.1016/0017-9310(64)90079-1. .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer from a Circular Cylinder in Crossflow to Air and Liquids." International Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805. doi:10.1016/j.ijheatmasstransfer.2004.05.012. ] variable[Nu] assign[=] binary_operation[binary_operation[binary_operation[constant[0.31] * binary_operation[name[Re] ** constant[0.5]]] + binary_operation[constant[0.11] * binary_operation[name[Re] ** constant[0.67]]]] * binary_operation[name[Pr] ** constant[0.4]]] if <ast.BoolOp object at 0x7da18bcc80d0> begin[:] <ast.AugAssign object at 0x7da18bccb670> return[name[Nu]]
keyword[def] identifier[Nu_cylinder_Perkins_Leppert_1964] ( identifier[Re] , identifier[Pr] , identifier[mu] = keyword[None] , identifier[muw] = keyword[None] ): literal[string] identifier[Nu] =( literal[int] * identifier[Re] ** literal[int] + literal[int] * identifier[Re] ** literal[int] )* identifier[Pr] ** literal[int] keyword[if] identifier[mu] keyword[and] identifier[muw] : identifier[Nu] *=( identifier[mu] / identifier[muw] )** literal[int] keyword[return] identifier[Nu]
def Nu_cylinder_Perkins_Leppert_1964(Re, Pr, mu=None, muw=None):
    """Calculates Nusselt number for crossflow across a single tube as shown
    in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream
    temperature. Recommends a viscosity exponent correction of 0.25, which is
    applied only if provided. Also shown in [2]_.

    .. math::
        Nu = \\left[0.31Re^{0.5} + 0.11Re^{0.67}\\right]Pr^{0.4}
        \\left(\\frac{\\mu}{\\mu_w}\\right)^{0.25}

    Parameters
    ----------
    Re : float
        Reynolds number with respect to cylinder diameter, [-]
    Pr : float
        Prandtl number at free stream temperature, [-]
    mu : float, optional
        Viscosity of fluid at the free stream temperature [Pa*s]
    muw : float, optional
        Viscosity of fluid at the wall temperature [Pa*s]

    Returns
    -------
    Nu : float
        Nusselt number with respect to cylinder diameter, [-]

    Notes
    -----
    Considers new data since `Nu_cylinder_Perkins_Leppert_1962`, Re from 2E3
    to 1.2E5, Pr from 1 to 7, and surface to bulk temperature differences of
    11 to 66.

    Examples
    --------
    >>> Nu_cylinder_Perkins_Leppert_1964(6071, 0.7)
    53.61767038619986

    References
    ----------
    .. [1] Perkins Jr., H. C., and G. Leppert. "Local Heat-Transfer
       Coefficients on a Uniformly Heated Cylinder." International Journal of
       Heat and Mass Transfer 7, no. 2 (February 1964): 143-158.
       doi:10.1016/0017-9310(64)90079-1.
    .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer
       from a Circular Cylinder in Crossflow to Air and Liquids." International
       Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805.
       doi:10.1016/j.ijheatmasstransfer.2004.05.012.
    """
    Nu = (0.31 * Re ** 0.5 + 0.11 * Re ** 0.67) * Pr ** 0.4
    if mu and muw:
        Nu *= (mu / muw) ** 0.25 # depends on [control=['if'], data=[]]
    return Nu
def run_git_shell(cls, cmd, cwd=None):
    """
    Runs git shell command, reads output and decodes it into unicode string.

    @param cmd: Command to be executed.
    @type cmd: str

    @type cwd: str
    @param cwd: Working directory.

    @rtype: str
    @return: Output of the command.

    @raise CalledProcessError: Raises exception if return code of the command is non-zero.
    """
    p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
    output, _ = p.communicate()
    output = cls.decode_git_output(output)

    if p.returncode:
        if sys.version_info > (2, 6):
            raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output)
        else:
            raise CalledProcessError(returncode=p.returncode, cmd=cmd)

    return output
def function[run_git_shell, parameter[cls, cmd, cwd]]: constant[ Runs git shell command, reads output and decodes it into unicode string. @param cmd: Command to be executed. @type cmd: str @type cwd: str @param cwd: Working directory. @rtype: str @return: Output of the command. @raise CalledProcessError: Raises exception if return code of the command is non-zero. ] variable[p] assign[=] call[name[Popen], parameter[name[cmd]]] <ast.Tuple object at 0x7da1b07e0a60> assign[=] call[name[p].communicate, parameter[]] variable[output] assign[=] call[name[cls].decode_git_output, parameter[name[output]]] if name[p].returncode begin[:] if compare[name[sys].version_info greater[>] tuple[[<ast.Constant object at 0x7da1b07e0520>, <ast.Constant object at 0x7da1b07e04f0>]]] begin[:] <ast.Raise object at 0x7da1b07e0490> return[name[output]]
keyword[def] identifier[run_git_shell] ( identifier[cls] , identifier[cmd] , identifier[cwd] = keyword[None] ): literal[string] identifier[p] = identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[stdout] = identifier[PIPE] , identifier[cwd] = identifier[cwd] ) identifier[output] , identifier[_] = identifier[p] . identifier[communicate] () identifier[output] = identifier[cls] . identifier[decode_git_output] ( identifier[output] ) keyword[if] identifier[p] . identifier[returncode] : keyword[if] identifier[sys] . identifier[version_info] >( literal[int] , literal[int] ): keyword[raise] identifier[CalledProcessError] ( identifier[returncode] = identifier[p] . identifier[returncode] , identifier[cmd] = identifier[cmd] , identifier[output] = identifier[output] ) keyword[else] : keyword[raise] identifier[CalledProcessError] ( identifier[returncode] = identifier[p] . identifier[returncode] , identifier[cmd] = identifier[cmd] ) keyword[return] identifier[output]
def run_git_shell(cls, cmd, cwd=None):
    """
    Runs git shell command, reads output and decodes it into unicode string.

    @param cmd: Command to be executed.
    @type cmd: str

    @type cwd: str
    @param cwd: Working directory.

    @rtype: str
    @return: Output of the command.

    @raise CalledProcessError: Raises exception if return code of the command is non-zero.
    """
    p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
    (output, _) = p.communicate()
    output = cls.decode_git_output(output)
    if p.returncode:
        if sys.version_info > (2, 6):
            raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output) # depends on [control=['if'], data=[]]
        else:
            raise CalledProcessError(returncode=p.returncode, cmd=cmd) # depends on [control=['if'], data=[]]
    return output
def summary(obj, indent=0):
    '''Helper function to format repr strings for JObjects and friends.

    Parameters
    ----------
    obj
        The object to repr

    indent : int >= 0
        indent each new line by `indent` spaces

    Returns
    -------
    r : str
        If `obj` has a `__summary__` method, it is used.

        If `obj` is a `SortedKeyList`, then it returns a description
        of the length of the list.

        Otherwise, `repr(obj)`.
    '''
    if hasattr(obj, '__summary__'):
        rep = obj.__summary__()
    elif isinstance(obj, SortedKeyList):
        rep = '<{:d} observations>'.format(len(obj))
    else:
        rep = repr(obj)

    return rep.replace('\n', '\n' + ' ' * indent)
def function[summary, parameter[obj, indent]]: constant[Helper function to format repr strings for JObjects and friends. Parameters ---------- obj The object to repr indent : int >= 0 indent each new line by `indent` spaces Returns ------- r : str If `obj` has a `__summary__` method, it is used. If `obj` is a `SortedKeyList`, then it returns a description of the length of the list. Otherwise, `repr(obj)`. ] if call[name[hasattr], parameter[name[obj], constant[__summary__]]] begin[:] variable[rep] assign[=] call[name[obj].__summary__, parameter[]] return[call[name[rep].replace, parameter[constant[ ], binary_operation[constant[ ] + binary_operation[constant[ ] * name[indent]]]]]]
keyword[def] identifier[summary] ( identifier[obj] , identifier[indent] = literal[int] ): literal[string] keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ): identifier[rep] = identifier[obj] . identifier[__summary__] () keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[SortedKeyList] ): identifier[rep] = literal[string] . identifier[format] ( identifier[len] ( identifier[obj] )) keyword[else] : identifier[rep] = identifier[repr] ( identifier[obj] ) keyword[return] identifier[rep] . identifier[replace] ( literal[string] , literal[string] + literal[string] * identifier[indent] )
def summary(obj, indent=0):
    """Helper function to format repr strings for JObjects and friends.

    Parameters
    ----------
    obj
        The object to repr

    indent : int >= 0
        indent each new line by `indent` spaces

    Returns
    -------
    r : str
        If `obj` has a `__summary__` method, it is used.

        If `obj` is a `SortedKeyList`, then it returns a description
        of the length of the list.

        Otherwise, `repr(obj)`.
    """
    if hasattr(obj, '__summary__'):
        rep = obj.__summary__() # depends on [control=['if'], data=[]]
    elif isinstance(obj, SortedKeyList):
        rep = '<{:d} observations>'.format(len(obj)) # depends on [control=['if'], data=[]]
    else:
        rep = repr(obj)
    return rep.replace('\n', '\n' + ' ' * indent)
def fit(self, Z):
    """Learn a list of feature name -> indices mappings.

    Parameters
    ----------
    Z : DictRDD with column 'X'
        Dict(s) or Mapping(s) from feature names (arbitrary Python
        objects) to feature values (strings or convertible to dtype).

    Returns
    -------
    self
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z

    """Create vocabulary
    """
    class SetAccum(AccumulatorParam):

        def zero(self, initialValue):
            return set(initialValue)

        def addInPlace(self, v1, v2):
            v1 |= v2
            return v1

    accum = X.context.accumulator(set(), SetAccum())

    def mapper(X, separator=self.separator):
        feature_names = []
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                feature_names.append(f)
        accum.add(set(feature_names))

    X.foreach(mapper)  # init vocabulary
    feature_names = list(accum.value)

    if self.sort:
        feature_names.sort()

    vocab = dict((f, i) for i, f in enumerate(feature_names))
    self.feature_names_ = feature_names
    self.vocabulary_ = vocab

    return self
def function[fit, parameter[self, Z]]: constant[Learn a list of feature name -> indices mappings. Parameters ---------- Z : DictRDD with column 'X' Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). Returns ------- self ] variable[X] assign[=] <ast.IfExp object at 0x7da20c6c7250> constant[Create vocabulary ] class class[SetAccum, parameter[]] begin[:] def function[zero, parameter[self, initialValue]]: return[call[name[set], parameter[name[initialValue]]]] def function[addInPlace, parameter[self, v1, v2]]: <ast.AugAssign object at 0x7da20c6c61d0> return[name[v1]] variable[accum] assign[=] call[name[X].context.accumulator, parameter[call[name[set], parameter[]], call[name[SetAccum], parameter[]]]] def function[mapper, parameter[X, separator]]: variable[feature_names] assign[=] list[[]] for taget[name[x]] in starred[name[X]] begin[:] for taget[tuple[[<ast.Name object at 0x7da20c6c52d0>, <ast.Name object at 0x7da20c6c55a0>]]] in starred[call[name[six].iteritems, parameter[name[x]]]] begin[:] if call[name[isinstance], parameter[name[v], name[six].string_types]] begin[:] variable[f] assign[=] binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c6bf0>, <ast.Attribute object at 0x7da20c6c5360>, <ast.Name object at 0x7da20c6c4130>]]] call[name[feature_names].append, parameter[name[f]]] call[name[accum].add, parameter[call[name[set], parameter[name[feature_names]]]]] call[name[X].foreach, parameter[name[mapper]]] variable[feature_names] assign[=] call[name[list], parameter[name[accum].value]] if name[self].sort begin[:] call[name[feature_names].sort, parameter[]] variable[vocab] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da20c6c4160>]] name[self].feature_names_ assign[=] name[feature_names] name[self].vocabulary_ assign[=] name[vocab] return[name[self]]
keyword[def] identifier[fit] ( identifier[self] , identifier[Z] ): literal[string] identifier[X] = identifier[Z] [:, literal[string] ] keyword[if] identifier[isinstance] ( identifier[Z] , identifier[DictRDD] ) keyword[else] identifier[Z] literal[string] keyword[class] identifier[SetAccum] ( identifier[AccumulatorParam] ): keyword[def] identifier[zero] ( identifier[self] , identifier[initialValue] ): keyword[return] identifier[set] ( identifier[initialValue] ) keyword[def] identifier[addInPlace] ( identifier[self] , identifier[v1] , identifier[v2] ): identifier[v1] |= identifier[v2] keyword[return] identifier[v1] identifier[accum] = identifier[X] . identifier[context] . identifier[accumulator] ( identifier[set] (), identifier[SetAccum] ()) keyword[def] identifier[mapper] ( identifier[X] , identifier[separator] = identifier[self] . identifier[separator] ): identifier[feature_names] =[] keyword[for] identifier[x] keyword[in] identifier[X] : keyword[for] identifier[f] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[x] ): keyword[if] identifier[isinstance] ( identifier[v] , identifier[six] . identifier[string_types] ): identifier[f] = literal[string] %( identifier[f] , identifier[self] . identifier[separator] , identifier[v] ) identifier[feature_names] . identifier[append] ( identifier[f] ) identifier[accum] . identifier[add] ( identifier[set] ( identifier[feature_names] )) identifier[X] . identifier[foreach] ( identifier[mapper] ) identifier[feature_names] = identifier[list] ( identifier[accum] . identifier[value] ) keyword[if] identifier[self] . identifier[sort] : identifier[feature_names] . identifier[sort] () identifier[vocab] = identifier[dict] (( identifier[f] , identifier[i] ) keyword[for] identifier[i] , identifier[f] keyword[in] identifier[enumerate] ( identifier[feature_names] )) identifier[self] . identifier[feature_names_] = identifier[feature_names] identifier[self] . identifier[vocabulary_] = identifier[vocab] keyword[return] identifier[self]
def fit(self, Z):
    """Learn a list of feature name -> indices mappings.

    Parameters
    ----------
    Z : DictRDD with column 'X'
        Dict(s) or Mapping(s) from feature names (arbitrary Python
        objects) to feature values (strings or convertible to dtype).

    Returns
    -------
    self
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
    'Create vocabulary\n '

    class SetAccum(AccumulatorParam):

        def zero(self, initialValue):
            return set(initialValue)

        def addInPlace(self, v1, v2):
            v1 |= v2
            return v1

    accum = X.context.accumulator(set(), SetAccum())

    def mapper(X, separator=self.separator):
        feature_names = []
        for x in X:
            for (f, v) in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = '%s%s%s' % (f, self.separator, v) # depends on [control=['if'], data=[]]
                feature_names.append(f) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['x']]
        accum.add(set(feature_names))

    X.foreach(mapper)  # init vocabulary
    feature_names = list(accum.value)
    if self.sort:
        feature_names.sort() # depends on [control=['if'], data=[]]
    vocab = dict(((f, i) for (i, f) in enumerate(feature_names)))
    self.feature_names_ = feature_names
    self.vocabulary_ = vocab
    return self
def uclust_search_and_align_from_fasta_filepath(query_fasta_filepath,
                                                subject_fasta_filepath,
                                                percent_ID=0.75,
                                                enable_rev_strand_matching=True,
                                                max_accepts=8,
                                                max_rejects=32,
                                                tmp_dir=gettempdir(),
                                                HALT_EXEC=False):
    """ query seqs against subject fasta using uclust,
        return global pw alignment of best match
    """
    # Explanation of parameter settings
    #  id - min percent id to count a match
    #  maxaccepts = 8 , searches for best match rather than first match
    #                   (0 => infinite accepts, or good matches before
    #                   quitting search)
    #  maxaccepts = 32,
    #  libonly = True , does not add sequences to the library if they don't
    #                   match something there already. this effectively makes
    #                   uclust a search tool rather than a clustering tool
    params = {'--id': percent_ID,
              '--maxaccepts': max_accepts,
              '--maxrejects': max_rejects,
              '--libonly': True,
              '--lib': subject_fasta_filepath,
              '--tmpdir': tmp_dir}

    if enable_rev_strand_matching:
        params['--rev'] = True

    # instantiate the application controller
    app = Uclust(params, TmpDir=tmp_dir, HALT_EXEC=HALT_EXEC)

    # apply uclust
    _, alignment_filepath = mkstemp(dir=tmp_dir, prefix='uclust_alignments',
                                    suffix='.fasta')
    _, uc_filepath = mkstemp(dir=tmp_dir, prefix='uclust_results',
                             suffix='.uc')
    input_data = {'--input': query_fasta_filepath,
                  '--fastapairs': alignment_filepath,
                  '--uc': uc_filepath}
    app_result = app(input_data)

    # yield the pairwise alignments
    for result in process_uclust_pw_alignment_results(
            app_result['PairwiseAlignments'], app_result['ClusterFile']):
        try:
            yield result
        except GeneratorExit:
            break

    # clean up the temp files that were generated
    app_result.cleanUp()
    return
def function[uclust_search_and_align_from_fasta_filepath, parameter[query_fasta_filepath, subject_fasta_filepath, percent_ID, enable_rev_strand_matching, max_accepts, max_rejects, tmp_dir, HALT_EXEC]]: constant[ query seqs against subject fasta using uclust, return global pw alignment of best match ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b70af0>, <ast.Constant object at 0x7da1b0b71030>, <ast.Constant object at 0x7da1b0b714b0>, <ast.Constant object at 0x7da1b0b73af0>, <ast.Constant object at 0x7da1b0b71150>, <ast.Constant object at 0x7da1b0b70520>], [<ast.Name object at 0x7da1b0b71bd0>, <ast.Name object at 0x7da1b0b71fc0>, <ast.Name object at 0x7da1b0b71990>, <ast.Constant object at 0x7da1b0b72770>, <ast.Name object at 0x7da1b0b71e70>, <ast.Name object at 0x7da1b0b71300>]] if name[enable_rev_strand_matching] begin[:] call[name[params]][constant[--rev]] assign[=] constant[True] variable[app] assign[=] call[name[Uclust], parameter[name[params]]] <ast.Tuple object at 0x7da1b0b71420> assign[=] call[name[mkstemp], parameter[]] <ast.Tuple object at 0x7da1b0b725f0> assign[=] call[name[mkstemp], parameter[]] variable[input_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b72170>, <ast.Constant object at 0x7da1b0b71a50>, <ast.Constant object at 0x7da1b0b723e0>], [<ast.Name object at 0x7da1b0b82a40>, <ast.Name object at 0x7da1b0b80e20>, <ast.Name object at 0x7da1b0b82200>]] variable[app_result] assign[=] call[name[app], parameter[name[input_data]]] for taget[name[result]] in starred[call[name[process_uclust_pw_alignment_results], parameter[call[name[app_result]][constant[PairwiseAlignments]], call[name[app_result]][constant[ClusterFile]]]]] begin[:] <ast.Try object at 0x7da1b0b812a0> call[name[app_result].cleanUp, parameter[]] return[None]
keyword[def] identifier[uclust_search_and_align_from_fasta_filepath] ( identifier[query_fasta_filepath] , identifier[subject_fasta_filepath] , identifier[percent_ID] = literal[int] , identifier[enable_rev_strand_matching] = keyword[True] , identifier[max_accepts] = literal[int] , identifier[max_rejects] = literal[int] , identifier[tmp_dir] = identifier[gettempdir] (), identifier[HALT_EXEC] = keyword[False] ): literal[string] identifier[params] ={ literal[string] : identifier[percent_ID] , literal[string] : identifier[max_accepts] , literal[string] : identifier[max_rejects] , literal[string] : keyword[True] , literal[string] : identifier[subject_fasta_filepath] , literal[string] : identifier[tmp_dir] } keyword[if] identifier[enable_rev_strand_matching] : identifier[params] [ literal[string] ]= keyword[True] identifier[app] = identifier[Uclust] ( identifier[params] , identifier[TmpDir] = identifier[tmp_dir] , identifier[HALT_EXEC] = identifier[HALT_EXEC] ) identifier[_] , identifier[alignment_filepath] = identifier[mkstemp] ( identifier[dir] = identifier[tmp_dir] , identifier[prefix] = literal[string] , identifier[suffix] = literal[string] ) identifier[_] , identifier[uc_filepath] = identifier[mkstemp] ( identifier[dir] = identifier[tmp_dir] , identifier[prefix] = literal[string] , identifier[suffix] = literal[string] ) identifier[input_data] ={ literal[string] : identifier[query_fasta_filepath] , literal[string] : identifier[alignment_filepath] , literal[string] : identifier[uc_filepath] } identifier[app_result] = identifier[app] ( identifier[input_data] ) keyword[for] identifier[result] keyword[in] identifier[process_uclust_pw_alignment_results] ( identifier[app_result] [ literal[string] ], identifier[app_result] [ literal[string] ]): keyword[try] : keyword[yield] identifier[result] keyword[except] identifier[GeneratorExit] : keyword[break] identifier[app_result] . identifier[cleanUp] () keyword[return]
def uclust_search_and_align_from_fasta_filepath(query_fasta_filepath, subject_fasta_filepath, percent_ID=0.75, enable_rev_strand_matching=True, max_accepts=8, max_rejects=32, tmp_dir=gettempdir(), HALT_EXEC=False):
    """ query seqs against subject fasta using uclust,
        return global pw alignment of best match
    """
    # Explanation of parameter settings
    #  id - min percent id to count a match
    #  maxaccepts = 8 , searches for best match rather than first match
    #                   (0 => infinite accepts, or good matches before
    #                   quitting search)
    #  maxaccepts = 32,
    #  libonly = True , does not add sequences to the library if they don't
    #                   match something there already. this effectively makes
    #                   uclust a search tool rather than a clustering tool
    params = {'--id': percent_ID, '--maxaccepts': max_accepts, '--maxrejects': max_rejects, '--libonly': True, '--lib': subject_fasta_filepath, '--tmpdir': tmp_dir}
    if enable_rev_strand_matching:
        params['--rev'] = True # depends on [control=['if'], data=[]]
    # instantiate the application controller
    app = Uclust(params, TmpDir=tmp_dir, HALT_EXEC=HALT_EXEC)
    # apply uclust
    (_, alignment_filepath) = mkstemp(dir=tmp_dir, prefix='uclust_alignments', suffix='.fasta')
    (_, uc_filepath) = mkstemp(dir=tmp_dir, prefix='uclust_results', suffix='.uc')
    input_data = {'--input': query_fasta_filepath, '--fastapairs': alignment_filepath, '--uc': uc_filepath}
    app_result = app(input_data)
    # yield the pairwise alignments
    for result in process_uclust_pw_alignment_results(app_result['PairwiseAlignments'], app_result['ClusterFile']):
        try:
            yield result # depends on [control=['try'], data=[]]
        except GeneratorExit:
            break # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['result']]
    # clean up the temp files that were generated
    app_result.cleanUp()
    return
def save(self, **kwargs):
    """Custom save method to process thumbnails and save image dimensions."""
    is_new = self.pk is None
    if is_new:
        # Make filenames lowercase
        self.img.name = self.img.name.lower()

    # Call super method
    super(Image, self).save(**kwargs)

    if is_new and self.img:
        data = self.img.read()
        if not data:
            return
        image = Img.open(StringIO.StringIO(data))
        self.width, self.height = image.size
        super(Image, self).save()

        name = self.get_name()
        ext = self.get_extension()
        for size in self.SIZES.keys():
            self.save_thumbnail(image, self.SIZES[size], name, size, ext)
def function[save, parameter[self]]: constant[Custom save method to process thumbnails and save image dimensions.] variable[is_new] assign[=] compare[name[self].pk is constant[None]] if name[is_new] begin[:] name[self].img.name assign[=] call[name[self].img.name.lower, parameter[]] call[call[name[super], parameter[name[Image], name[self]]].save, parameter[]] if <ast.BoolOp object at 0x7da18f723370> begin[:] variable[data] assign[=] call[name[self].img.read, parameter[]] if <ast.UnaryOp object at 0x7da18f721ea0> begin[:] return[None] variable[image] assign[=] call[name[Img].open, parameter[call[name[StringIO].StringIO, parameter[name[data]]]]] <ast.Tuple object at 0x7da18fe93be0> assign[=] name[image].size call[call[name[super], parameter[name[Image], name[self]]].save, parameter[]] variable[name] assign[=] call[name[self].get_name, parameter[]] variable[ext] assign[=] call[name[self].get_extension, parameter[]] for taget[name[size]] in starred[call[name[self].SIZES.keys, parameter[]]] begin[:] call[name[self].save_thumbnail, parameter[name[image], call[name[self].SIZES][name[size]], name[name], name[size], name[ext]]]
keyword[def] identifier[save] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[is_new] = identifier[self] . identifier[pk] keyword[is] keyword[None] keyword[if] identifier[is_new] : identifier[self] . identifier[img] . identifier[name] = identifier[self] . identifier[img] . identifier[name] . identifier[lower] () identifier[super] ( identifier[Image] , identifier[self] ). identifier[save] (** identifier[kwargs] ) keyword[if] identifier[is_new] keyword[and] identifier[self] . identifier[img] : identifier[data] = identifier[self] . identifier[img] . identifier[read] () keyword[if] keyword[not] identifier[data] : keyword[return] identifier[image] = identifier[Img] . identifier[open] ( identifier[StringIO] . identifier[StringIO] ( identifier[data] )) identifier[self] . identifier[width] , identifier[self] . identifier[height] = identifier[image] . identifier[size] identifier[super] ( identifier[Image] , identifier[self] ). identifier[save] () identifier[name] = identifier[self] . identifier[get_name] () identifier[ext] = identifier[self] . identifier[get_extension] () keyword[for] identifier[size] keyword[in] identifier[self] . identifier[SIZES] . identifier[keys] (): identifier[self] . identifier[save_thumbnail] ( identifier[image] , identifier[self] . identifier[SIZES] [ identifier[size] ], identifier[name] , identifier[size] , identifier[ext] )
def save(self, **kwargs):
    """Custom save method to process thumbnails and save image dimensions."""
    is_new = self.pk is None
    if is_new:
        # Make filenames lowercase
        self.img.name = self.img.name.lower() # depends on [control=['if'], data=[]]
    # Call super method
    super(Image, self).save(**kwargs)
    if is_new and self.img:
        data = self.img.read()
        if not data:
            return # depends on [control=['if'], data=[]]
        image = Img.open(StringIO.StringIO(data))
        (self.width, self.height) = image.size
        super(Image, self).save()
        name = self.get_name()
        ext = self.get_extension()
        for size in self.SIZES.keys():
            self.save_thumbnail(image, self.SIZES[size], name, size, ext) # depends on [control=['for'], data=['size']] # depends on [control=['if'], data=[]]
def connect(self):
    """
    Creates connection to RabbitMQ server
    """
    if self.connecting:
        log.info('PikaClient: Already connecting to RabbitMQ')
        return

    log.info('PikaClient: Connecting to RabbitMQ')
    self.connecting = True

    self.connection = TornadoConnection(NON_BLOCKING_MQ_PARAMS,
                                        stop_ioloop_on_close=False,
                                        custom_ioloop=self.io_loop,
                                        on_open_callback=self.on_connected)
def function[connect, parameter[self]]: constant[ Creates connection to RabbitMQ server ] if name[self].connecting begin[:] call[name[log].info, parameter[constant[PikaClient: Already connecting to RabbitMQ]]] return[None] call[name[log].info, parameter[constant[PikaClient: Connecting to RabbitMQ]]] name[self].connecting assign[=] constant[True] name[self].connection assign[=] call[name[TornadoConnection], parameter[name[NON_BLOCKING_MQ_PARAMS]]]
keyword[def] identifier[connect] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[connecting] : identifier[log] . identifier[info] ( literal[string] ) keyword[return] identifier[log] . identifier[info] ( literal[string] ) identifier[self] . identifier[connecting] = keyword[True] identifier[self] . identifier[connection] = identifier[TornadoConnection] ( identifier[NON_BLOCKING_MQ_PARAMS] , identifier[stop_ioloop_on_close] = keyword[False] , identifier[custom_ioloop] = identifier[self] . identifier[io_loop] , identifier[on_open_callback] = identifier[self] . identifier[on_connected] )
def connect(self):
    """
    Creates connection to RabbitMQ server
    """
    if self.connecting:
        log.info('PikaClient: Already connecting to RabbitMQ')
        return # depends on [control=['if'], data=[]]
    log.info('PikaClient: Connecting to RabbitMQ')
    self.connecting = True
    self.connection = TornadoConnection(NON_BLOCKING_MQ_PARAMS, stop_ioloop_on_close=False, custom_ioloop=self.io_loop, on_open_callback=self.on_connected)
def call_hook(self, hook, *args, **kwargs):
    """ Calls each registered hook """
    for function in self.hooks[hook]:
        function.__call__(*args, **kwargs)
def function[call_hook, parameter[self, hook]]: constant[ Calls each registered hook ] for taget[name[function]] in starred[call[name[self].hooks][name[hook]]] begin[:] call[name[function].__call__, parameter[<ast.Starred object at 0x7da1b1914970>]]
keyword[def] identifier[call_hook] ( identifier[self] , identifier[hook] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[for] identifier[function] keyword[in] identifier[self] . identifier[hooks] [ identifier[hook] ]: identifier[function] . identifier[__call__] (* identifier[args] ,** identifier[kwargs] )
def call_hook(self, hook, *args, **kwargs):
    """ Calls each registered hook """
    for function in self.hooks[hook]:
        function.__call__(*args, **kwargs) # depends on [control=['for'], data=['function']]
def observer_update(self, message):
    """Called when update from observer is received."""
    # Demultiplex observer update into multiple messages.
    for action in ('added', 'changed', 'removed'):
        for item in message[action]:
            self.send_json(
                {
                    'msg': action,
                    'observer': message['observer'],
                    'primary_key': message['primary_key'],
                    'order': item['order'],
                    'item': item['data'],
                }
            )
def function[observer_update, parameter[self, message]]: constant[Called when update from observer is received.] for taget[name[action]] in starred[tuple[[<ast.Constant object at 0x7da20e9575b0>, <ast.Constant object at 0x7da20e9569e0>, <ast.Constant object at 0x7da20e957520>]]] begin[:] for taget[name[item]] in starred[call[name[message]][name[action]]] begin[:] call[name[self].send_json, parameter[dictionary[[<ast.Constant object at 0x7da20e954400>, <ast.Constant object at 0x7da20e9562c0>, <ast.Constant object at 0x7da20e954460>, <ast.Constant object at 0x7da20e957490>, <ast.Constant object at 0x7da20e954100>], [<ast.Name object at 0x7da20e955120>, <ast.Subscript object at 0x7da20e9579d0>, <ast.Subscript object at 0x7da20e955300>, <ast.Subscript object at 0x7da20e957700>, <ast.Subscript object at 0x7da20e956bf0>]]]]
keyword[def] identifier[observer_update] ( identifier[self] , identifier[message] ): literal[string] keyword[for] identifier[action] keyword[in] ( literal[string] , literal[string] , literal[string] ): keyword[for] identifier[item] keyword[in] identifier[message] [ identifier[action] ]: identifier[self] . identifier[send_json] ( { literal[string] : identifier[action] , literal[string] : identifier[message] [ literal[string] ], literal[string] : identifier[message] [ literal[string] ], literal[string] : identifier[item] [ literal[string] ], literal[string] : identifier[item] [ literal[string] ], } )
def observer_update(self, message):
    """Called when update from observer is received."""
    # Demultiplex observer update into multiple messages.
    for action in ('added', 'changed', 'removed'):
        for item in message[action]:
            self.send_json({'msg': action, 'observer': message['observer'], 'primary_key': message['primary_key'], 'order': item['order'], 'item': item['data']}) # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['action']]
def PyDbLite_to_csv(src,dest=None,dialect='excel'):
    """Convert a PyDbLite base to a CSV file
    src is the PyDbLite.Base instance
    dest is the file-like object for the CSV output
    dialect is the same as in csv module"""
    import csv
    fieldnames = ["__id__","__version__"]+src.fields
    if dest is None:
        dest = open(os.path.splitext(src.name)[0]+'.csv','w')
    w = csv.DictWriter(dest,fieldnames,dialect)
    first = dict([(f,f) for f in fieldnames])
    w.writerow(first) # first row has the field names
    for r in src:
        if not "__version__" in r:
            r["__version__"] = 0
        w.writerow(r)
    dest.close()
def function[PyDbLite_to_csv, parameter[src, dest, dialect]]: constant[Convert a PyDbLite base to a CSV file src is the PyDbLite.Base instance dest is the file-like object for the CSV output dialect is the same as in csv module] import module[csv] variable[fieldnames] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b09eece0>, <ast.Constant object at 0x7da1b09ef460>]] + name[src].fields] if compare[name[dest] is constant[None]] begin[:] variable[dest] assign[=] call[name[open], parameter[binary_operation[call[call[name[os].path.splitext, parameter[name[src].name]]][constant[0]] + constant[.csv]], constant[w]]] variable[w] assign[=] call[name[csv].DictWriter, parameter[name[dest], name[fieldnames], name[dialect]]] variable[first] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b09ef730>]] call[name[w].writerow, parameter[name[first]]] for taget[name[r]] in starred[name[src]] begin[:] if <ast.UnaryOp object at 0x7da1b09ec130> begin[:] call[name[r]][constant[__version__]] assign[=] constant[0] call[name[w].writerow, parameter[name[r]]] call[name[dest].close, parameter[]]
keyword[def] identifier[PyDbLite_to_csv] ( identifier[src] , identifier[dest] = keyword[None] , identifier[dialect] = literal[string] ): literal[string] keyword[import] identifier[csv] identifier[fieldnames] =[ literal[string] , literal[string] ]+ identifier[src] . identifier[fields] keyword[if] identifier[dest] keyword[is] keyword[None] : identifier[dest] = identifier[open] ( identifier[os] . identifier[path] . identifier[splitext] ( identifier[src] . identifier[name] )[ literal[int] ]+ literal[string] , literal[string] ) identifier[w] = identifier[csv] . identifier[DictWriter] ( identifier[dest] , identifier[fieldnames] , identifier[dialect] ) identifier[first] = identifier[dict] ([( identifier[f] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[fieldnames] ]) identifier[w] . identifier[writerow] ( identifier[first] ) keyword[for] identifier[r] keyword[in] identifier[src] : keyword[if] keyword[not] literal[string] keyword[in] identifier[r] : identifier[r] [ literal[string] ]= literal[int] identifier[w] . identifier[writerow] ( identifier[r] ) identifier[dest] . identifier[close] ()
def PyDbLite_to_csv(src, dest=None, dialect='excel'):
    """Convert a PyDbLite base to a CSV file
    src is the PyDbLite.Base instance
    dest is the file-like object for the CSV output
    dialect is the same as in csv module"""
    import csv
    fieldnames = ['__id__', '__version__'] + src.fields
    if dest is None:
        dest = open(os.path.splitext(src.name)[0] + '.csv', 'w') # depends on [control=['if'], data=['dest']]
    w = csv.DictWriter(dest, fieldnames, dialect)
    first = dict([(f, f) for f in fieldnames])
    w.writerow(first) # first row has the field names
    for r in src:
        if not '__version__' in r:
            r['__version__'] = 0 # depends on [control=['if'], data=[]]
        w.writerow(r) # depends on [control=['for'], data=['r']]
    dest.close()
def show(self):
    """ When dynamic, not all argument values may be available. """
    copied = self.copy()
    enumerated = [el for el in enumerate(copied)]
    for (group_ind, specs) in enumerated:
        if len(enumerated) > 1:
            print("Group %d" % group_ind)
        ordering = self.constant_keys + self.varying_keys  # Ordered nicely by varying_keys definition.
        spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering]) for s in specs]
        print('\n'.join(['%d: %s' % (i,l) for (i,l) in enumerate(spec_lines)]))
    print('Remaining arguments not available for %s' % self.__class__.__name__)
def function[show, parameter[self]]: constant[ When dynamic, not all argument values may be available. ] variable[copied] assign[=] call[name[self].copy, parameter[]] variable[enumerated] assign[=] <ast.ListComp object at 0x7da1afea6560> for taget[tuple[[<ast.Name object at 0x7da1afea5870>, <ast.Name object at 0x7da1afea5510>]]] in starred[name[enumerated]] begin[:] if compare[call[name[len], parameter[name[enumerated]]] greater[>] constant[1]] begin[:] call[name[print], parameter[binary_operation[constant[Group %d] <ast.Mod object at 0x7da2590d6920> name[group_ind]]]] variable[ordering] assign[=] binary_operation[name[self].constant_keys + name[self].varying_keys] variable[spec_lines] assign[=] <ast.ListComp object at 0x7da1afea4af0> call[name[print], parameter[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1afea6a10>]]]] call[name[print], parameter[binary_operation[constant[Remaining arguments not available for %s] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__]]]
keyword[def] identifier[show] ( identifier[self] ): literal[string] identifier[copied] = identifier[self] . identifier[copy] () identifier[enumerated] =[ identifier[el] keyword[for] identifier[el] keyword[in] identifier[enumerate] ( identifier[copied] )] keyword[for] ( identifier[group_ind] , identifier[specs] ) keyword[in] identifier[enumerated] : keyword[if] identifier[len] ( identifier[enumerated] )> literal[int] : identifier[print] ( literal[string] % identifier[group_ind] ) identifier[ordering] = identifier[self] . identifier[constant_keys] + identifier[self] . identifier[varying_keys] identifier[spec_lines] =[ literal[string] . identifier[join] ([ literal[string] %( identifier[k] , identifier[s] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[ordering] ]) keyword[for] identifier[s] keyword[in] identifier[specs] ] identifier[print] ( literal[string] . identifier[join] ([ literal[string] %( identifier[i] , identifier[l] ) keyword[for] ( identifier[i] , identifier[l] ) keyword[in] identifier[enumerate] ( identifier[spec_lines] )])) identifier[print] ( literal[string] % identifier[self] . identifier[__class__] . identifier[__name__] )
def show(self):
    """
    When dynamic, not all argument values may be available.
    """
    copied = self.copy()
    enumerated = [el for el in enumerate(copied)]
    for (group_ind, specs) in enumerated:
        if len(enumerated) > 1:
            print('Group %d' % group_ind) # depends on [control=['if'], data=[]]
        ordering = self.constant_keys + self.varying_keys  # Ordered nicely by varying_keys definition.
        spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering]) for s in specs]
        print('\n'.join(['%d: %s' % (i, l) for (i, l) in enumerate(spec_lines)])) # depends on [control=['for'], data=[]]
    print('Remaining arguments not available for %s' % self.__class__.__name__)
def _termination_callback(self, process_name, returncode):
    """
    Called when the process has stopped.

    :param returncode: Process returncode
    """
    self._terminate_process_iou()
    if returncode != 0:
        if returncode == -11:
            message = 'IOU VM "{}" process has stopped with return code: {} (segfault). This could be an issue with the IOU image, using a different image may fix this.\n{}'.format(self.name, returncode, self.read_iou_stdout())
        else:
            message = 'IOU VM "{}" process has stopped with return code: {}\n{}'.format(self.name, returncode, self.read_iou_stdout())
        log.warning(message)
        self.project.emit("log.error", {"message": message})
    if self._telnet_server:
        self._telnet_server.close()
        self._telnet_server = None
def function[_termination_callback, parameter[self, process_name, returncode]]: constant[ Called when the process has stopped. :param returncode: Process returncode ] call[name[self]._terminate_process_iou, parameter[]] if compare[name[returncode] not_equal[!=] constant[0]] begin[:] if compare[name[returncode] equal[==] <ast.UnaryOp object at 0x7da18f722dd0>] begin[:] variable[message] assign[=] call[constant[IOU VM "{}" process has stopped with return code: {} (segfault). This could be an issue with the IOU image, using a different image may fix this. {}].format, parameter[name[self].name, name[returncode], call[name[self].read_iou_stdout, parameter[]]]] call[name[log].warning, parameter[name[message]]] call[name[self].project.emit, parameter[constant[log.error], dictionary[[<ast.Constant object at 0x7da2049624d0>], [<ast.Name object at 0x7da204960850>]]]] if name[self]._telnet_server begin[:] call[name[self]._telnet_server.close, parameter[]] name[self]._telnet_server assign[=] constant[None]
keyword[def] identifier[_termination_callback] ( identifier[self] , identifier[process_name] , identifier[returncode] ): literal[string] identifier[self] . identifier[_terminate_process_iou] () keyword[if] identifier[returncode] != literal[int] : keyword[if] identifier[returncode] ==- literal[int] : identifier[message] = literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[returncode] , identifier[self] . identifier[read_iou_stdout] ()) keyword[else] : identifier[message] = literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[returncode] , identifier[self] . identifier[read_iou_stdout] ()) identifier[log] . identifier[warning] ( identifier[message] ) identifier[self] . identifier[project] . identifier[emit] ( literal[string] ,{ literal[string] : identifier[message] }) keyword[if] identifier[self] . identifier[_telnet_server] : identifier[self] . identifier[_telnet_server] . identifier[close] () identifier[self] . identifier[_telnet_server] = keyword[None]
def _termination_callback(self, process_name, returncode):
    """
    Called when the process has stopped.

    :param returncode: Process returncode
    """
    self._terminate_process_iou()
    if returncode != 0:
        if returncode == -11:
            message = 'IOU VM "{}" process has stopped with return code: {} (segfault). This could be an issue with the IOU image, using a different image may fix this.\n{}'.format(self.name, returncode, self.read_iou_stdout()) # depends on [control=['if'], data=['returncode']]
        else:
            message = 'IOU VM "{}" process has stopped with return code: {}\n{}'.format(self.name, returncode, self.read_iou_stdout())
        log.warning(message)
        self.project.emit('log.error', {'message': message}) # depends on [control=['if'], data=['returncode']]
    if self._telnet_server:
        self._telnet_server.close()
        self._telnet_server = None # depends on [control=['if'], data=[]]
def verifymessage(self, address, signature, message): """Verify a signed message.""" return self.req("verifymessage", [address, signature, message])
def function[verifymessage, parameter[self, address, signature, message]]: constant[Verify a signed message.] return[call[name[self].req, parameter[constant[verifymessage], list[[<ast.Name object at 0x7da18f09df60>, <ast.Name object at 0x7da18f09f6d0>, <ast.Name object at 0x7da18f09fbb0>]]]]]
keyword[def] identifier[verifymessage] ( identifier[self] , identifier[address] , identifier[signature] , identifier[message] ): literal[string] keyword[return] identifier[self] . identifier[req] ( literal[string] ,[ identifier[address] , identifier[signature] , identifier[message] ])
def verifymessage(self, address, signature, message): """Verify a signed message.""" return self.req('verifymessage', [address, signature, message])
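A hedged usage sketch for the verifymessage wrapper above. The real client posts a JSON-RPC request through self.req(); the StubClient transport below is an assumption made so the sketch runs standalone, not the original library's API.

class StubClient:
    def req(self, method, params):
        # The real req() would POST {"method": ..., "params": ...} to the
        # node and return the decoded JSON-RPC result; here we just echo it.
        return {"method": method, "params": params}

    def verifymessage(self, address, signature, message):
        """Verify a signed message."""
        return self.req("verifymessage", [address, signature, message])

client = StubClient()
print(client.verifymessage("addr1", "sig==", "hello"))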
def to_realimag(z): """ Convert a complex hermitian matrix to a real valued doubled up representation, i.e., for ``Z = Z_r + 1j * Z_i`` return ``R(Z)``:: R(Z) = [ Z_r Z_i] [-Z_i Z_r] A complex hermitian matrix ``Z`` with elementwise real and imaginary parts ``Z = Z_r + 1j * Z_i`` can be isomorphically represented in doubled up form as:: R(Z) = [ Z_r Z_i] [-Z_i Z_r] R(X)*R(Y) = [ (X_r*Y_r-X_i*Y_i) (X_r*Y_i + X_i*Y_r)] [-(X_r*Y_i + X_i*Y_r) (X_r*Y_r-X_i*Y_i) ] = R(X*Y). In particular, ``Z`` is complex positive (semi-)definite iff ``R(Z)`` is real positive (semi-)definite. :param (qutip.Qobj|scipy.sparse.base.spmatrix) z: The operator representation matrix. :returns: R(Z) the doubled up representation. :rtype: scipy.sparse.csr_matrix """ if isinstance(z, qt.Qobj): z = z.data if not is_hermitian(z): # pragma no coverage raise ValueError("Need a hermitian matrix z") return spvstack([sphstack([z.real, z.imag]), sphstack([z.imag.T, z.real])]).tocsr().real
def function[to_realimag, parameter[z]]: constant[ Convert a complex hermitian matrix to a real valued doubled up representation, i.e., for ``Z = Z_r + 1j * Z_i`` return ``R(Z)``:: R(Z) = [ Z_r Z_i] [-Z_i Z_r] A complex hermitian matrix ``Z`` with elementwise real and imaginary parts ``Z = Z_r + 1j * Z_i`` can be isomorphically represented in doubled up form as:: R(Z) = [ Z_r Z_i] [-Z_i Z_r] R(X)*R(Y) = [ (X_r*Y_r-X_i*Y_i) (X_r*Y_i + X_i*Y_r)] [-(X_r*Y_i + X_i*Y_r) (X_r*Y_r-X_i*Y_i) ] = R(X*Y). In particular, ``Z`` is complex positive (semi-)definite iff ``R(Z)`` is real positive (semi-)definite. :param (qutip.Qobj|scipy.sparse.base.spmatrix) z: The operator representation matrix. :returns: R(Z) the doubled up representation. :rtype: scipy.sparse.csr_matrix ] if call[name[isinstance], parameter[name[z], name[qt].Qobj]] begin[:] variable[z] assign[=] name[z].data if <ast.UnaryOp object at 0x7da20c76d4e0> begin[:] <ast.Raise object at 0x7da20c76e350> return[call[call[name[spvstack], parameter[list[[<ast.Call object at 0x7da20c76ee60>, <ast.Call object at 0x7da20c76cca0>]]]].tocsr, parameter[]].real]
keyword[def] identifier[to_realimag] ( identifier[z] ): literal[string] keyword[if] identifier[isinstance] ( identifier[z] , identifier[qt] . identifier[Qobj] ): identifier[z] = identifier[z] . identifier[data] keyword[if] keyword[not] identifier[is_hermitian] ( identifier[z] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[spvstack] ([ identifier[sphstack] ([ identifier[z] . identifier[real] , identifier[z] . identifier[imag] ]), identifier[sphstack] ([ identifier[z] . identifier[imag] . identifier[T] , identifier[z] . identifier[real] ])]). identifier[tocsr] (). identifier[real]
def to_realimag(z): """ Convert a complex hermitian matrix to a real valued doubled up representation, i.e., for ``Z = Z_r + 1j * Z_i`` return ``R(Z)``:: R(Z) = [ Z_r Z_i] [-Z_i Z_r] A complex hermitian matrix ``Z`` with elementwise real and imaginary parts ``Z = Z_r + 1j * Z_i`` can be isomorphically represented in doubled up form as:: R(Z) = [ Z_r Z_i] [-Z_i Z_r] R(X)*R(Y) = [ (X_r*Y_r-X_i*Y_i) (X_r*Y_i + X_i*Y_r)] [-(X_r*Y_i + X_i*Y_r) (X_r*Y_r-X_i*Y_i) ] = R(X*Y). In particular, ``Z`` is complex positive (semi-)definite iff ``R(Z)`` is real positive (semi-)definite. :param (qutip.Qobj|scipy.sparse.base.spmatrix) z: The operator representation matrix. :returns: R(Z) the doubled up representation. :rtype: scipy.sparse.csr_matrix """ if isinstance(z, qt.Qobj): z = z.data # depends on [control=['if'], data=[]] if not is_hermitian(z): # pragma no coverage raise ValueError('Need a hermitian matrix z') # depends on [control=['if'], data=[]] return spvstack([sphstack([z.real, z.imag]), sphstack([z.imag.T, z.real])]).tocsr().real
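A self-contained numerical check of the doubled-up isomorphism described above, restated with numpy/scipy only (no qutip); the helper name and the direct hermitian check are local to this sketch, not the original module's API.

import numpy as np
import scipy.sparse as sp
from scipy.sparse import hstack as sphstack, vstack as spvstack

def to_realimag_dense(z):
    z = sp.csr_matrix(z)
    # direct hermitian check stands in for is_hermitian() from the module
    assert np.allclose(z.toarray(), z.toarray().conj().T)
    return spvstack([sphstack([z.real, z.imag]),
                     sphstack([z.imag.T, z.real])]).tocsr().real

Y = np.array([[0, -1j], [1j, 0]])      # Pauli-Y, hermitian
RY = to_realimag_dense(Y).toarray()
assert np.allclose(RY @ RY, np.eye(4))  # R(Y)R(Y) = R(Y*Y) = R(I)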
def _run_openstack_cmds(self, commands, commands_to_log=None, sync=False):
        """Execute/sends a CAPI (Command API) command to EOS.

        In this method, the list of commands is appended with prefix and
        postfix commands - to make it understandable by EOS.

        :param commands : List of commands to be executed on EOS.
        :param commands_to_log : This should be set to the command that is
                                 logged. If it is None, then the commands
                                 param is logged.
        :param sync: This flag indicates that the region is being synced.
        """
        full_command = self._build_command(commands, sync=sync)
        if commands_to_log:
            full_log_command = self._build_command(commands_to_log, sync=sync)
        else:
            full_log_command = None
        return self._run_eos_cmds(full_command, full_log_command)
def function[_run_openstack_cmds, parameter[self, commands, commands_to_log, sync]]: constant[Execute/sends a CAPI (Command API) command to EOS. In this method, the list of commands is appended with prefix and postfix commands - to make it understandable by EOS. :param commands : List of commands to be executed on EOS. :param commands_to_log : This should be set to the command that is logged. If it is None, then the commands param is logged. :param sync: This flag indicates that the region is being synced. ] variable[full_command] assign[=] call[name[self]._build_command, parameter[name[commands]]] if name[commands_to_log] begin[:] variable[full_log_command] assign[=] call[name[self]._build_command, parameter[name[commands_to_log]]] return[call[name[self]._run_eos_cmds, parameter[name[full_command], name[full_log_command]]]]
keyword[def] identifier[_run_openstack_cmds] ( identifier[self] , identifier[commands] , identifier[commands_to_log] = keyword[None] , identifier[sync] = keyword[False] ): literal[string] identifier[full_command] = identifier[self] . identifier[_build_command] ( identifier[commands] , identifier[sync] = identifier[sync] ) keyword[if] identifier[commands_to_log] : identifier[full_log_command] = identifier[self] . identifier[_build_command] ( identifier[commands_to_log] , identifier[sync] = identifier[sync] ) keyword[else] : identifier[full_log_command] = keyword[None] keyword[return] identifier[self] . identifier[_run_eos_cmds] ( identifier[full_command] , identifier[full_log_command] )
def _run_openstack_cmds(self, commands, commands_to_log=None, sync=False):
    """Execute/sends a CAPI (Command API) command to EOS.

        In this method, the list of commands is appended with prefix and
        postfix commands - to make it understandable by EOS.

        :param commands : List of commands to be executed on EOS.
        :param commands_to_log : This should be set to the command that is
                                 logged. If it is None, then the commands
                                 param is logged.
        :param sync: This flag indicates that the region is being synced.
        """
    full_command = self._build_command(commands, sync=sync)
    if commands_to_log:
        full_log_command = self._build_command(commands_to_log, sync=sync) # depends on [control=['if'], data=[]]
    else:
        full_log_command = None
    return self._run_eos_cmds(full_command, full_log_command)
def GetFeatureService(self, itemId, returnURLOnly=False): """Obtains a feature service by item ID. Args: itemId (str): The feature service's item ID. returnURLOnly (bool): A boolean value to return the URL of the feature service. Defaults to ``False``. Returns: When ``returnURLOnly`` is ``True``, the URL of the feature service is returned. When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. """ admin = None item = None try: admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler) if self._securityHandler.valid == False: self._valid = self._securityHandler.valid self._message = self._securityHandler.message return None item = admin.content.getItem(itemId=itemId) if item.type == "Feature Service": if returnURLOnly: return item.url else: fs = arcrest.agol.FeatureService( url=item.url, securityHandler=self._securityHandler) if fs.layers is None or len(fs.layers) == 0 : fs = arcrest.ags.FeatureService( url=item.url) return fs return None except: line, filename, synerror = trace() raise common.ArcRestHelperError({ "function": "GetFeatureService", "line": line, "filename": filename, "synerror": synerror, } ) finally: admin = None item = None del item del admin gc.collect()
def function[GetFeatureService, parameter[self, itemId, returnURLOnly]]: constant[Obtains a feature service by item ID. Args: itemId (str): The feature service's item ID. returnURLOnly (bool): A boolean value to return the URL of the feature service. Defaults to ``False``. Returns: When ``returnURLOnly`` is ``True``, the URL of the feature service is returned. When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. ] variable[admin] assign[=] constant[None] variable[item] assign[=] constant[None] <ast.Try object at 0x7da1b12f2d10>
keyword[def] identifier[GetFeatureService] ( identifier[self] , identifier[itemId] , identifier[returnURLOnly] = keyword[False] ): literal[string] identifier[admin] = keyword[None] identifier[item] = keyword[None] keyword[try] : identifier[admin] = identifier[arcrest] . identifier[manageorg] . identifier[Administration] ( identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ) keyword[if] identifier[self] . identifier[_securityHandler] . identifier[valid] == keyword[False] : identifier[self] . identifier[_valid] = identifier[self] . identifier[_securityHandler] . identifier[valid] identifier[self] . identifier[_message] = identifier[self] . identifier[_securityHandler] . identifier[message] keyword[return] keyword[None] identifier[item] = identifier[admin] . identifier[content] . identifier[getItem] ( identifier[itemId] = identifier[itemId] ) keyword[if] identifier[item] . identifier[type] == literal[string] : keyword[if] identifier[returnURLOnly] : keyword[return] identifier[item] . identifier[url] keyword[else] : identifier[fs] = identifier[arcrest] . identifier[agol] . identifier[FeatureService] ( identifier[url] = identifier[item] . identifier[url] , identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ) keyword[if] identifier[fs] . identifier[layers] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[fs] . identifier[layers] )== literal[int] : identifier[fs] = identifier[arcrest] . identifier[ags] . identifier[FeatureService] ( identifier[url] = identifier[item] . identifier[url] ) keyword[return] identifier[fs] keyword[return] keyword[None] keyword[except] : identifier[line] , identifier[filename] , identifier[synerror] = identifier[trace] () keyword[raise] identifier[common] . identifier[ArcRestHelperError] ({ literal[string] : literal[string] , literal[string] : identifier[line] , literal[string] : identifier[filename] , literal[string] : identifier[synerror] , } ) keyword[finally] : identifier[admin] = keyword[None] identifier[item] = keyword[None] keyword[del] identifier[item] keyword[del] identifier[admin] identifier[gc] . identifier[collect] ()
def GetFeatureService(self, itemId, returnURLOnly=False): """Obtains a feature service by item ID. Args: itemId (str): The feature service's item ID. returnURLOnly (bool): A boolean value to return the URL of the feature service. Defaults to ``False``. Returns: When ``returnURLOnly`` is ``True``, the URL of the feature service is returned. When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. """ admin = None item = None try: admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler) if self._securityHandler.valid == False: self._valid = self._securityHandler.valid self._message = self._securityHandler.message return None # depends on [control=['if'], data=[]] item = admin.content.getItem(itemId=itemId) if item.type == 'Feature Service': if returnURLOnly: return item.url # depends on [control=['if'], data=[]] else: fs = arcrest.agol.FeatureService(url=item.url, securityHandler=self._securityHandler) if fs.layers is None or len(fs.layers) == 0: fs = arcrest.ags.FeatureService(url=item.url) # depends on [control=['if'], data=[]] return fs # depends on [control=['if'], data=[]] return None # depends on [control=['try'], data=[]] except: (line, filename, synerror) = trace() raise common.ArcRestHelperError({'function': 'GetFeatureService', 'line': line, 'filename': filename, 'synerror': synerror}) # depends on [control=['except'], data=[]] finally: admin = None item = None del item del admin gc.collect()
def to_proj4(self, as_dict=False): """ Returns the CS as a proj4 formatted string or dict. Arguments: - **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False). """ string = "%s" % self.proj.to_proj4() string += " %s" % self.geogcs.to_proj4(toplevel=False) string += " " + " ".join(param.to_proj4() for param in self.params) string += " %s" % self.unit.to_proj4() string += " +axis=" + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + "u" # up set as default because only proj4 can set it I think... string += " +no_defs" if as_dict: return dict([ entry.lstrip('+').split('=') for entry in string.split() if entry != "+no_defs" ]) else: return string
def function[to_proj4, parameter[self, as_dict]]: constant[ Returns the CS as a proj4 formatted string or dict. Arguments: - **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False). ] variable[string] assign[=] binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[self].proj.to_proj4, parameter[]]] <ast.AugAssign object at 0x7da18c4cf2b0> <ast.AugAssign object at 0x7da18c4cf370> <ast.AugAssign object at 0x7da18c4ccbe0> <ast.AugAssign object at 0x7da18c4cf730> <ast.AugAssign object at 0x7da18c4ce650> if name[as_dict] begin[:] return[call[name[dict], parameter[<ast.ListComp object at 0x7da18c4cfd00>]]]
keyword[def] identifier[to_proj4] ( identifier[self] , identifier[as_dict] = keyword[False] ): literal[string] identifier[string] = literal[string] % identifier[self] . identifier[proj] . identifier[to_proj4] () identifier[string] += literal[string] % identifier[self] . identifier[geogcs] . identifier[to_proj4] ( identifier[toplevel] = keyword[False] ) identifier[string] += literal[string] + literal[string] . identifier[join] ( identifier[param] . identifier[to_proj4] () keyword[for] identifier[param] keyword[in] identifier[self] . identifier[params] ) identifier[string] += literal[string] % identifier[self] . identifier[unit] . identifier[to_proj4] () identifier[string] += literal[string] + identifier[self] . identifier[twin_ax] [ literal[int] ]. identifier[proj4] + identifier[self] . identifier[twin_ax] [ literal[int] ]. identifier[proj4] + literal[string] identifier[string] += literal[string] keyword[if] identifier[as_dict] : keyword[return] identifier[dict] ([ identifier[entry] . identifier[lstrip] ( literal[string] ). identifier[split] ( literal[string] ) keyword[for] identifier[entry] keyword[in] identifier[string] . identifier[split] () keyword[if] identifier[entry] != literal[string] ]) keyword[else] : keyword[return] identifier[string]
def to_proj4(self, as_dict=False): """ Returns the CS as a proj4 formatted string or dict. Arguments: - **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False). """ string = '%s' % self.proj.to_proj4() string += ' %s' % self.geogcs.to_proj4(toplevel=False) string += ' ' + ' '.join((param.to_proj4() for param in self.params)) string += ' %s' % self.unit.to_proj4() string += ' +axis=' + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + 'u' # up set as default because only proj4 can set it I think... string += ' +no_defs' if as_dict: return dict([entry.lstrip('+').split('=') for entry in string.split() if entry != '+no_defs']) # depends on [control=['if'], data=[]] else: return string
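A small standalone demonstration of the as_dict parsing step above: every "+key=value" token except "+no_defs" becomes one dict entry. The proj4 string is a plausible example output, not taken from a real CS object.

string = "+proj=tmerc +datum=WGS84 +lon_0=-123.0 +units=m +axis=enu +no_defs"
as_dict = dict(entry.lstrip('+').split('=')
               for entry in string.split()
               if entry != "+no_defs")
print(as_dict)  # {'proj': 'tmerc', 'datum': 'WGS84', 'lon_0': '-123.0', ...}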
def get_inventory_text(self): """Return the inventory information from the device.""" inventory_text = None if self.inventory_cmd: try: inventory_text = self.device.send(self.inventory_cmd, timeout=120) self.log('Inventory collected') except CommandError: self.log('Unable to collect inventory') else: self.log('No inventory command for {}'.format(self.platform)) return inventory_text
def function[get_inventory_text, parameter[self]]: constant[Return the inventory information from the device.] variable[inventory_text] assign[=] constant[None] if name[self].inventory_cmd begin[:] <ast.Try object at 0x7da1b2596c80> return[name[inventory_text]]
keyword[def] identifier[get_inventory_text] ( identifier[self] ): literal[string] identifier[inventory_text] = keyword[None] keyword[if] identifier[self] . identifier[inventory_cmd] : keyword[try] : identifier[inventory_text] = identifier[self] . identifier[device] . identifier[send] ( identifier[self] . identifier[inventory_cmd] , identifier[timeout] = literal[int] ) identifier[self] . identifier[log] ( literal[string] ) keyword[except] identifier[CommandError] : identifier[self] . identifier[log] ( literal[string] ) keyword[else] : identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[self] . identifier[platform] )) keyword[return] identifier[inventory_text]
def get_inventory_text(self): """Return the inventory information from the device.""" inventory_text = None if self.inventory_cmd: try: inventory_text = self.device.send(self.inventory_cmd, timeout=120) self.log('Inventory collected') # depends on [control=['try'], data=[]] except CommandError: self.log('Unable to collect inventory') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: self.log('No inventory command for {}'.format(self.platform)) return inventory_text
def delete_all_versions(self, model_name, obj_pk): """Delete all versions of a cached instance.""" if self.cache: for version in self.versions: key = self.key_for(version, model_name, obj_pk) self.cache.delete(key)
def function[delete_all_versions, parameter[self, model_name, obj_pk]]: constant[Delete all versions of a cached instance.] if name[self].cache begin[:] for taget[name[version]] in starred[name[self].versions] begin[:] variable[key] assign[=] call[name[self].key_for, parameter[name[version], name[model_name], name[obj_pk]]] call[name[self].cache.delete, parameter[name[key]]]
keyword[def] identifier[delete_all_versions] ( identifier[self] , identifier[model_name] , identifier[obj_pk] ): literal[string] keyword[if] identifier[self] . identifier[cache] : keyword[for] identifier[version] keyword[in] identifier[self] . identifier[versions] : identifier[key] = identifier[self] . identifier[key_for] ( identifier[version] , identifier[model_name] , identifier[obj_pk] ) identifier[self] . identifier[cache] . identifier[delete] ( identifier[key] )
def delete_all_versions(self, model_name, obj_pk): """Delete all versions of a cached instance.""" if self.cache: for version in self.versions: key = self.key_for(version, model_name, obj_pk) self.cache.delete(key) # depends on [control=['for'], data=['version']] # depends on [control=['if'], data=[]]
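A minimal sketch of the version fan-out performed above, assuming a key_for() scheme like "<version>:<model>:<pk>"; the real key format is defined elsewhere in the class, so the layout below is an assumption.

versions = ("v1", "v2")

def key_for(version, model_name, obj_pk):
    # assumed key layout for illustration only
    return "{}:{}:{}".format(version, model_name, obj_pk)

print([key_for(v, "instance", 42) for v in versions])
# ['v1:instance:42', 'v2:instance:42'] -- one cache delete per version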
def switch_cursor(cursor_type, parent_window):
    """ Switches the cursor to the given cursor type """
    watch = Gdk.Cursor(cursor_type)
    window = parent_window.get_root_window()
    window.set_cursor(watch)
def function[switch_cursor, parameter[cursor_type, parent_window]]: constant[ Switches the cursor to the given cursor type ] variable[watch] assign[=] call[name[Gdk].Cursor, parameter[name[cursor_type]]] variable[window] assign[=] call[name[parent_window].get_root_window, parameter[]] call[name[window].set_cursor, parameter[name[watch]]]
keyword[def] identifier[switch_cursor] ( identifier[cursor_type] , identifier[parent_window] ): literal[string] identifier[watch] = identifier[Gdk] . identifier[Cursor] ( identifier[cursor_type] ) identifier[window] = identifier[parent_window] . identifier[get_root_window] () identifier[window] . identifier[set_cursor] ( identifier[watch] )
def switch_cursor(cursor_type, parent_window):
    """ Switches the cursor to the given cursor type """
    watch = Gdk.Cursor(cursor_type)
    window = parent_window.get_root_window()
    window.set_cursor(watch)
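A hedged usage sketch: switching to a busy cursor around a long operation. This needs a display and a realized GTK window, so it will not run headless; the stand-in window below is an assumption.

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gdk, Gtk

window = Gtk.Window()          # stand-in parent; a realized app window in real use
window.show_all()

switch_cursor(Gdk.CursorType.WATCH, window)      # busy cursor during work
# long-running work would happen here
switch_cursor(Gdk.CursorType.LEFT_PTR, window)   # restore the default pointer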
def get_item(filename, uuid): """ Read entry from JSON file """ with open(os.fsencode(str(filename)), "r") as f: data = json.load(f) results = [i for i in data if i["uuid"] == str(uuid)] if results: return results return None
def function[get_item, parameter[filename, uuid]]: constant[ Read entry from JSON file ] with call[name[open], parameter[call[name[os].fsencode, parameter[call[name[str], parameter[name[filename]]]]], constant[r]]] begin[:] variable[data] assign[=] call[name[json].load, parameter[name[f]]] variable[results] assign[=] <ast.ListComp object at 0x7da2047e8eb0> if name[results] begin[:] return[name[results]] return[constant[None]]
keyword[def] identifier[get_item] ( identifier[filename] , identifier[uuid] ): literal[string] keyword[with] identifier[open] ( identifier[os] . identifier[fsencode] ( identifier[str] ( identifier[filename] )), literal[string] ) keyword[as] identifier[f] : identifier[data] = identifier[json] . identifier[load] ( identifier[f] ) identifier[results] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[data] keyword[if] identifier[i] [ literal[string] ]== identifier[str] ( identifier[uuid] )] keyword[if] identifier[results] : keyword[return] identifier[results] keyword[return] keyword[None]
def get_item(filename, uuid): """ Read entry from JSON file """ with open(os.fsencode(str(filename)), 'r') as f: data = json.load(f) results = [i for i in data if i['uuid'] == str(uuid)] if results: return results # depends on [control=['if'], data=[]] return None # depends on [control=['with'], data=['f']]
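A runnable round-trip for get_item above, writing a small JSON list to a temporary file; note the function returns a list of matching entries (or None), not a single dict.

import json, os, tempfile, uuid as uuidlib

entry_id = str(uuidlib.uuid4())
data = [{"uuid": entry_id, "title": "first"},
        {"uuid": str(uuidlib.uuid4()), "title": "second"}]

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(data, f)

print(get_item(f.name, entry_id))  # [{'uuid': '...', 'title': 'first'}]
os.unlink(f.name)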
def get_course_by_sis_id(self, sis_course_id, params={}): """ Return course resource for given sis id. """ return self.get_course(self._sis_id(sis_course_id, sis_field="course"), params)
def function[get_course_by_sis_id, parameter[self, sis_course_id, params]]: constant[ Return course resource for given sis id. ] return[call[name[self].get_course, parameter[call[name[self]._sis_id, parameter[name[sis_course_id]]], name[params]]]]
keyword[def] identifier[get_course_by_sis_id] ( identifier[self] , identifier[sis_course_id] , identifier[params] ={}): literal[string] keyword[return] identifier[self] . identifier[get_course] ( identifier[self] . identifier[_sis_id] ( identifier[sis_course_id] , identifier[sis_field] = literal[string] ), identifier[params] )
def get_course_by_sis_id(self, sis_course_id, params={}): """ Return course resource for given sis id. """ return self.get_course(self._sis_id(sis_course_id, sis_field='course'), params)
def wait(self, key, index=0, recursive=False, sorted=False, quorum=False, timeout=None): """Waits until a node changes.""" return self.adapter.get(key, recursive=recursive, sorted=sorted, quorum=quorum, wait=True, wait_index=index, timeout=timeout)
def function[wait, parameter[self, key, index, recursive, sorted, quorum, timeout]]: constant[Waits until a node changes.] return[call[name[self].adapter.get, parameter[name[key]]]]
keyword[def] identifier[wait] ( identifier[self] , identifier[key] , identifier[index] = literal[int] , identifier[recursive] = keyword[False] , identifier[sorted] = keyword[False] , identifier[quorum] = keyword[False] , identifier[timeout] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[adapter] . identifier[get] ( identifier[key] , identifier[recursive] = identifier[recursive] , identifier[sorted] = identifier[sorted] , identifier[quorum] = identifier[quorum] , identifier[wait] = keyword[True] , identifier[wait_index] = identifier[index] , identifier[timeout] = identifier[timeout] )
def wait(self, key, index=0, recursive=False, sorted=False, quorum=False, timeout=None): """Waits until a node changes.""" return self.adapter.get(key, recursive=recursive, sorted=sorted, quorum=quorum, wait=True, wait_index=index, timeout=timeout)
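A hedged watch-loop sketch for the wait() helper above. It assumes `client` is an instance of the surrounding etcd adapter class and that the returned node exposes a modified-index attribute; both names are assumptions, not confirmed API.

index = 0
while True:
    node = client.wait('/services/web', index=index + 1, timeout=30)
    print('key changed:', node)
    index = node.modified_index  # hypothetical attribute name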
def convenience_calc_log_likelihood(self, params): """ Calculates the log-likelihood for this model and dataset. """ shapes, intercepts, betas = self.convenience_split_params(params) args = [betas, self.design_3d, self.alt_id_vector, self.rows_to_obs, self.rows_to_alts, self.rows_to_mixers, self.choice_vector, self.utility_transform] kwargs = {"ridge": self.ridge, "weights": self.weights} log_likelihood = general_log_likelihood(*args, **kwargs) return log_likelihood
def function[convenience_calc_log_likelihood, parameter[self, params]]: constant[ Calculates the log-likelihood for this model and dataset. ] <ast.Tuple object at 0x7da1b1307610> assign[=] call[name[self].convenience_split_params, parameter[name[params]]] variable[args] assign[=] list[[<ast.Name object at 0x7da1b13077c0>, <ast.Attribute object at 0x7da1b1306c80>, <ast.Attribute object at 0x7da1b1305750>, <ast.Attribute object at 0x7da1b1304880>, <ast.Attribute object at 0x7da1b1306b90>, <ast.Attribute object at 0x7da1b1307dc0>, <ast.Attribute object at 0x7da1b1307e20>, <ast.Attribute object at 0x7da1b1307130>]] variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1304d30>, <ast.Constant object at 0x7da1b1305570>], [<ast.Attribute object at 0x7da1b1304130>, <ast.Attribute object at 0x7da1b1304280>]] variable[log_likelihood] assign[=] call[name[general_log_likelihood], parameter[<ast.Starred object at 0x7da1b1307550>]] return[name[log_likelihood]]
keyword[def] identifier[convenience_calc_log_likelihood] ( identifier[self] , identifier[params] ): literal[string] identifier[shapes] , identifier[intercepts] , identifier[betas] = identifier[self] . identifier[convenience_split_params] ( identifier[params] ) identifier[args] =[ identifier[betas] , identifier[self] . identifier[design_3d] , identifier[self] . identifier[alt_id_vector] , identifier[self] . identifier[rows_to_obs] , identifier[self] . identifier[rows_to_alts] , identifier[self] . identifier[rows_to_mixers] , identifier[self] . identifier[choice_vector] , identifier[self] . identifier[utility_transform] ] identifier[kwargs] ={ literal[string] : identifier[self] . identifier[ridge] , literal[string] : identifier[self] . identifier[weights] } identifier[log_likelihood] = identifier[general_log_likelihood] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[log_likelihood]
def convenience_calc_log_likelihood(self, params): """ Calculates the log-likelihood for this model and dataset. """ (shapes, intercepts, betas) = self.convenience_split_params(params) args = [betas, self.design_3d, self.alt_id_vector, self.rows_to_obs, self.rows_to_alts, self.rows_to_mixers, self.choice_vector, self.utility_transform] kwargs = {'ridge': self.ridge, 'weights': self.weights} log_likelihood = general_log_likelihood(*args, **kwargs) return log_likelihood
def find_first_declaration(
        declarations,
        decl_type=None,
        name=None,
        parent=None,
        recursive=True,
        fullname=None):
    """
    Returns the first declaration that matches the criteria defined by the
    developer.

    For more information about arguments see :class:`match_declaration_t`
    class.

    :rtype: matched declaration :class:`declaration_t` or None

    """
    decl_matcher = algorithm.match_declaration_t(
        decl_type=decl_type,
        name=name,
        fullname=fullname,
        parent=parent)
    if recursive:
        decls = make_flatten(declarations)
    else:
        decls = declarations
    for decl in decls:
        if decl_matcher(decl):
            return decl
    return None
def function[find_first_declaration, parameter[declarations, decl_type, name, parent, recursive, fullname]]: constant[ Returns the first declaration that matches the criteria defined by the developer. For more information about arguments see :class:`match_declaration_t` class. :rtype: matched declaration :class:`declaration_t` or None ] variable[decl_matcher] assign[=] call[name[algorithm].match_declaration_t, parameter[]] if name[recursive] begin[:] variable[decls] assign[=] call[name[make_flatten], parameter[name[declarations]]] for taget[name[decl]] in starred[name[decls]] begin[:] if call[name[decl_matcher], parameter[name[decl]]] begin[:] return[name[decl]] return[constant[None]]
keyword[def] identifier[find_first_declaration] ( identifier[declarations] , identifier[decl_type] = keyword[None] , identifier[name] = keyword[None] , identifier[parent] = keyword[None] , identifier[recursive] = keyword[True] , identifier[fullname] = keyword[None] ): literal[string] identifier[decl_matcher] = identifier[algorithm] . identifier[match_declaration_t] ( identifier[decl_type] = identifier[decl_type] , identifier[name] = identifier[name] , identifier[fullname] = identifier[fullname] , identifier[parent] = identifier[parent] ) keyword[if] identifier[recursive] : identifier[decls] = identifier[make_flatten] ( identifier[declarations] ) keyword[else] : identifier[decls] = identifier[declarations] keyword[for] identifier[decl] keyword[in] identifier[decls] : keyword[if] identifier[decl_matcher] ( identifier[decl] ): keyword[return] identifier[decl] keyword[return] keyword[None]
def find_first_declaration(declarations, decl_type=None, name=None, parent=None, recursive=True, fullname=None):
    """
    Returns the first declaration that matches the criteria defined by the
    developer.

    For more information about arguments see :class:`match_declaration_t`
    class.

    :rtype: matched declaration :class:`declaration_t` or None

    """
    decl_matcher = algorithm.match_declaration_t(decl_type=decl_type, name=name, fullname=fullname, parent=parent)
    if recursive:
        decls = make_flatten(declarations) # depends on [control=['if'], data=[]]
    else:
        decls = declarations
    for decl in decls:
        if decl_matcher(decl):
            return decl # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['decl']]
    return None
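A hedged usage sketch in the pygccxml style this helper belongs to; global_namespace (a parsed declaration tree) and the class name 'my_class' are assumptions made for illustration.

from pygccxml import declarations

cls = find_first_declaration(global_namespace.declarations,
                             decl_type=declarations.class_t,
                             name='my_class')
print(cls.name if cls is not None else 'my_class not found')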
def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data)
def function[new_feed, parameter[self, name, layer_shape]]: constant[ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: ] variable[feed_data] assign[=] call[name[tf].placeholder, parameter[name[tf].float32, name[layer_shape], constant[input]]] call[name[self].__network.add_layer, parameter[name[name]]]
keyword[def] identifier[new_feed] ( identifier[self] , identifier[name] : identifier[str] , identifier[layer_shape] : identifier[tuple] ): literal[string] identifier[feed_data] = identifier[tf] . identifier[placeholder] ( identifier[tf] . identifier[float32] , identifier[layer_shape] , literal[string] ) identifier[self] . identifier[__network] . identifier[add_layer] ( identifier[name] , identifier[layer_output] = identifier[feed_data] )
def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data)
def drop_slot(self, slot=None, drop_stack=False): """ Drop one or all items of the slot. Does not wait for confirmation from the server. If you want that, use a ``Task`` and ``yield inventory.async.drop_slot()`` instead. If ``slot`` is None, drops the ``cursor_slot`` or, if that's empty, the currently held item (``active_slot``). Args: slot (Optional[Slot]): The dropped slot. Can be None, integer, or ``Slot`` instance. Returns: int: The action ID of the click """ if slot is None: if self.cursor_slot.is_empty: slot = self.active_slot else: slot = self.cursor_slot elif isinstance(slot, int): # also allow slot nr slot = self.window.slots[slot] if slot == self.cursor_slot: # dropping items from cursor is done via normal click return self.click_slot(self.cursor_slot, not drop_stack) return self.send_click(windows.DropClick(slot, drop_stack))
def function[drop_slot, parameter[self, slot, drop_stack]]: constant[ Drop one or all items of the slot. Does not wait for confirmation from the server. If you want that, use a ``Task`` and ``yield inventory.async.drop_slot()`` instead. If ``slot`` is None, drops the ``cursor_slot`` or, if that's empty, the currently held item (``active_slot``). Args: slot (Optional[Slot]): The dropped slot. Can be None, integer, or ``Slot`` instance. Returns: int: The action ID of the click ] if compare[name[slot] is constant[None]] begin[:] if name[self].cursor_slot.is_empty begin[:] variable[slot] assign[=] name[self].active_slot if compare[name[slot] equal[==] name[self].cursor_slot] begin[:] return[call[name[self].click_slot, parameter[name[self].cursor_slot, <ast.UnaryOp object at 0x7da1b2851b40>]]] return[call[name[self].send_click, parameter[call[name[windows].DropClick, parameter[name[slot], name[drop_stack]]]]]]
keyword[def] identifier[drop_slot] ( identifier[self] , identifier[slot] = keyword[None] , identifier[drop_stack] = keyword[False] ): literal[string] keyword[if] identifier[slot] keyword[is] keyword[None] : keyword[if] identifier[self] . identifier[cursor_slot] . identifier[is_empty] : identifier[slot] = identifier[self] . identifier[active_slot] keyword[else] : identifier[slot] = identifier[self] . identifier[cursor_slot] keyword[elif] identifier[isinstance] ( identifier[slot] , identifier[int] ): identifier[slot] = identifier[self] . identifier[window] . identifier[slots] [ identifier[slot] ] keyword[if] identifier[slot] == identifier[self] . identifier[cursor_slot] : keyword[return] identifier[self] . identifier[click_slot] ( identifier[self] . identifier[cursor_slot] , keyword[not] identifier[drop_stack] ) keyword[return] identifier[self] . identifier[send_click] ( identifier[windows] . identifier[DropClick] ( identifier[slot] , identifier[drop_stack] ))
def drop_slot(self, slot=None, drop_stack=False): """ Drop one or all items of the slot. Does not wait for confirmation from the server. If you want that, use a ``Task`` and ``yield inventory.async.drop_slot()`` instead. If ``slot`` is None, drops the ``cursor_slot`` or, if that's empty, the currently held item (``active_slot``). Args: slot (Optional[Slot]): The dropped slot. Can be None, integer, or ``Slot`` instance. Returns: int: The action ID of the click """ if slot is None: if self.cursor_slot.is_empty: slot = self.active_slot # depends on [control=['if'], data=[]] else: slot = self.cursor_slot # depends on [control=['if'], data=['slot']] elif isinstance(slot, int): # also allow slot nr slot = self.window.slots[slot] # depends on [control=['if'], data=[]] if slot == self.cursor_slot: # dropping items from cursor is done via normal click return self.click_slot(self.cursor_slot, not drop_stack) # depends on [control=['if'], data=[]] return self.send_click(windows.DropClick(slot, drop_stack))
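Two hedged calls against the drop_slot() method above, assuming `inventory` is the plugin instance that owns it; the slot number 36 (first hotbar slot in many window layouts) is illustrative.

inventory.drop_slot()                      # drop one item from cursor/active slot
inventory.drop_slot(36, drop_stack=True)   # drop the whole stack in slot 36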
def _on_trigger(self, my_task):
    """
    Enqueue a trigger, such that this task triggers multiple times later
    when _on_complete() is called.
    """
    self.queued += 1
    # All tasks that have already completed need to be put back to
    # READY.
    for thetask in my_task.workflow.task_tree:
        if thetask.thread_id != my_task.thread_id:
            continue
        if (thetask.task_spec == self
                and thetask._has_state(Task.COMPLETED)):
            thetask._set_state(Task.FUTURE, True)
            thetask._ready()
def function[_on_trigger, parameter[self, my_task]]: constant[ Enqueue a trigger, such that this task triggers multiple times later when _on_complete() is called. ] <ast.AugAssign object at 0x7da1b0121e40> for taget[name[thetask]] in starred[name[my_task].workflow.task_tree] begin[:] if compare[name[thetask].thread_id not_equal[!=] name[my_task].thread_id] begin[:] continue if <ast.BoolOp object at 0x7da1b016cac0> begin[:] call[name[thetask]._set_state, parameter[name[Task].FUTURE, constant[True]]] call[name[thetask]._ready, parameter[]]
keyword[def] identifier[_on_trigger] ( identifier[self] , identifier[my_task] ): literal[string] identifier[self] . identifier[queued] += literal[int] keyword[for] identifier[thetask] keyword[in] identifier[my_task] . identifier[workflow] . identifier[task_tree] : keyword[if] identifier[thetask] . identifier[thread_id] != identifier[my_task] . identifier[thread_id] : keyword[continue] keyword[if] ( identifier[thetask] . identifier[task_spec] == identifier[self] keyword[and] identifier[thetask] . identifier[_has_state] ( identifier[Task] . identifier[COMPLETED] )): identifier[thetask] . identifier[_set_state] ( identifier[Task] . identifier[FUTURE] , keyword[True] ) identifier[thetask] . identifier[_ready] ()
def _on_trigger(self, my_task): """ Enqueue a trigger, such that this task triggers multiple times later when _on_complete() is called. """ self.queued += 1 # All tasks that have already completed need to be put back to # READY. for thetask in my_task.workflow.task_tree: if thetask.thread_id != my_task.thread_id: continue # depends on [control=['if'], data=[]] if thetask.task_spec == self and thetask._has_state(Task.COMPLETED): thetask._set_state(Task.FUTURE, True) thetask._ready() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['thetask']]
def set_geoip_params(self, db_country=None, db_city=None): """Sets GeoIP parameters. * http://uwsgi.readthedocs.io/en/latest/GeoIP.html :param str|unicode db_country: Country database file path. :param str|unicode db_city: City database file path. Example: ``GeoLiteCity.dat``. """ self._set('geoip-country', db_country, plugin='geoip') self._set('geoip-city', db_city, plugin='geoip') return self._section
def function[set_geoip_params, parameter[self, db_country, db_city]]: constant[Sets GeoIP parameters. * http://uwsgi.readthedocs.io/en/latest/GeoIP.html :param str|unicode db_country: Country database file path. :param str|unicode db_city: City database file path. Example: ``GeoLiteCity.dat``. ] call[name[self]._set, parameter[constant[geoip-country], name[db_country]]] call[name[self]._set, parameter[constant[geoip-city], name[db_city]]] return[name[self]._section]
keyword[def] identifier[set_geoip_params] ( identifier[self] , identifier[db_country] = keyword[None] , identifier[db_city] = keyword[None] ): literal[string] identifier[self] . identifier[_set] ( literal[string] , identifier[db_country] , identifier[plugin] = literal[string] ) identifier[self] . identifier[_set] ( literal[string] , identifier[db_city] , identifier[plugin] = literal[string] ) keyword[return] identifier[self] . identifier[_section]
def set_geoip_params(self, db_country=None, db_city=None): """Sets GeoIP parameters. * http://uwsgi.readthedocs.io/en/latest/GeoIP.html :param str|unicode db_country: Country database file path. :param str|unicode db_city: City database file path. Example: ``GeoLiteCity.dat``. """ self._set('geoip-country', db_country, plugin='geoip') self._set('geoip-city', db_city, plugin='geoip') return self._section
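A hedged configuration sketch matching the chainable style that `return self._section` suggests; the Section class and the geoip attribute path are assumptions about the surrounding configurator, and the .dat filenames are illustrative.

section = Section()
section.geoip.set_geoip_params(db_country='GeoIP.dat',
                               db_city='GeoLiteCity.dat')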
def raw(self): r"""Return the raw corpus. This is reconstructed by joining sub-components with the corpus' split characters Returns ------- str The raw corpus Example ------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> corp = Corpus(tqbf) >>> print(corp.raw()) The quick brown fox jumped over the lazy dog. And then it slept. And the dog ran off. >>> len(corp.raw()) 85 """ doc_list = [] for doc in self.corpus: sent_list = [] for sent in doc: sent_list.append(' '.join(sent)) doc_list.append(self.sent_split.join(sent_list)) del sent_list return self.doc_split.join(doc_list)
def function[raw, parameter[self]]: constant[Return the raw corpus. This is reconstructed by joining sub-components with the corpus' split characters Returns ------- str The raw corpus Example ------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> corp = Corpus(tqbf) >>> print(corp.raw()) The quick brown fox jumped over the lazy dog. And then it slept. And the dog ran off. >>> len(corp.raw()) 85 ] variable[doc_list] assign[=] list[[]] for taget[name[doc]] in starred[name[self].corpus] begin[:] variable[sent_list] assign[=] list[[]] for taget[name[sent]] in starred[name[doc]] begin[:] call[name[sent_list].append, parameter[call[constant[ ].join, parameter[name[sent]]]]] call[name[doc_list].append, parameter[call[name[self].sent_split.join, parameter[name[sent_list]]]]] <ast.Delete object at 0x7da20c7c9de0> return[call[name[self].doc_split.join, parameter[name[doc_list]]]]
keyword[def] identifier[raw] ( identifier[self] ): literal[string] identifier[doc_list] =[] keyword[for] identifier[doc] keyword[in] identifier[self] . identifier[corpus] : identifier[sent_list] =[] keyword[for] identifier[sent] keyword[in] identifier[doc] : identifier[sent_list] . identifier[append] ( literal[string] . identifier[join] ( identifier[sent] )) identifier[doc_list] . identifier[append] ( identifier[self] . identifier[sent_split] . identifier[join] ( identifier[sent_list] )) keyword[del] identifier[sent_list] keyword[return] identifier[self] . identifier[doc_split] . identifier[join] ( identifier[doc_list] )
def raw(self): """Return the raw corpus. This is reconstructed by joining sub-components with the corpus' split characters Returns ------- str The raw corpus Example ------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n' >>> tqbf += 'And then it slept.\\n And the dog ran off.' >>> corp = Corpus(tqbf) >>> print(corp.raw()) The quick brown fox jumped over the lazy dog. And then it slept. And the dog ran off. >>> len(corp.raw()) 85 """ doc_list = [] for doc in self.corpus: sent_list = [] for sent in doc: sent_list.append(' '.join(sent)) # depends on [control=['for'], data=['sent']] doc_list.append(self.sent_split.join(sent_list)) del sent_list # depends on [control=['for'], data=['doc']] return self.doc_split.join(doc_list)
def get_probes_results(self): """Return the results of the RPM probes.""" probes_results = {} probes_results_table = junos_views.junos_rpm_probes_results_table(self.device) probes_results_table.get() probes_results_items = probes_results_table.items() for probe_result in probes_results_items: probe_name = py23_compat.text_type(probe_result[0]) test_results = { p[0]: p[1] for p in probe_result[1] } test_results['last_test_loss'] = napalm_base.helpers.convert( int, test_results.pop('last_test_loss'), 0) for test_param_name, test_param_value in test_results.items(): if isinstance(test_param_value, float): test_results[test_param_name] = test_param_value * 1e-3 # convert from useconds to mseconds test_name = test_results.pop('test_name', '') source = test_results.get('source', u'') if source is None: test_results['source'] = u'' if probe_name not in probes_results.keys(): probes_results[probe_name] = {} probes_results[probe_name][test_name] = test_results return probes_results
def function[get_probes_results, parameter[self]]: constant[Return the results of the RPM probes.] variable[probes_results] assign[=] dictionary[[], []] variable[probes_results_table] assign[=] call[name[junos_views].junos_rpm_probes_results_table, parameter[name[self].device]] call[name[probes_results_table].get, parameter[]] variable[probes_results_items] assign[=] call[name[probes_results_table].items, parameter[]] for taget[name[probe_result]] in starred[name[probes_results_items]] begin[:] variable[probe_name] assign[=] call[name[py23_compat].text_type, parameter[call[name[probe_result]][constant[0]]]] variable[test_results] assign[=] <ast.DictComp object at 0x7da1b26afc40> call[name[test_results]][constant[last_test_loss]] assign[=] call[name[napalm_base].helpers.convert, parameter[name[int], call[name[test_results].pop, parameter[constant[last_test_loss]]], constant[0]]] for taget[tuple[[<ast.Name object at 0x7da1b26ad390>, <ast.Name object at 0x7da1b26af1c0>]]] in starred[call[name[test_results].items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[test_param_value], name[float]]] begin[:] call[name[test_results]][name[test_param_name]] assign[=] binary_operation[name[test_param_value] * constant[0.001]] variable[test_name] assign[=] call[name[test_results].pop, parameter[constant[test_name], constant[]]] variable[source] assign[=] call[name[test_results].get, parameter[constant[source], constant[]]] if compare[name[source] is constant[None]] begin[:] call[name[test_results]][constant[source]] assign[=] constant[] if compare[name[probe_name] <ast.NotIn object at 0x7da2590d7190> call[name[probes_results].keys, parameter[]]] begin[:] call[name[probes_results]][name[probe_name]] assign[=] dictionary[[], []] call[call[name[probes_results]][name[probe_name]]][name[test_name]] assign[=] name[test_results] return[name[probes_results]]
keyword[def] identifier[get_probes_results] ( identifier[self] ): literal[string] identifier[probes_results] ={} identifier[probes_results_table] = identifier[junos_views] . identifier[junos_rpm_probes_results_table] ( identifier[self] . identifier[device] ) identifier[probes_results_table] . identifier[get] () identifier[probes_results_items] = identifier[probes_results_table] . identifier[items] () keyword[for] identifier[probe_result] keyword[in] identifier[probes_results_items] : identifier[probe_name] = identifier[py23_compat] . identifier[text_type] ( identifier[probe_result] [ literal[int] ]) identifier[test_results] ={ identifier[p] [ literal[int] ]: identifier[p] [ literal[int] ] keyword[for] identifier[p] keyword[in] identifier[probe_result] [ literal[int] ] } identifier[test_results] [ literal[string] ]= identifier[napalm_base] . identifier[helpers] . identifier[convert] ( identifier[int] , identifier[test_results] . identifier[pop] ( literal[string] ), literal[int] ) keyword[for] identifier[test_param_name] , identifier[test_param_value] keyword[in] identifier[test_results] . identifier[items] (): keyword[if] identifier[isinstance] ( identifier[test_param_value] , identifier[float] ): identifier[test_results] [ identifier[test_param_name] ]= identifier[test_param_value] * literal[int] identifier[test_name] = identifier[test_results] . identifier[pop] ( literal[string] , literal[string] ) identifier[source] = identifier[test_results] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[source] keyword[is] keyword[None] : identifier[test_results] [ literal[string] ]= literal[string] keyword[if] identifier[probe_name] keyword[not] keyword[in] identifier[probes_results] . identifier[keys] (): identifier[probes_results] [ identifier[probe_name] ]={} identifier[probes_results] [ identifier[probe_name] ][ identifier[test_name] ]= identifier[test_results] keyword[return] identifier[probes_results]
def get_probes_results(self): """Return the results of the RPM probes.""" probes_results = {} probes_results_table = junos_views.junos_rpm_probes_results_table(self.device) probes_results_table.get() probes_results_items = probes_results_table.items() for probe_result in probes_results_items: probe_name = py23_compat.text_type(probe_result[0]) test_results = {p[0]: p[1] for p in probe_result[1]} test_results['last_test_loss'] = napalm_base.helpers.convert(int, test_results.pop('last_test_loss'), 0) for (test_param_name, test_param_value) in test_results.items(): if isinstance(test_param_value, float): test_results[test_param_name] = test_param_value * 0.001 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # convert from useconds to mseconds test_name = test_results.pop('test_name', '') source = test_results.get('source', u'') if source is None: test_results['source'] = u'' # depends on [control=['if'], data=[]] if probe_name not in probes_results.keys(): probes_results[probe_name] = {} # depends on [control=['if'], data=['probe_name']] probes_results[probe_name][test_name] = test_results # depends on [control=['for'], data=['probe_result']] return probes_results
def dgeodr(x, y, z, re, f):
    """
    This routine computes the Jacobian of the transformation from
    rectangular to geodetic coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dgeodr_c.html

    :param x: X-coordinate of point.
    :type x: float
    :param y: Y-coordinate of point.
    :type y: float
    :param z: Z-coordinate of point.
    :type z: float
    :param re: Equatorial radius of the reference spheroid.
    :type re: float
    :param f: Flattening coefficient.
    :type f: float
    :return: Matrix of partial derivatives.
    :rtype: 3x3-Element Array of floats
    """
    x = ctypes.c_double(x)
    y = ctypes.c_double(y)
    z = ctypes.c_double(z)
    re = ctypes.c_double(re)
    f = ctypes.c_double(f)
    jacobi = stypes.emptyDoubleMatrix()
    libspice.dgeodr_c(x, y, z, re, f, jacobi)
    return stypes.cMatrixToNumpy(jacobi)
def function[dgeodr, parameter[x, y, z, re, f]]: constant[ This routine computes the Jacobian of the transformation from rectangular to geodetic coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dgeodr_c.html :param x: X-coordinate of point. :type x: float :param y: Y-coordinate of point. :type y: float :param z: Z-coordinate of point. :type z: float :param re: Equatorial radius of the reference spheroid. :type re: float :param f: Flattening coefficient. :type f: float :return: Matrix of partial derivatives. :rtype: 3x3-Element Array of floats ] variable[x] assign[=] call[name[ctypes].c_double, parameter[name[x]]] variable[y] assign[=] call[name[ctypes].c_double, parameter[name[y]]] variable[z] assign[=] call[name[ctypes].c_double, parameter[name[z]]] variable[re] assign[=] call[name[ctypes].c_double, parameter[name[re]]] variable[f] assign[=] call[name[ctypes].c_double, parameter[name[f]]] variable[jacobi] assign[=] call[name[stypes].emptyDoubleMatrix, parameter[]] call[name[libspice].dgeodr_c, parameter[name[x], name[y], name[z], name[re], name[f], name[jacobi]]] return[call[name[stypes].cMatrixToNumpy, parameter[name[jacobi]]]]
keyword[def] identifier[dgeodr] ( identifier[x] , identifier[y] , identifier[z] , identifier[re] , identifier[f] ): literal[string] identifier[x] = identifier[ctypes] . identifier[c_double] ( identifier[x] ) identifier[y] = identifier[ctypes] . identifier[c_double] ( identifier[y] ) identifier[z] = identifier[ctypes] . identifier[c_double] ( identifier[z] ) identifier[re] = identifier[ctypes] . identifier[c_double] ( identifier[re] ) identifier[f] = identifier[ctypes] . identifier[c_double] ( identifier[f] ) identifier[jacobi] = identifier[stypes] . identifier[emptyDoubleMatrix] () identifier[libspice] . identifier[dgeodr_c] ( identifier[x] , identifier[y] , identifier[z] , identifier[re] , identifier[f] , identifier[jacobi] ) keyword[return] identifier[stypes] . identifier[cMatrixToNumpy] ( identifier[jacobi] )
def dgeodr(x, y, z, re, f):
    """
    This routine computes the Jacobian of the transformation from
    rectangular to geodetic coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dgeodr_c.html

    :param x: X-coordinate of point.
    :type x: float
    :param y: Y-coordinate of point.
    :type y: float
    :param z: Z-coordinate of point.
    :type z: float
    :param re: Equatorial radius of the reference spheroid.
    :type re: float
    :param f: Flattening coefficient.
    :type f: float
    :return: Matrix of partial derivatives.
    :rtype: 3x3-Element Array of floats
    """
    x = ctypes.c_double(x)
    y = ctypes.c_double(y)
    z = ctypes.c_double(z)
    re = ctypes.c_double(re)
    f = ctypes.c_double(f)
    jacobi = stypes.emptyDoubleMatrix()
    libspice.dgeodr_c(x, y, z, re, f, jacobi)
    return stypes.cMatrixToNumpy(jacobi)
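A usage sketch with Earth-like WGS84 constants (equatorial radius in km and flattening); dgeodr is purely geometric, so no SPICE kernels are needed. It assumes the wrapper above is importable, e.g. via spiceypy.

re_km = 6378.1366                 # equatorial radius, km
f = 1.0 / 298.257223563           # flattening coefficient
jacobi = dgeodr(re_km, 0.0, 1000.0, re_km, f)
print(jacobi.shape)               # (3, 3) matrix of partial derivatives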
def __single_arity_fn_to_py_ast( ctx: GeneratorContext, node: Fn, method: FnMethod, def_name: Optional[str] = None, meta_node: Optional[MetaNode] = None, ) -> GeneratedPyAST: """Return a Python AST node for a function with a single arity.""" assert node.op == NodeOp.FN assert method.op == NodeOp.FN_METHOD lisp_fn_name = node.local.name if node.local is not None else None py_fn_name = __fn_name(lisp_fn_name) if def_name is None else munge(def_name) py_fn_node = ast.AsyncFunctionDef if node.is_async else ast.FunctionDef with ctx.new_symbol_table(py_fn_name), ctx.new_recur_point( method.loop_id, RecurType.FN, is_variadic=node.is_variadic ): # Allow named anonymous functions to recursively call themselves if lisp_fn_name is not None: ctx.symbol_table.new_symbol( sym.symbol(lisp_fn_name), py_fn_name, LocalType.FN ) fn_args, varg, fn_body_ast = __fn_args_to_py_ast( ctx, method.params, method.body ) meta_deps, meta_decorators = __fn_meta(ctx, meta_node) return GeneratedPyAST( node=ast.Name(id=py_fn_name, ctx=ast.Load()), dependencies=list( chain( meta_deps, [ py_fn_node( name=py_fn_name, args=ast.arguments( args=fn_args, kwarg=None, vararg=varg, kwonlyargs=[], defaults=[], kw_defaults=[], ), body=fn_body_ast, decorator_list=list( chain( meta_decorators, [_BASILISP_FN_FN_NAME], [_TRAMPOLINE_FN_NAME] if ctx.recur_point.has_recur else [], ) ), returns=None, ) ], ) ), )
def function[__single_arity_fn_to_py_ast, parameter[ctx, node, method, def_name, meta_node]]: constant[Return a Python AST node for a function with a single arity.] assert[compare[name[node].op equal[==] name[NodeOp].FN]] assert[compare[name[method].op equal[==] name[NodeOp].FN_METHOD]] variable[lisp_fn_name] assign[=] <ast.IfExp object at 0x7da1b025e2c0> variable[py_fn_name] assign[=] <ast.IfExp object at 0x7da1b025de70> variable[py_fn_node] assign[=] <ast.IfExp object at 0x7da1b025f0d0> with call[name[ctx].new_symbol_table, parameter[name[py_fn_name]]] begin[:] if compare[name[lisp_fn_name] is_not constant[None]] begin[:] call[name[ctx].symbol_table.new_symbol, parameter[call[name[sym].symbol, parameter[name[lisp_fn_name]]], name[py_fn_name], name[LocalType].FN]] <ast.Tuple object at 0x7da1b025f670> assign[=] call[name[__fn_args_to_py_ast], parameter[name[ctx], name[method].params, name[method].body]] <ast.Tuple object at 0x7da1b025fca0> assign[=] call[name[__fn_meta], parameter[name[ctx], name[meta_node]]] return[call[name[GeneratedPyAST], parameter[]]]
keyword[def] identifier[__single_arity_fn_to_py_ast] ( identifier[ctx] : identifier[GeneratorContext] , identifier[node] : identifier[Fn] , identifier[method] : identifier[FnMethod] , identifier[def_name] : identifier[Optional] [ identifier[str] ]= keyword[None] , identifier[meta_node] : identifier[Optional] [ identifier[MetaNode] ]= keyword[None] , )-> identifier[GeneratedPyAST] : literal[string] keyword[assert] identifier[node] . identifier[op] == identifier[NodeOp] . identifier[FN] keyword[assert] identifier[method] . identifier[op] == identifier[NodeOp] . identifier[FN_METHOD] identifier[lisp_fn_name] = identifier[node] . identifier[local] . identifier[name] keyword[if] identifier[node] . identifier[local] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] identifier[py_fn_name] = identifier[__fn_name] ( identifier[lisp_fn_name] ) keyword[if] identifier[def_name] keyword[is] keyword[None] keyword[else] identifier[munge] ( identifier[def_name] ) identifier[py_fn_node] = identifier[ast] . identifier[AsyncFunctionDef] keyword[if] identifier[node] . identifier[is_async] keyword[else] identifier[ast] . identifier[FunctionDef] keyword[with] identifier[ctx] . identifier[new_symbol_table] ( identifier[py_fn_name] ), identifier[ctx] . identifier[new_recur_point] ( identifier[method] . identifier[loop_id] , identifier[RecurType] . identifier[FN] , identifier[is_variadic] = identifier[node] . identifier[is_variadic] ): keyword[if] identifier[lisp_fn_name] keyword[is] keyword[not] keyword[None] : identifier[ctx] . identifier[symbol_table] . identifier[new_symbol] ( identifier[sym] . identifier[symbol] ( identifier[lisp_fn_name] ), identifier[py_fn_name] , identifier[LocalType] . identifier[FN] ) identifier[fn_args] , identifier[varg] , identifier[fn_body_ast] = identifier[__fn_args_to_py_ast] ( identifier[ctx] , identifier[method] . identifier[params] , identifier[method] . identifier[body] ) identifier[meta_deps] , identifier[meta_decorators] = identifier[__fn_meta] ( identifier[ctx] , identifier[meta_node] ) keyword[return] identifier[GeneratedPyAST] ( identifier[node] = identifier[ast] . identifier[Name] ( identifier[id] = identifier[py_fn_name] , identifier[ctx] = identifier[ast] . identifier[Load] ()), identifier[dependencies] = identifier[list] ( identifier[chain] ( identifier[meta_deps] , [ identifier[py_fn_node] ( identifier[name] = identifier[py_fn_name] , identifier[args] = identifier[ast] . identifier[arguments] ( identifier[args] = identifier[fn_args] , identifier[kwarg] = keyword[None] , identifier[vararg] = identifier[varg] , identifier[kwonlyargs] =[], identifier[defaults] =[], identifier[kw_defaults] =[], ), identifier[body] = identifier[fn_body_ast] , identifier[decorator_list] = identifier[list] ( identifier[chain] ( identifier[meta_decorators] , [ identifier[_BASILISP_FN_FN_NAME] ], [ identifier[_TRAMPOLINE_FN_NAME] ] keyword[if] identifier[ctx] . identifier[recur_point] . identifier[has_recur] keyword[else] [], ) ), identifier[returns] = keyword[None] , ) ], ) ), )
def __single_arity_fn_to_py_ast(ctx: GeneratorContext, node: Fn, method: FnMethod, def_name: Optional[str]=None, meta_node: Optional[MetaNode]=None) -> GeneratedPyAST: """Return a Python AST node for a function with a single arity.""" assert node.op == NodeOp.FN assert method.op == NodeOp.FN_METHOD lisp_fn_name = node.local.name if node.local is not None else None py_fn_name = __fn_name(lisp_fn_name) if def_name is None else munge(def_name) py_fn_node = ast.AsyncFunctionDef if node.is_async else ast.FunctionDef with ctx.new_symbol_table(py_fn_name), ctx.new_recur_point(method.loop_id, RecurType.FN, is_variadic=node.is_variadic): # Allow named anonymous functions to recursively call themselves if lisp_fn_name is not None: ctx.symbol_table.new_symbol(sym.symbol(lisp_fn_name), py_fn_name, LocalType.FN) # depends on [control=['if'], data=['lisp_fn_name']] (fn_args, varg, fn_body_ast) = __fn_args_to_py_ast(ctx, method.params, method.body) (meta_deps, meta_decorators) = __fn_meta(ctx, meta_node) return GeneratedPyAST(node=ast.Name(id=py_fn_name, ctx=ast.Load()), dependencies=list(chain(meta_deps, [py_fn_node(name=py_fn_name, args=ast.arguments(args=fn_args, kwarg=None, vararg=varg, kwonlyargs=[], defaults=[], kw_defaults=[]), body=fn_body_ast, decorator_list=list(chain(meta_decorators, [_BASILISP_FN_FN_NAME], [_TRAMPOLINE_FN_NAME] if ctx.recur_point.has_recur else [])), returns=None)]))) # depends on [control=['with'], data=[]]
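For orientation, a minimal runnable sketch of the shape of Python this generator aims to emit for a single-arity fn: a plain def with a munged, generated name, wrapped in Basilisp's fn decorator. The decorator below is a stand-in stub (the real one is whatever _BASILISP_FN_FN_NAME resolves to), and the function name is a made-up example of __fn_name/munge output.

def basilisp_fn(f):  # stub standing in for the real _BASILISP_FN_FN_NAME decorator
    return f

@basilisp_fn
def add_one__42(x):  # hypothetical munged name chosen by the generator
    return x + 1

print(add_one__42(1))  # 2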
def from_string(source, args=None): """Renders a template string""" if _has_jinja: logger.info('Precompiling model with arguments: {}'.format(args)) return _jenv.from_string(source).render(args or {}) if args: raise RuntimeError(_except_text) return source
def function[from_string, parameter[source, args]]: constant[Renders a template string] if name[_has_jinja] begin[:] call[name[logger].info, parameter[call[constant[Precompiling model with arguments: {}].format, parameter[name[args]]]]] return[call[call[name[_jenv].from_string, parameter[name[source]]].render, parameter[<ast.BoolOp object at 0x7da20c7c8640>]]] if name[args] begin[:] <ast.Raise object at 0x7da20c7c8b50> return[name[source]]
keyword[def] identifier[from_string] ( identifier[source] , identifier[args] = keyword[None] ): literal[string] keyword[if] identifier[_has_jinja] : identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[args] )) keyword[return] identifier[_jenv] . identifier[from_string] ( identifier[source] ). identifier[render] ( identifier[args] keyword[or] {}) keyword[if] identifier[args] : keyword[raise] identifier[RuntimeError] ( identifier[_except_text] ) keyword[return] identifier[source]
def from_string(source, args=None): """Renders a template string""" if _has_jinja: logger.info('Precompiling model with arguments: {}'.format(args)) return _jenv.from_string(source).render(args or {}) # depends on [control=['if'], data=[]] if args: raise RuntimeError(_except_text) # depends on [control=['if'], data=[]] return source
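A hedged standalone sketch of the happy path above, assuming Jinja2 is installed and that the module-level _jenv is a jinja2.Environment:

from jinja2 import Environment

_jenv = Environment()  # assumption: mirrors the module-level _jenv used above
print(_jenv.from_string("select * from {{ table }}").render({"table": "users"}))
# -> select * from users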
def add_roles(self, databaseName, roleNames, collectionName=None): """Add multiple roles Args: databaseName (str): Database Name roleNames (list of RoleSpecs): roles Keyword Args: collectionName (str): Collection Raises: ErrRoleException: role not compatible with the databaseName and/or collectionName """ for roleName in roleNames: self.add_role(databaseName, roleName, collectionName)
def function[add_roles, parameter[self, databaseName, roleNames, collectionName]]: constant[Add multiple roles Args: databaseName (str): Database Name roleNames (list of RoleSpecs): roles Keyword Args: collectionName (str): Collection Raises: ErrRoleException: role not compatible with the databaseName and/or collectionName ] for taget[name[roleName]] in starred[name[roleNames]] begin[:] call[name[self].add_role, parameter[name[databaseName], name[roleName], name[collectionName]]]
keyword[def] identifier[add_roles] ( identifier[self] , identifier[databaseName] , identifier[roleNames] , identifier[collectionName] = keyword[None] ): literal[string] keyword[for] identifier[roleName] keyword[in] identifier[roleNames] : identifier[self] . identifier[add_role] ( identifier[databaseName] , identifier[roleName] , identifier[collectionName] )
def add_roles(self, databaseName, roleNames, collectionName=None): """Add multiple roles Args: databaseName (str): Database Name roleNames (list of RoleSpecs): roles Keyword Args: collectionName (str): Collection Raises: ErrRoleException: role not compatible with the databaseName and/or collectionName """ for roleName in roleNames: self.add_role(databaseName, roleName, collectionName) # depends on [control=['for'], data=['roleName']]
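A self-contained sketch (the host class here is made up) showing that add_roles is a thin fan-out over add_role:

class DatabaseUser:  # hypothetical host class exposing add_role
    def add_role(self, databaseName, roleName, collectionName=None):
        print("grant", roleName, "on", databaseName, collectionName or "(all collections)")

    def add_roles(self, databaseName, roleNames, collectionName=None):
        for roleName in roleNames:
            self.add_role(databaseName, roleName, collectionName)

DatabaseUser().add_roles("reporting", ["read", "dbAdmin"], collectionName="events")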
def getSkeletalBoneDataCompressed(self, action, eMotionRange, pvCompressedData, unCompressedSize): """ Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller. """ fn = self.function_table.getSkeletalBoneDataCompressed punRequiredCompressedSize = c_uint32() result = fn(action, eMotionRange, pvCompressedData, unCompressedSize, byref(punRequiredCompressedSize)) return result, punRequiredCompressedSize.value
def function[getSkeletalBoneDataCompressed, parameter[self, action, eMotionRange, pvCompressedData, unCompressedSize]]: constant[ Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller. ] variable[fn] assign[=] name[self].function_table.getSkeletalBoneDataCompressed variable[punRequiredCompressedSize] assign[=] call[name[c_uint32], parameter[]] variable[result] assign[=] call[name[fn], parameter[name[action], name[eMotionRange], name[pvCompressedData], name[unCompressedSize], call[name[byref], parameter[name[punRequiredCompressedSize]]]]] return[tuple[[<ast.Name object at 0x7da204621d80>, <ast.Attribute object at 0x7da204623280>]]]
keyword[def] identifier[getSkeletalBoneDataCompressed] ( identifier[self] , identifier[action] , identifier[eMotionRange] , identifier[pvCompressedData] , identifier[unCompressedSize] ): literal[string] identifier[fn] = identifier[self] . identifier[function_table] . identifier[getSkeletalBoneDataCompressed] identifier[punRequiredCompressedSize] = identifier[c_uint32] () identifier[result] = identifier[fn] ( identifier[action] , identifier[eMotionRange] , identifier[pvCompressedData] , identifier[unCompressedSize] , identifier[byref] ( identifier[punRequiredCompressedSize] )) keyword[return] identifier[result] , identifier[punRequiredCompressedSize] . identifier[value]
def getSkeletalBoneDataCompressed(self, action, eMotionRange, pvCompressedData, unCompressedSize): """ Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller. """ fn = self.function_table.getSkeletalBoneDataCompressed punRequiredCompressedSize = c_uint32() result = fn(action, eMotionRange, pvCompressedData, unCompressedSize, byref(punRequiredCompressedSize)) return (result, punRequiredCompressedSize.value)
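The out-parameter idiom used above, reduced to a self-contained illustration. fake_fn stands in for the real function-table entry, and the size it reports is invented; the real code passes byref(...), for which ctypes.pointer(...) is the heavier but equivalent form:

import ctypes

def fake_fn(action, motion_range, data, size, required_ptr):
    # the wrapped C call writes the required byte count through the pointer
    required_ptr.contents.value = 42
    return 0  # e.g. VRInputError_None

needed = ctypes.c_uint32()
err = fake_fn(None, None, None, 0, ctypes.pointer(needed))
print(err, needed.value)  # 0 42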
def write_json_file(argname, cmd, basename, filename): """ Write JSON captured from the defined argname into the package's egg-info directory using the specified filename. """ value = getattr(cmd.distribution, argname, None) if isinstance(value, dict): value = json.dumps( value, indent=4, sort_keys=True, separators=(',', ': ')) cmd.write_or_delete_file(argname, filename, value, force=True)
def function[write_json_file, parameter[argname, cmd, basename, filename]]: constant[ Write JSON captured from the defined argname into the package's egg-info directory using the specified filename. ] variable[value] assign[=] call[name[getattr], parameter[name[cmd].distribution, name[argname], constant[None]]] if call[name[isinstance], parameter[name[value], name[dict]]] begin[:] variable[value] assign[=] call[name[json].dumps, parameter[name[value]]] call[name[cmd].write_or_delete_file, parameter[name[argname], name[filename], name[value]]]
keyword[def] identifier[write_json_file] ( identifier[argname] , identifier[cmd] , identifier[basename] , identifier[filename] ): literal[string] identifier[value] = identifier[getattr] ( identifier[cmd] . identifier[distribution] , identifier[argname] , keyword[None] ) keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ): identifier[value] = identifier[json] . identifier[dumps] ( identifier[value] , identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] , identifier[separators] =( literal[string] , literal[string] )) identifier[cmd] . identifier[write_or_delete_file] ( identifier[argname] , identifier[filename] , identifier[value] , identifier[force] = keyword[True] )
def write_json_file(argname, cmd, basename, filename): """ Write JSON captured from the defined argname into the package's egg-info directory using the specified filename. """ value = getattr(cmd.distribution, argname, None) if isinstance(value, dict): value = json.dumps(value, indent=4, sort_keys=True, separators=(',', ': ')) # depends on [control=['if'], data=[]] cmd.write_or_delete_file(argname, filename, value, force=True)
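Hedged usage sketch: writers like this are typically bound to one distribution attribute with functools.partial and registered under the egg_info.writers entry-point group. The attribute name and entry-point spec below are illustrative, not taken from the source:

from functools import partial

write_package_json = partial(write_json_file, 'package_json')

# setup(... entry_points={
#     'egg_info.writers': ['package.json = mypkg.egg:write_package_json'],
# })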
def build_tags_part(tags, attr_normal, attr_focus):
    """
    create an urwid.Columns widget (wrapped in appropriate Attributes)
    to display a list of tag strings, as part of a threadline.

    :param tags: list of tag strings to include
    :type tags: list of str
    :param attr_normal: urwid attribute to use if unfocussed
    :param attr_focus: urwid attribute to use if focussed
    :return: overall width in characters and a Columns widget.
    :rtype: tuple[int, urwid.Columns]
    """
    part_w = None
    width = None
    tag_widgets = []
    cols = []
    width = -1

    # create individual TagWidgets and sort them
    tag_widgets = [TagWidget(t, attr_normal, attr_focus) for t in tags]
    tag_widgets = sorted(tag_widgets)

    for tag_widget in tag_widgets:
        if not tag_widget.hidden:
            wrapped_tagwidget = tag_widget
            tag_width = tag_widget.width()
            cols.append(('fixed', tag_width, wrapped_tagwidget))
            width += tag_width + 1

    if cols:
        part_w = urwid.Columns(cols, dividechars=1)

    return width, part_w
def function[build_tags_part, parameter[tags, attr_normal, attr_focus]]:
    constant[
    create an urwid.Columns widget (wrapped in appropriate Attributes)
    to display a list of tag strings, as part of a threadline.

    :param tags: list of tag strings to include
    :type tags: list of str
    :param attr_normal: urwid attribute to use if unfocussed
    :param attr_focus: urwid attribute to use if focussed
    :return: overall width in characters and a Columns widget.
    :rtype: tuple[int, urwid.Columns]
    ]
    variable[part_w] assign[=] constant[None]
    variable[width] assign[=] constant[None]
    variable[tag_widgets] assign[=] list[[]]
    variable[cols] assign[=] list[[]]
    variable[width] assign[=] <ast.UnaryOp object at 0x7da1b0797f70>
    variable[tag_widgets] assign[=] <ast.ListComp object at 0x7da1b0794250>
    variable[tag_widgets] assign[=] call[name[sorted], parameter[name[tag_widgets]]]
    for taget[name[tag_widget]] in starred[name[tag_widgets]] begin[:]
        if <ast.UnaryOp object at 0x7da1b0797b80> begin[:]
            variable[wrapped_tagwidget] assign[=] name[tag_widget]
            variable[tag_width] assign[=] call[name[tag_widget].width, parameter[]]
            call[name[cols].append, parameter[tuple[[<ast.Constant object at 0x7da1b0795de0>, <ast.Name object at 0x7da1b0796080>, <ast.Name object at 0x7da1b0796b60>]]]]
            <ast.AugAssign object at 0x7da1b07963e0>
    if name[cols] begin[:]
        variable[part_w] assign[=] call[name[urwid].Columns, parameter[name[cols]]]
    return[tuple[[<ast.Name object at 0x7da1b0794340>, <ast.Name object at 0x7da1b0794580>]]]
keyword[def] identifier[build_tags_part] ( identifier[tags] , identifier[attr_normal] , identifier[attr_focus] ): literal[string] identifier[part_w] = keyword[None] identifier[width] = keyword[None] identifier[tag_widgets] =[] identifier[cols] =[] identifier[width] =- literal[int] identifier[tag_widgets] =[ identifier[TagWidget] ( identifier[t] , identifier[attr_normal] , identifier[attr_focus] ) keyword[for] identifier[t] keyword[in] identifier[tags] ] identifier[tag_widgets] = identifier[sorted] ( identifier[tag_widgets] ) keyword[for] identifier[tag_widget] keyword[in] identifier[tag_widgets] : keyword[if] keyword[not] identifier[tag_widget] . identifier[hidden] : identifier[wrapped_tagwidget] = identifier[tag_widget] identifier[tag_width] = identifier[tag_widget] . identifier[width] () identifier[cols] . identifier[append] (( literal[string] , identifier[tag_width] , identifier[wrapped_tagwidget] )) identifier[width] += identifier[tag_width] + literal[int] keyword[if] identifier[cols] : identifier[part_w] = identifier[urwid] . identifier[Columns] ( identifier[cols] , identifier[dividechars] = literal[int] ) keyword[return] identifier[width] , identifier[part_w]
def build_tags_part(tags, attr_normal, attr_focus):
    """
    create an urwid.Columns widget (wrapped in appropriate Attributes)
    to display a list of tag strings, as part of a threadline.

    :param tags: list of tag strings to include
    :type tags: list of str
    :param attr_normal: urwid attribute to use if unfocussed
    :param attr_focus: urwid attribute to use if focussed
    :return: overall width in characters and a Columns widget.
    :rtype: tuple[int, urwid.Columns]
    """
    part_w = None
    width = None
    tag_widgets = []
    cols = []
    width = -1
    # create individual TagWidgets and sort them
    tag_widgets = [TagWidget(t, attr_normal, attr_focus) for t in tags]
    tag_widgets = sorted(tag_widgets)
    for tag_widget in tag_widgets:
        if not tag_widget.hidden:
            wrapped_tagwidget = tag_widget
            tag_width = tag_widget.width()
            cols.append(('fixed', tag_width, wrapped_tagwidget))
            width += tag_width + 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag_widget']]
    if cols:
        part_w = urwid.Columns(cols, dividechars=1) # depends on [control=['if'], data=[]]
    return (width, part_w)
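The width bookkeeping above is easy to misread; a tiny standalone check shows why the counter starts at -1: adding (tag_width + 1) per visible tag counts one divider between each pair of tags, not one after the last tag.

widths = [3, 5, 4]   # e.g. widths of three visible tag widgets
total = -1
for w in widths:
    total += w + 1
print(total)         # 14 == 3 + 5 + 4 plus two 1-char dividers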
def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):
    """Uploads a task to the database; a timestamp will be added automatically.

    Parameters
    -----------
    task_name : str
        The task name.
    script : str
        File name of the python script.
    hyper_parameters : dictionary
        The hyper parameters passed into the script.
    saved_result_keys : list of str
        The keys of the task results to keep in the database when the task finishes.
    kwargs : other parameters
        User-customized parameters such as description or version number.

    Examples
    -----------
    Uploads a task

    >>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')

    Finds and runs the latest task

    >>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
    >>> db.run_top_task(sess=sess, sort=[("time", -1)])

    Finds and runs the oldest task

    >>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
    >>> db.run_top_task(sess=sess, sort=[("time", 1)])

    """
    if not isinstance(task_name, str):  # is None:
        raise Exception("task_name should be string")
    if not isinstance(script, str):  # is None:
        raise Exception("script should be string")
    if hyper_parameters is None:
        hyper_parameters = {}
    if saved_result_keys is None:
        saved_result_keys = []

    self._fill_project_info(kwargs)
    kwargs.update({'time': datetime.utcnow()})
    kwargs.update({'hyper_parameters': hyper_parameters})
    kwargs.update({'saved_result_keys': saved_result_keys})

    _script = open(script, 'rb').read()

    kwargs.update({'status': 'pending', 'script': _script, 'result': {}})
    self.db.Task.insert_one(kwargs)
    logging.info("[Database] Saved Task - task_name: {} script: {}".format(task_name, script))
def function[create_task, parameter[self, task_name, script, hyper_parameters, saved_result_keys]]:
    constant[Uploads a task to the database; a timestamp will be added automatically.

    Parameters
    -----------
    task_name : str
        The task name.
    script : str
        File name of the python script.
    hyper_parameters : dictionary
        The hyper parameters passed into the script.
    saved_result_keys : list of str
        The keys of the task results to keep in the database when the task finishes.
    kwargs : other parameters
        User-customized parameters such as description or version number.

    Examples
    -----------
    Uploads a task

    >>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')

    Finds and runs the latest task

    >>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
    >>> db.run_top_task(sess=sess, sort=[("time", -1)])

    Finds and runs the oldest task

    >>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
    >>> db.run_top_task(sess=sess, sort=[("time", 1)])
    ]
    if <ast.UnaryOp object at 0x7da1b002b4f0> begin[:]
        <ast.Raise object at 0x7da1b0029fc0>
    if <ast.UnaryOp object at 0x7da1b0029d20> begin[:]
        <ast.Raise object at 0x7da1b00282b0>
    if compare[name[hyper_parameters] is constant[None]] begin[:]
        variable[hyper_parameters] assign[=] dictionary[[], []]
    if compare[name[saved_result_keys] is constant[None]] begin[:]
        variable[saved_result_keys] assign[=] list[[]]
    call[name[self]._fill_project_info, parameter[name[kwargs]]]
    call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da1b002bac0>], [<ast.Call object at 0x7da1b0028280>]]]]
    call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0028d90>], [<ast.Name object at 0x7da1b00293c0>]]]]
    call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0029d80>], [<ast.Name object at 0x7da1b0029090>]]]]
    variable[_script] assign[=] call[call[name[open], parameter[name[script], constant[rb]]].read, parameter[]]
    call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da1b002b760>, <ast.Constant object at 0x7da1b0029660>, <ast.Constant object at 0x7da1b002b460>], [<ast.Constant object at 0x7da1b0029900>, <ast.Name object at 0x7da1b002abf0>, <ast.Dict object at 0x7da1b0029d50>]]]]
    call[name[self].db.Task.insert_one, parameter[name[kwargs]]]
    call[name[logging].info, parameter[call[constant[[Database] Saved Task - task_name: {} script: {}].format, parameter[name[task_name], name[script]]]]]
keyword[def] identifier[create_task] ( identifier[self] , identifier[task_name] = keyword[None] , identifier[script] = keyword[None] , identifier[hyper_parameters] = keyword[None] , identifier[saved_result_keys] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[task_name] , identifier[str] ): keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[script] , identifier[str] ): keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[hyper_parameters] keyword[is] keyword[None] : identifier[hyper_parameters] ={} keyword[if] identifier[saved_result_keys] keyword[is] keyword[None] : identifier[saved_result_keys] =[] identifier[self] . identifier[_fill_project_info] ( identifier[kwargs] ) identifier[kwargs] . identifier[update] ({ literal[string] : identifier[datetime] . identifier[utcnow] ()}) identifier[kwargs] . identifier[update] ({ literal[string] : identifier[hyper_parameters] }) identifier[kwargs] . identifier[update] ({ literal[string] : identifier[saved_result_keys] }) identifier[_script] = identifier[open] ( identifier[script] , literal[string] ). identifier[read] () identifier[kwargs] . identifier[update] ({ literal[string] : literal[string] , literal[string] : identifier[_script] , literal[string] :{}}) identifier[self] . identifier[db] . identifier[Task] . identifier[insert_one] ( identifier[kwargs] ) identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[task_name] , identifier[script] ))
def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):
    """Uploads a task to the database; a timestamp will be added automatically.

    Parameters
    -----------
    task_name : str
        The task name.
    script : str
        File name of the python script.
    hyper_parameters : dictionary
        The hyper parameters passed into the script.
    saved_result_keys : list of str
        The keys of the task results to keep in the database when the task finishes.
    kwargs : other parameters
        User-customized parameters such as description or version number.

    Examples
    -----------
    Uploads a task

    >>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')

    Finds and runs the latest task

    >>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
    >>> db.run_top_task(sess=sess, sort=[("time", -1)])

    Finds and runs the oldest task

    >>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
    >>> db.run_top_task(sess=sess, sort=[("time", 1)])

    """
    if not isinstance(task_name, str): # is None:
        raise Exception('task_name should be string') # depends on [control=['if'], data=[]]
    if not isinstance(script, str): # is None:
        raise Exception('script should be string') # depends on [control=['if'], data=[]]
    if hyper_parameters is None:
        hyper_parameters = {} # depends on [control=['if'], data=['hyper_parameters']]
    if saved_result_keys is None:
        saved_result_keys = [] # depends on [control=['if'], data=['saved_result_keys']]
    self._fill_project_info(kwargs)
    kwargs.update({'time': datetime.utcnow()})
    kwargs.update({'hyper_parameters': hyper_parameters})
    kwargs.update({'saved_result_keys': saved_result_keys})
    _script = open(script, 'rb').read()
    kwargs.update({'status': 'pending', 'script': _script, 'result': {}})
    self.db.Task.insert_one(kwargs)
    logging.info('[Database] Saved Task - task_name: {} script: {}'.format(task_name, script))
def parse_parameters(payflowpro_response_data): """ Parses a set of Payflow Pro response parameter name and value pairs into a list of PayflowProObjects, and returns a tuple containing the object list and a dictionary containing any unconsumed data. The first item in the object list will always be the Response object, and the RecurringPayments object (if any) will be last. The presence of any unconsumed data in the resulting dictionary probably indicates an error or oversight in the PayflowProObject definitions. """ def build_class(klass, unconsumed_data): known_att_names_set = set(klass.base_fields.keys()) available_atts_set = known_att_names_set.intersection(unconsumed_data) if available_atts_set: available_atts = dict() for name in available_atts_set: available_atts[name] = unconsumed_data[name] del unconsumed_data[name] return klass(**available_atts) return None unconsumed_data = payflowpro_response_data.copy() # Parse the response data first response = build_class(Response, unconsumed_data) result_objects = [response] # Parse the remaining data for klass in object.__class__.__subclasses__(PayflowProObject): obj = build_class(klass, unconsumed_data) if obj: result_objects.append(obj) # Special handling of RecurringPayments p_count = 1 payments = [] while ("p_result%d" % p_count) in unconsumed_data: payments.append(RecurringPayment( p_result = unconsumed_data.pop("p_result%d" % p_count, None), p_pnref = unconsumed_data.pop("p_pnref%d" % p_count, None), p_transtate = unconsumed_data.pop("p_transtate%d" % p_count, None), p_tender = unconsumed_data.pop("p_tender%d" % p_count, None), p_transtime = unconsumed_data.pop("p_transtime%d" % p_count, None), p_amt = unconsumed_data.pop("p_amt%d" % p_count, None))) p_count += 1 if payments: result_objects.append(RecurringPayments(payments=payments)) return (result_objects, unconsumed_data,)
def function[parse_parameters, parameter[payflowpro_response_data]]: constant[ Parses a set of Payflow Pro response parameter name and value pairs into a list of PayflowProObjects, and returns a tuple containing the object list and a dictionary containing any unconsumed data. The first item in the object list will always be the Response object, and the RecurringPayments object (if any) will be last. The presence of any unconsumed data in the resulting dictionary probably indicates an error or oversight in the PayflowProObject definitions. ] def function[build_class, parameter[klass, unconsumed_data]]: variable[known_att_names_set] assign[=] call[name[set], parameter[call[name[klass].base_fields.keys, parameter[]]]] variable[available_atts_set] assign[=] call[name[known_att_names_set].intersection, parameter[name[unconsumed_data]]] if name[available_atts_set] begin[:] variable[available_atts] assign[=] call[name[dict], parameter[]] for taget[name[name]] in starred[name[available_atts_set]] begin[:] call[name[available_atts]][name[name]] assign[=] call[name[unconsumed_data]][name[name]] <ast.Delete object at 0x7da1b0f50a00> return[call[name[klass], parameter[]]] return[constant[None]] variable[unconsumed_data] assign[=] call[name[payflowpro_response_data].copy, parameter[]] variable[response] assign[=] call[name[build_class], parameter[name[Response], name[unconsumed_data]]] variable[result_objects] assign[=] list[[<ast.Name object at 0x7da1b0f52320>]] for taget[name[klass]] in starred[call[name[object].__class__.__subclasses__, parameter[name[PayflowProObject]]]] begin[:] variable[obj] assign[=] call[name[build_class], parameter[name[klass], name[unconsumed_data]]] if name[obj] begin[:] call[name[result_objects].append, parameter[name[obj]]] variable[p_count] assign[=] constant[1] variable[payments] assign[=] list[[]] while compare[binary_operation[constant[p_result%d] <ast.Mod object at 0x7da2590d6920> name[p_count]] in name[unconsumed_data]] begin[:] call[name[payments].append, parameter[call[name[RecurringPayment], parameter[]]]] <ast.AugAssign object at 0x7da1b0f0e710> if name[payments] begin[:] call[name[result_objects].append, parameter[call[name[RecurringPayments], parameter[]]]] return[tuple[[<ast.Name object at 0x7da1b0f0f280>, <ast.Name object at 0x7da1b0f0d480>]]]
keyword[def] identifier[parse_parameters] ( identifier[payflowpro_response_data] ): literal[string] keyword[def] identifier[build_class] ( identifier[klass] , identifier[unconsumed_data] ): identifier[known_att_names_set] = identifier[set] ( identifier[klass] . identifier[base_fields] . identifier[keys] ()) identifier[available_atts_set] = identifier[known_att_names_set] . identifier[intersection] ( identifier[unconsumed_data] ) keyword[if] identifier[available_atts_set] : identifier[available_atts] = identifier[dict] () keyword[for] identifier[name] keyword[in] identifier[available_atts_set] : identifier[available_atts] [ identifier[name] ]= identifier[unconsumed_data] [ identifier[name] ] keyword[del] identifier[unconsumed_data] [ identifier[name] ] keyword[return] identifier[klass] (** identifier[available_atts] ) keyword[return] keyword[None] identifier[unconsumed_data] = identifier[payflowpro_response_data] . identifier[copy] () identifier[response] = identifier[build_class] ( identifier[Response] , identifier[unconsumed_data] ) identifier[result_objects] =[ identifier[response] ] keyword[for] identifier[klass] keyword[in] identifier[object] . identifier[__class__] . identifier[__subclasses__] ( identifier[PayflowProObject] ): identifier[obj] = identifier[build_class] ( identifier[klass] , identifier[unconsumed_data] ) keyword[if] identifier[obj] : identifier[result_objects] . identifier[append] ( identifier[obj] ) identifier[p_count] = literal[int] identifier[payments] =[] keyword[while] ( literal[string] % identifier[p_count] ) keyword[in] identifier[unconsumed_data] : identifier[payments] . identifier[append] ( identifier[RecurringPayment] ( identifier[p_result] = identifier[unconsumed_data] . identifier[pop] ( literal[string] % identifier[p_count] , keyword[None] ), identifier[p_pnref] = identifier[unconsumed_data] . identifier[pop] ( literal[string] % identifier[p_count] , keyword[None] ), identifier[p_transtate] = identifier[unconsumed_data] . identifier[pop] ( literal[string] % identifier[p_count] , keyword[None] ), identifier[p_tender] = identifier[unconsumed_data] . identifier[pop] ( literal[string] % identifier[p_count] , keyword[None] ), identifier[p_transtime] = identifier[unconsumed_data] . identifier[pop] ( literal[string] % identifier[p_count] , keyword[None] ), identifier[p_amt] = identifier[unconsumed_data] . identifier[pop] ( literal[string] % identifier[p_count] , keyword[None] ))) identifier[p_count] += literal[int] keyword[if] identifier[payments] : identifier[result_objects] . identifier[append] ( identifier[RecurringPayments] ( identifier[payments] = identifier[payments] )) keyword[return] ( identifier[result_objects] , identifier[unconsumed_data] ,)
def parse_parameters(payflowpro_response_data): """ Parses a set of Payflow Pro response parameter name and value pairs into a list of PayflowProObjects, and returns a tuple containing the object list and a dictionary containing any unconsumed data. The first item in the object list will always be the Response object, and the RecurringPayments object (if any) will be last. The presence of any unconsumed data in the resulting dictionary probably indicates an error or oversight in the PayflowProObject definitions. """ def build_class(klass, unconsumed_data): known_att_names_set = set(klass.base_fields.keys()) available_atts_set = known_att_names_set.intersection(unconsumed_data) if available_atts_set: available_atts = dict() for name in available_atts_set: available_atts[name] = unconsumed_data[name] del unconsumed_data[name] # depends on [control=['for'], data=['name']] return klass(**available_atts) # depends on [control=['if'], data=[]] return None unconsumed_data = payflowpro_response_data.copy() # Parse the response data first response = build_class(Response, unconsumed_data) result_objects = [response] # Parse the remaining data for klass in object.__class__.__subclasses__(PayflowProObject): obj = build_class(klass, unconsumed_data) if obj: result_objects.append(obj) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['klass']] # Special handling of RecurringPayments p_count = 1 payments = [] while 'p_result%d' % p_count in unconsumed_data: payments.append(RecurringPayment(p_result=unconsumed_data.pop('p_result%d' % p_count, None), p_pnref=unconsumed_data.pop('p_pnref%d' % p_count, None), p_transtate=unconsumed_data.pop('p_transtate%d' % p_count, None), p_tender=unconsumed_data.pop('p_tender%d' % p_count, None), p_transtime=unconsumed_data.pop('p_transtime%d' % p_count, None), p_amt=unconsumed_data.pop('p_amt%d' % p_count, None))) p_count += 1 # depends on [control=['while'], data=['unconsumed_data']] if payments: result_objects.append(RecurringPayments(payments=payments)) # depends on [control=['if'], data=[]] return (result_objects, unconsumed_data)
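A hypothetical round trip. The field names follow Payflow Pro's RESULT/PNREF/RESPMSG convention, but which objects actually come back depends on the PayflowProObject subclasses defined elsewhere in the module:

data = {'result': '0', 'pnref': 'V19A2B3C4D5E', 'respmsg': 'Approved'}
objects, leftover = parse_parameters(data)
print(objects[0])   # the Response object always comes first
print(leftover)     # {} once every key has been consumed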
def infer_import(self, context=None, asname=True): """infer an Import node: return the imported module/object""" name = context.lookupname if name is None: raise exceptions.InferenceError(node=self, context=context) try: if asname: yield self.do_import_module(self.real_name(name)) else: yield self.do_import_module(name) except exceptions.AstroidBuildingError as exc: raise exceptions.InferenceError(node=self, context=context) from exc
def function[infer_import, parameter[self, context, asname]]: constant[infer an Import node: return the imported module/object] variable[name] assign[=] name[context].lookupname if compare[name[name] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1e65d50> <ast.Try object at 0x7da1b1e64c10>
keyword[def] identifier[infer_import] ( identifier[self] , identifier[context] = keyword[None] , identifier[asname] = keyword[True] ): literal[string] identifier[name] = identifier[context] . identifier[lookupname] keyword[if] identifier[name] keyword[is] keyword[None] : keyword[raise] identifier[exceptions] . identifier[InferenceError] ( identifier[node] = identifier[self] , identifier[context] = identifier[context] ) keyword[try] : keyword[if] identifier[asname] : keyword[yield] identifier[self] . identifier[do_import_module] ( identifier[self] . identifier[real_name] ( identifier[name] )) keyword[else] : keyword[yield] identifier[self] . identifier[do_import_module] ( identifier[name] ) keyword[except] identifier[exceptions] . identifier[AstroidBuildingError] keyword[as] identifier[exc] : keyword[raise] identifier[exceptions] . identifier[InferenceError] ( identifier[node] = identifier[self] , identifier[context] = identifier[context] ) keyword[from] identifier[exc]
def infer_import(self, context=None, asname=True): """infer an Import node: return the imported module/object""" name = context.lookupname if name is None: raise exceptions.InferenceError(node=self, context=context) # depends on [control=['if'], data=[]] try: if asname: yield self.do_import_module(self.real_name(name)) # depends on [control=['if'], data=[]] else: yield self.do_import_module(name) # depends on [control=['try'], data=[]] except exceptions.AstroidBuildingError as exc: raise exceptions.InferenceError(node=self, context=context) from exc # depends on [control=['except'], data=['exc']]
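This inference is what the public astroid API surfaces when you infer a name bound by an import; a small runnable check, assuming astroid is installed:

import astroid

name_node = astroid.extract_node("import collections\ncollections")
print(next(name_node.infer()))  # the inferred Module for 'collections'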
def set_title(self, title): """ Set title. You can set multiple titles. :Args: - title: Title value """ self.title = title self.add_metadata('DC', 'title', self.title)
def function[set_title, parameter[self, title]]: constant[ Set title. You can set multiple titles. :Args: - title: Title value ] name[self].title assign[=] name[title] call[name[self].add_metadata, parameter[constant[DC], constant[title], name[self].title]]
keyword[def] identifier[set_title] ( identifier[self] , identifier[title] ): literal[string] identifier[self] . identifier[title] = identifier[title] identifier[self] . identifier[add_metadata] ( literal[string] , literal[string] , identifier[self] . identifier[title] )
def set_title(self, title): """ Set title. You can set multiple titles. :Args: - title: Title value """ self.title = title self.add_metadata('DC', 'title', self.title)
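Runnable with EbookLib installed; the title is stored on the book and mirrored into its DC metadata:

from ebooklib import epub

book = epub.EpubBook()
book.set_title("My Little Book")
print(book.title)  # My Little Book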
def ball_count(cls, ball_tally, strike_tally, pitch_res):
    """
    Ball/Strike counter
    :param ball_tally: Ball tally
    :param strike_tally: Strike tally
    :param pitch_res: pitching result (Retrosheet format)
    :return: ball count, strike count
    """
    b, s = ball_tally, strike_tally
    if pitch_res == "B":
        if ball_tally < 4:
            b += 1
    elif pitch_res == "S" or pitch_res == "C" or pitch_res == "X":
        if strike_tally < 3:
            s += 1
    elif pitch_res == "F":
        if strike_tally < 2:
            s += 1
    return b, s
def function[ball_count, parameter[cls, ball_tally, strike_tally, pitch_res]]:
    constant[
    Ball/Strike counter
    :param ball_tally: Ball tally
    :param strike_tally: Strike tally
    :param pitch_res: pitching result (Retrosheet format)
    :return: ball count, strike count
    ]
    <ast.Tuple object at 0x7da20c993340> assign[=] tuple[[<ast.Name object at 0x7da20c991d50>, <ast.Name object at 0x7da20c9925f0>]]
    if compare[name[pitch_res] equal[==] constant[B]] begin[:]
        if compare[name[ball_tally] less[<] constant[4]] begin[:]
            <ast.AugAssign object at 0x7da20c9907c0>
    return[tuple[[<ast.Name object at 0x7da18f00ef80>, <ast.Name object at 0x7da18f00dcc0>]]]
keyword[def] identifier[ball_count] ( identifier[cls] , identifier[ball_tally] , identifier[strike_tally] , identifier[pitch_res] ): literal[string] identifier[b] , identifier[s] = identifier[ball_tally] , identifier[strike_tally] keyword[if] identifier[pitch_res] == literal[string] : keyword[if] identifier[ball_tally] < literal[int] : identifier[b] += literal[int] keyword[elif] identifier[pitch_res] == literal[string] keyword[or] identifier[pitch_res] == literal[string] keyword[or] identifier[pitch_res] == literal[string] : keyword[if] identifier[strike_tally] < literal[int] : identifier[s] += literal[int] keyword[elif] identifier[pitch_res] == literal[string] : keyword[if] identifier[strike_tally] < literal[int] : identifier[s] += literal[int] keyword[return] identifier[b] , identifier[s]
def ball_count(cls, ball_tally, strike_tally, pitch_res):
    """
    Ball/Strike counter
    :param ball_tally: Ball tally
    :param strike_tally: Strike tally
    :param pitch_res: pitching result (Retrosheet format)
    :return: ball count, strike count
    """
    (b, s) = (ball_tally, strike_tally)
    if pitch_res == 'B':
        if ball_tally < 4:
            b += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    elif pitch_res == 'S' or pitch_res == 'C' or pitch_res == 'X':
        if strike_tally < 3:
            s += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    elif pitch_res == 'F':
        if strike_tally < 2:
            s += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    return (b, s)
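Worked counts, assuming ball_count from above is in scope and hosting the classmethod on a made-up stub class; note a foul with two strikes leaves the count unchanged:

class PitchCounter:  # hypothetical host class for the classmethod
    ball_count = classmethod(ball_count)

print(PitchCounter.ball_count(1, 2, 'F'))  # (1, 2) -- a foul can't be strike three
print(PitchCounter.ball_count(3, 1, 'B'))  # (4, 1) -- ball four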
def mergeConfigObj(configObj, inputDict): """ Merge the inputDict values into an existing given configObj instance. The inputDict is a "flat" dict - it has no sections/sub-sections. The configObj may have sub-sections nested to any depth. This will raise a DuplicateKeyError if one of the inputDict keys is used more than once in configObj (e.g. within two different sub-sections). """ # Expanded upon Warren's version in astrodrizzle # Verify that all inputDict keys in configObj are unique within configObj for key in inputDict: if countKey(configObj, key) > 1: raise DuplicateKeyError(key) # Now update configObj with each inputDict item for key in inputDict: setPar(configObj, key, inputDict[key])
def function[mergeConfigObj, parameter[configObj, inputDict]]: constant[ Merge the inputDict values into an existing given configObj instance. The inputDict is a "flat" dict - it has no sections/sub-sections. The configObj may have sub-sections nested to any depth. This will raise a DuplicateKeyError if one of the inputDict keys is used more than once in configObj (e.g. within two different sub-sections). ] for taget[name[key]] in starred[name[inputDict]] begin[:] if compare[call[name[countKey], parameter[name[configObj], name[key]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b0edde70> for taget[name[key]] in starred[name[inputDict]] begin[:] call[name[setPar], parameter[name[configObj], name[key], call[name[inputDict]][name[key]]]]
keyword[def] identifier[mergeConfigObj] ( identifier[configObj] , identifier[inputDict] ): literal[string] keyword[for] identifier[key] keyword[in] identifier[inputDict] : keyword[if] identifier[countKey] ( identifier[configObj] , identifier[key] )> literal[int] : keyword[raise] identifier[DuplicateKeyError] ( identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[inputDict] : identifier[setPar] ( identifier[configObj] , identifier[key] , identifier[inputDict] [ identifier[key] ])
def mergeConfigObj(configObj, inputDict): """ Merge the inputDict values into an existing given configObj instance. The inputDict is a "flat" dict - it has no sections/sub-sections. The configObj may have sub-sections nested to any depth. This will raise a DuplicateKeyError if one of the inputDict keys is used more than once in configObj (e.g. within two different sub-sections). """ # Expanded upon Warren's version in astrodrizzle # Verify that all inputDict keys in configObj are unique within configObj for key in inputDict: if countKey(configObj, key) > 1: raise DuplicateKeyError(key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # Now update configObj with each inputDict item for key in inputDict: setPar(configObj, key, inputDict[key]) # depends on [control=['for'], data=['key']]
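A self-contained mini-version of the duplicate-key guard using plain nested dicts (a configobj behaves like a nested dict for this purpose); count_key here only approximates the module helper countKey, and the key names are invented:

def count_key(section, key):
    n = int(key in section)
    for value in section.values():
        if isinstance(value, dict):
            n += count_key(value, key)
    return n

cfg = {'readnoise': 5.0, 'sky': {'skywidth': 0.1}}
assert count_key(cfg, 'skywidth') == 1   # unique across sections: safe to merge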
def make_separate_indels_and_one_alt_with_all_snps_no_combinations(self, ref_seq): '''Returns a VCF record, where each indel from this cluster is in a separate ALT. Then all the remaining SNPs are applied to make one ALT. If >1 SNP in same place, either one might be used''' final_start_position = min([x.POS for x in self.vcf_records]) final_end_position = max([x.ref_end_pos() for x in self.vcf_records]) snps = [] new_vcf_records = [] for record in self.vcf_records: if record.is_snp(): snps.append(copy.copy(record)) else: new_record = copy.copy(record) new_record.add_flanking_seqs(ref_seq, final_start_position, final_end_position) new_vcf_records.append(new_record) if len(snps): new_record = copy.copy(snps[0]) for snp in snps[1:]: merged = new_record.merge(snp, ref_seq) if merged is not None: new_record = merged new_record.add_flanking_seqs(ref_seq, final_start_position, final_end_position) new_vcf_records.append(new_record) alts = ','.join(sorted(list(set([x.ALT[0] for x in new_vcf_records])))) new_record = vcf_record.VcfRecord('\t'.join([self.vcf_records[0].CHROM, str(final_start_position + 1), '.', new_vcf_records[0].REF, alts, '.', 'PASS', '.'])) return new_record
def function[make_separate_indels_and_one_alt_with_all_snps_no_combinations, parameter[self, ref_seq]]: constant[Returns a VCF record, where each indel from this cluster is in a separate ALT. Then all the remaining SNPs are applied to make one ALT. If >1 SNP in same place, either one might be used] variable[final_start_position] assign[=] call[name[min], parameter[<ast.ListComp object at 0x7da1b1eacf10>]] variable[final_end_position] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da1b1eaca90>]] variable[snps] assign[=] list[[]] variable[new_vcf_records] assign[=] list[[]] for taget[name[record]] in starred[name[self].vcf_records] begin[:] if call[name[record].is_snp, parameter[]] begin[:] call[name[snps].append, parameter[call[name[copy].copy, parameter[name[record]]]]] if call[name[len], parameter[name[snps]]] begin[:] variable[new_record] assign[=] call[name[copy].copy, parameter[call[name[snps]][constant[0]]]] for taget[name[snp]] in starred[call[name[snps]][<ast.Slice object at 0x7da1b1d4d1b0>]] begin[:] variable[merged] assign[=] call[name[new_record].merge, parameter[name[snp], name[ref_seq]]] if compare[name[merged] is_not constant[None]] begin[:] variable[new_record] assign[=] name[merged] call[name[new_record].add_flanking_seqs, parameter[name[ref_seq], name[final_start_position], name[final_end_position]]] call[name[new_vcf_records].append, parameter[name[new_record]]] variable[alts] assign[=] call[constant[,].join, parameter[call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[<ast.ListComp object at 0x7da1b1d4f340>]]]]]]]] variable[new_record] assign[=] call[name[vcf_record].VcfRecord, parameter[call[constant[ ].join, parameter[list[[<ast.Attribute object at 0x7da1b1d4d330>, <ast.Call object at 0x7da1b1d4e3e0>, <ast.Constant object at 0x7da1b1d4cf70>, <ast.Attribute object at 0x7da1b1d4c670>, <ast.Name object at 0x7da1b1d4de70>, <ast.Constant object at 0x7da1b1d4e560>, <ast.Constant object at 0x7da1b1d4c610>, <ast.Constant object at 0x7da1b1d4fa90>]]]]]] return[name[new_record]]
keyword[def] identifier[make_separate_indels_and_one_alt_with_all_snps_no_combinations] ( identifier[self] , identifier[ref_seq] ): literal[string] identifier[final_start_position] = identifier[min] ([ identifier[x] . identifier[POS] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[vcf_records] ]) identifier[final_end_position] = identifier[max] ([ identifier[x] . identifier[ref_end_pos] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[vcf_records] ]) identifier[snps] =[] identifier[new_vcf_records] =[] keyword[for] identifier[record] keyword[in] identifier[self] . identifier[vcf_records] : keyword[if] identifier[record] . identifier[is_snp] (): identifier[snps] . identifier[append] ( identifier[copy] . identifier[copy] ( identifier[record] )) keyword[else] : identifier[new_record] = identifier[copy] . identifier[copy] ( identifier[record] ) identifier[new_record] . identifier[add_flanking_seqs] ( identifier[ref_seq] , identifier[final_start_position] , identifier[final_end_position] ) identifier[new_vcf_records] . identifier[append] ( identifier[new_record] ) keyword[if] identifier[len] ( identifier[snps] ): identifier[new_record] = identifier[copy] . identifier[copy] ( identifier[snps] [ literal[int] ]) keyword[for] identifier[snp] keyword[in] identifier[snps] [ literal[int] :]: identifier[merged] = identifier[new_record] . identifier[merge] ( identifier[snp] , identifier[ref_seq] ) keyword[if] identifier[merged] keyword[is] keyword[not] keyword[None] : identifier[new_record] = identifier[merged] identifier[new_record] . identifier[add_flanking_seqs] ( identifier[ref_seq] , identifier[final_start_position] , identifier[final_end_position] ) identifier[new_vcf_records] . identifier[append] ( identifier[new_record] ) identifier[alts] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[list] ( identifier[set] ([ identifier[x] . identifier[ALT] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[new_vcf_records] ])))) identifier[new_record] = identifier[vcf_record] . identifier[VcfRecord] ( literal[string] . identifier[join] ([ identifier[self] . identifier[vcf_records] [ literal[int] ]. identifier[CHROM] , identifier[str] ( identifier[final_start_position] + literal[int] ), literal[string] , identifier[new_vcf_records] [ literal[int] ]. identifier[REF] , identifier[alts] , literal[string] , literal[string] , literal[string] ])) keyword[return] identifier[new_record]
def make_separate_indels_and_one_alt_with_all_snps_no_combinations(self, ref_seq): """Returns a VCF record, where each indel from this cluster is in a separate ALT. Then all the remaining SNPs are applied to make one ALT. If >1 SNP in same place, either one might be used""" final_start_position = min([x.POS for x in self.vcf_records]) final_end_position = max([x.ref_end_pos() for x in self.vcf_records]) snps = [] new_vcf_records = [] for record in self.vcf_records: if record.is_snp(): snps.append(copy.copy(record)) # depends on [control=['if'], data=[]] else: new_record = copy.copy(record) new_record.add_flanking_seqs(ref_seq, final_start_position, final_end_position) new_vcf_records.append(new_record) # depends on [control=['for'], data=['record']] if len(snps): new_record = copy.copy(snps[0]) for snp in snps[1:]: merged = new_record.merge(snp, ref_seq) if merged is not None: new_record = merged # depends on [control=['if'], data=['merged']] # depends on [control=['for'], data=['snp']] new_record.add_flanking_seqs(ref_seq, final_start_position, final_end_position) new_vcf_records.append(new_record) # depends on [control=['if'], data=[]] alts = ','.join(sorted(list(set([x.ALT[0] for x in new_vcf_records])))) new_record = vcf_record.VcfRecord('\t'.join([self.vcf_records[0].CHROM, str(final_start_position + 1), '.', new_vcf_records[0].REF, alts, '.', 'PASS', '.'])) return new_record
async def print_what_is_playing(loop): """Connect to device and print what is playing.""" details = conf.AppleTV(ADDRESS, NAME) details.add_service(conf.DmapService(HSGID)) print('Connecting to {}'.format(details.address)) atv = pyatv.connect_to_apple_tv(details, loop) try: print((await atv.metadata.playing())) finally: # Do not forget to logout await atv.logout()
<ast.AsyncFunctionDef object at 0x7da18f7215d0>
keyword[async] keyword[def] identifier[print_what_is_playing] ( identifier[loop] ): literal[string] identifier[details] = identifier[conf] . identifier[AppleTV] ( identifier[ADDRESS] , identifier[NAME] ) identifier[details] . identifier[add_service] ( identifier[conf] . identifier[DmapService] ( identifier[HSGID] )) identifier[print] ( literal[string] . identifier[format] ( identifier[details] . identifier[address] )) identifier[atv] = identifier[pyatv] . identifier[connect_to_apple_tv] ( identifier[details] , identifier[loop] ) keyword[try] : identifier[print] (( keyword[await] identifier[atv] . identifier[metadata] . identifier[playing] ())) keyword[finally] : keyword[await] identifier[atv] . identifier[logout] ()
async def print_what_is_playing(loop): """Connect to device and print what is playing.""" details = conf.AppleTV(ADDRESS, NAME) details.add_service(conf.DmapService(HSGID)) print('Connecting to {}'.format(details.address)) atv = pyatv.connect_to_apple_tv(details, loop) try: print(await atv.metadata.playing()) # depends on [control=['try'], data=[]] finally: # Do not forget to logout await atv.logout()
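How a coroutine like this is typically driven in the pre-asyncio.run style that these older pyatv examples use:

import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(print_what_is_playing(loop))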
def lazy_module(modname, error_strings=None, lazy_mod_class=LazyModule, level='leaf'):
    """Function allowing lazy importing of a module into the namespace.

    A lazy module object is created, registered in `sys.modules`, and
    returned. This is a hollow module; actual loading, and `ImportErrors` if
    not found, are delayed until an attempt is made to access attributes of
    the lazy module.

    A handy application is to use :func:`lazy_module` early in your own code
    (say, in `__init__.py`) to register all modulenames you want to be lazy.
    Because of registration in `sys.modules`, later invocations of
    `import modulename` will also return the lazy object. This means that
    after initial registration the rest of your code can use regular python
    import statements and retain the laziness of the modules.

    Parameters
    ----------
    modname : str
        The module to import.
    error_strings : dict, optional
        A dictionary of strings to use when module-loading fails. Key 'msg'
        sets the message to use (defaults to :attr:`lazy_import._MSG`). The
        message is formatted using the remaining dictionary keys. The default
        message informs the user of which module is missing (key 'module'),
        what code loaded the module as lazy (key 'caller'), and which package
        should be installed to solve the dependency (key 'install_name').
        None of the keys is mandatory and all are given smart names by
        default.
    lazy_mod_class: type, optional
        Which class to use when instantiating the lazy module, to allow
        deep customization. The default is :class:`LazyModule` and custom
        alternatives **must** be a subclass thereof.
    level : str, optional
        Which submodule reference to return. Either a reference to the 'leaf'
        module (the default) or to the 'base' module. This is useful if
        you'll be using the module functionality in the same place you're
        calling :func:`lazy_module` from, since then you don't need to run
        `import` again. Setting *level* does not affect which names/modules
        get registered in `sys.modules`.
        For *level* set to 'base' and *modulename* 'aaa.bbb.ccc'::

            aaa = lazy_import.lazy_module("aaa.bbb.ccc", level='base')

            # 'aaa' becomes defined in the current namespace, with
            # (sub)attributes 'aaa.bbb' and 'aaa.bbb.ccc'.
            # It's the lazy equivalent to:
            import aaa.bbb.ccc

        For *level* set to 'leaf'::

            ccc = lazy_import.lazy_module("aaa.bbb.ccc", level='leaf')

            # Only 'ccc' becomes set in the current namespace.
            # Lazy equivalent to:
            from aaa.bbb import ccc

    Returns
    -------
    module
        The module specified by *modname*, or its base, depending on *level*.
        The module isn't immediately imported. Instead, an instance of
        *lazy_mod_class* is returned. Upon access to any of its attributes,
        the module is finally loaded.

    Examples
    --------
    >>> import lazy_import, sys
    >>> np = lazy_import.lazy_module("numpy")
    >>> np
    Lazily-loaded module numpy
    >>> np is sys.modules['numpy']
    True
    >>> np.pi # This causes the full loading of the module ...
    3.141592653589793
    >>> np # ... and the module is changed in place.
    <module 'numpy' from '/usr/local/lib/python/site-packages/numpy/__init__.py'>

    >>> import lazy_import, sys
    >>> # The following succeeds even when asking for a module that's not available
    >>> missing = lazy_import.lazy_module("missing_module")
    >>> missing
    Lazily-loaded module missing_module
    >>> missing is sys.modules['missing_module']
    True
    >>> missing.some_attr # This causes the full loading of the module, which now fails.
    ImportError: __main__ attempted to use a functionality that requires
    module missing_module, but it couldn't be loaded. Please install
    missing_module and retry.

    See Also
    --------
    :func:`lazy_callable`
    :class:`LazyModule`

    """
    if error_strings is None:
        error_strings = {}
    _set_default_errornames(modname, error_strings)
    mod = _lazy_module(modname, error_strings, lazy_mod_class)
    if level == 'base':
        return sys.modules[module_basename(modname)]
    elif level == 'leaf':
        return mod
    else:
        raise ValueError("Parameter 'level' must be one of ('base', 'leaf')")
def function[lazy_module, parameter[modname, error_strings, lazy_mod_class, level]]:
    constant[Function allowing lazy importing of a module into the namespace.

    A lazy module object is created, registered in `sys.modules`, and
    returned. This is a hollow module; actual loading, and `ImportErrors` if
    not found, are delayed until an attempt is made to access attributes of
    the lazy module.

    A handy application is to use :func:`lazy_module` early in your own code
    (say, in `__init__.py`) to register all modulenames you want to be lazy.
    Because of registration in `sys.modules`, later invocations of
    `import modulename` will also return the lazy object. This means that
    after initial registration the rest of your code can use regular python
    import statements and retain the laziness of the modules.

    Parameters
    ----------
    modname : str
        The module to import.
    error_strings : dict, optional
        A dictionary of strings to use when module-loading fails. Key 'msg'
        sets the message to use (defaults to :attr:`lazy_import._MSG`). The
        message is formatted using the remaining dictionary keys. The default
        message informs the user of which module is missing (key 'module'),
        what code loaded the module as lazy (key 'caller'), and which package
        should be installed to solve the dependency (key 'install_name').
        None of the keys is mandatory and all are given smart names by
        default.
    lazy_mod_class: type, optional
        Which class to use when instantiating the lazy module, to allow
        deep customization. The default is :class:`LazyModule` and custom
        alternatives **must** be a subclass thereof.
    level : str, optional
        Which submodule reference to return. Either a reference to the 'leaf'
        module (the default) or to the 'base' module. This is useful if
        you'll be using the module functionality in the same place you're
        calling :func:`lazy_module` from, since then you don't need to run
        `import` again. Setting *level* does not affect which names/modules
        get registered in `sys.modules`.
        For *level* set to 'base' and *modulename* 'aaa.bbb.ccc'::

            aaa = lazy_import.lazy_module("aaa.bbb.ccc", level='base')

            # 'aaa' becomes defined in the current namespace, with
            # (sub)attributes 'aaa.bbb' and 'aaa.bbb.ccc'.
            # It's the lazy equivalent to:
            import aaa.bbb.ccc

        For *level* set to 'leaf'::

            ccc = lazy_import.lazy_module("aaa.bbb.ccc", level='leaf')

            # Only 'ccc' becomes set in the current namespace.
            # Lazy equivalent to:
            from aaa.bbb import ccc

    Returns
    -------
    module
        The module specified by *modname*, or its base, depending on *level*.
        The module isn't immediately imported. Instead, an instance of
        *lazy_mod_class* is returned. Upon access to any of its attributes,
        the module is finally loaded.

    Examples
    --------
    >>> import lazy_import, sys
    >>> np = lazy_import.lazy_module("numpy")
    >>> np
    Lazily-loaded module numpy
    >>> np is sys.modules['numpy']
    True
    >>> np.pi # This causes the full loading of the module ...
    3.141592653589793
    >>> np # ... and the module is changed in place.
    <module 'numpy' from '/usr/local/lib/python/site-packages/numpy/__init__.py'>

    >>> import lazy_import, sys
    >>> # The following succeeds even when asking for a module that's not available
    >>> missing = lazy_import.lazy_module("missing_module")
    >>> missing
    Lazily-loaded module missing_module
    >>> missing is sys.modules['missing_module']
    True
    >>> missing.some_attr # This causes the full loading of the module, which now fails.
    ImportError: __main__ attempted to use a functionality that requires
    module missing_module, but it couldn't be loaded. Please install
    missing_module and retry.

    See Also
    --------
    :func:`lazy_callable`
    :class:`LazyModule`
    ]
    if compare[name[error_strings] is constant[None]] begin[:]
        variable[error_strings] assign[=] dictionary[[], []]
    call[name[_set_default_errornames], parameter[name[modname], name[error_strings]]]
    variable[mod] assign[=] call[name[_lazy_module], parameter[name[modname], name[error_strings], name[lazy_mod_class]]]
    if compare[name[level] equal[==] constant[base]] begin[:]
        return[call[name[sys].modules][call[name[module_basename], parameter[name[modname]]]]]
keyword[def] identifier[lazy_module] ( identifier[modname] , identifier[error_strings] = keyword[None] , identifier[lazy_mod_class] = identifier[LazyModule] , identifier[level] = literal[string] ): literal[string] keyword[if] identifier[error_strings] keyword[is] keyword[None] : identifier[error_strings] ={} identifier[_set_default_errornames] ( identifier[modname] , identifier[error_strings] ) identifier[mod] = identifier[_lazy_module] ( identifier[modname] , identifier[error_strings] , identifier[lazy_mod_class] ) keyword[if] identifier[level] == literal[string] : keyword[return] identifier[sys] . identifier[modules] [ identifier[module_basename] ( identifier[modname] )] keyword[elif] identifier[level] == literal[string] : keyword[return] identifier[mod] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
def lazy_module(modname, error_strings=None, lazy_mod_class=LazyModule, level='leaf'): """Function allowing lazy importing of a module into the namespace. A lazy module object is created, registered in `sys.modules`, and returned. This is a hollow module; actual loading, and `ImportErrors` if not found, are delayed until an attempt is made to access attributes of the lazy module. A handy application is to use :func:`lazy_module` early in your own code (say, in `__init__.py`) to register all modulenames you want to be lazy. Because of registration in `sys.modules` later invocations of `import modulename` will also return the lazy object. This means that after initial registration the rest of your code can use regular python import statements and retain the laziness of the modules. Parameters ---------- modname : str The module to import. error_strings : dict, optional A dictionary of strings to use when module-loading fails. Key 'msg' sets the message to use (defaults to :attr:`lazy_import._MSG`). The message is formatted using the remaining dictionary keys. The default message informs the user of which module is missing (key 'module'), what code loaded the module as lazy (key 'caller'), and which package should be installed to solve the dependency (key 'install_name'). None of the keys is mandatory and all are given smart names by default. lazy_mod_class: type, optional Which class to use when instantiating the lazy module, to allow deep customization. The default is :class:`LazyModule` and custom alternatives **must** be a subclass thereof. level : str, optional Which submodule reference to return. Either a reference to the 'leaf' module (the default) or to the 'base' module. This is useful if you'll be using the module functionality in the same place you're calling :func:`lazy_module` from, since then you don't need to run `import` again. Setting *level* does not affect which names/modules get registered in `sys.modules`. For *level* set to 'base' and *modname* 'aaa.bbb.ccc':: aaa = lazy_import.lazy_module("aaa.bbb.ccc", level='base') # 'aaa' becomes defined in the current namespace, with # (sub)attributes 'aaa.bbb' and 'aaa.bbb.ccc'. # It's the lazy equivalent to: import aaa.bbb.ccc For *level* set to 'leaf':: ccc = lazy_import.lazy_module("aaa.bbb.ccc", level='leaf') # Only 'ccc' becomes set in the current namespace. # Lazy equivalent to: from aaa.bbb import ccc Returns ------- module The module specified by *modname*, or its base, depending on *level*. The module isn't immediately imported. Instead, an instance of *lazy_mod_class* is returned. Upon access to any of its attributes, the module is finally loaded. Examples -------- >>> import lazy_import, sys >>> np = lazy_import.lazy_module("numpy") >>> np Lazily-loaded module numpy >>> np is sys.modules['numpy'] True >>> np.pi # This causes the full loading of the module ... 3.141592653589793 >>> np # ... and the module is changed in place. <module 'numpy' from '/usr/local/lib/python/site-packages/numpy/__init__.py'> >>> import lazy_import, sys >>> # The following succeeds even when asking for a module that's not available >>> missing = lazy_import.lazy_module("missing_module") >>> missing Lazily-loaded module missing_module >>> missing is sys.modules['missing_module'] True >>> missing.some_attr # This causes the full loading of the module, which now fails. ImportError: __main__ attempted to use a functionality that requires module missing_module, but it couldn't be loaded. Please install missing_module and retry. 
See Also -------- :func:`lazy_callable` :class:`LazyModule` """ if error_strings is None: error_strings = {} # depends on [control=['if'], data=['error_strings']] _set_default_errornames(modname, error_strings) mod = _lazy_module(modname, error_strings, lazy_mod_class) if level == 'base': return sys.modules[module_basename(modname)] # depends on [control=['if'], data=[]] elif level == 'leaf': return mod # depends on [control=['if'], data=[]] else: raise ValueError("Parameter 'level' must be one of ('base', 'leaf')")
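A minimal usage sketch, grounded in the docstring above (and assuming numpy is installed): once lazy_module registers the hollow module in sys.modules, a plain import statement returns the same lazy object, and the real import only happens on first attribute access.

import sys
import lazy_import

np = lazy_import.lazy_module("numpy")  # hollow module, registered in sys.modules
import numpy                           # plain import now returns the same lazy object
assert numpy is np and sys.modules["numpy"] is np
print(np.pi)                           # first attribute access triggers the real import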
def resume(self): """ Resumes execution on all threads of the process. @raise WindowsError: On error an exception is raised. """ if self.get_thread_count() == 0: self.scan_threads() # only refresh the snapshot if empty resumed = list() try: for aThread in self.iter_threads(): aThread.resume() resumed.append(aThread) except Exception: for aThread in resumed: try: aThread.suspend() except Exception: pass raise
def function[resume, parameter[self]]: constant[ Resumes execution on all threads of the process. @raise WindowsError: On error an exception is raised. ] if compare[call[name[self].get_thread_count, parameter[]] equal[==] constant[0]] begin[:] call[name[self].scan_threads, parameter[]] variable[resumed] assign[=] call[name[list], parameter[]] <ast.Try object at 0x7da20c6c7700>
keyword[def] identifier[resume] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[get_thread_count] ()== literal[int] : identifier[self] . identifier[scan_threads] () identifier[resumed] = identifier[list] () keyword[try] : keyword[for] identifier[aThread] keyword[in] identifier[self] . identifier[iter_threads] (): identifier[aThread] . identifier[resume] () identifier[resumed] . identifier[append] ( identifier[aThread] ) keyword[except] identifier[Exception] : keyword[for] identifier[aThread] keyword[in] identifier[resumed] : keyword[try] : identifier[aThread] . identifier[suspend] () keyword[except] identifier[Exception] : keyword[pass] keyword[raise]
def resume(self): """ Resumes execution on all threads of the process. @raise WindowsError: On error an exception is raised. """ if self.get_thread_count() == 0: self.scan_threads() # only refresh the snapshot if empty # depends on [control=['if'], data=[]] resumed = list() try: for aThread in self.iter_threads(): aThread.resume() resumed.append(aThread) # depends on [control=['for'], data=['aThread']] # depends on [control=['try'], data=[]] except Exception: for aThread in resumed: try: aThread.suspend() # depends on [control=['try'], data=[]] except Exception: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['aThread']] raise # depends on [control=['except'], data=[]]
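resume() relies on an all-or-nothing idiom: apply an action item by item, and on any failure undo the items already processed before re-raising. A self-contained toy version of the same pattern:

class Light:
    def __init__(self, ok=True):
        self.ok, self.on = ok, False
    def turn_on(self):
        if not self.ok:
            raise RuntimeError("broken bulb")
        self.on = True
    def turn_off(self):
        self.on = False

def turn_all_on(lights):
    done = []
    try:
        for light in lights:
            light.turn_on()
            done.append(light)
    except Exception:
        for light in done:
            light.turn_off()  # best-effort rollback, mirroring the suspend() loop
        raise

lights = [Light(), Light(ok=False)]
try:
    turn_all_on(lights)
except RuntimeError:
    assert not any(light.on for light in lights)  # first light was rolled back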
def windspeed(self, t): """Return the wind speed list at time `t`""" ws = [0] * self.n for i in range(self.n): q = ceil(t / self.dt[i]) q_prev = 0 if q == 0 else q - 1 r = t % self.dt[i] r = 0 if abs(r) < 1e-6 else r if r == 0: ws[i] = self.speed[i][q] else: t1 = self.time[i][q_prev] s1 = self.speed[i][q_prev] s2 = self.speed[i][q] ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i] return matrix(ws)
def function[windspeed, parameter[self, t]]: constant[Return the wind speed list at time `t`] variable[ws] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20e74b3d0>]] * name[self].n] for taget[name[i]] in starred[call[name[range], parameter[name[self].n]]] begin[:] variable[q] assign[=] call[name[ceil], parameter[binary_operation[name[t] / call[name[self].dt][name[i]]]]] variable[q_prev] assign[=] <ast.IfExp object at 0x7da1b0241150> variable[r] assign[=] binary_operation[name[t] <ast.Mod object at 0x7da2590d6920> call[name[self].dt][name[i]]] variable[r] assign[=] <ast.IfExp object at 0x7da1b0241060> if compare[name[r] equal[==] constant[0]] begin[:] call[name[ws]][name[i]] assign[=] call[call[name[self].speed][name[i]]][name[q]] return[call[name[matrix], parameter[name[ws]]]]
keyword[def] identifier[windspeed] ( identifier[self] , identifier[t] ): literal[string] identifier[ws] =[ literal[int] ]* identifier[self] . identifier[n] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[n] ): identifier[q] = identifier[ceil] ( identifier[t] / identifier[self] . identifier[dt] [ identifier[i] ]) identifier[q_prev] = literal[int] keyword[if] identifier[q] == literal[int] keyword[else] identifier[q] - literal[int] identifier[r] = identifier[t] % identifier[self] . identifier[dt] [ identifier[i] ] identifier[r] = literal[int] keyword[if] identifier[abs] ( identifier[r] )< literal[int] keyword[else] identifier[r] keyword[if] identifier[r] == literal[int] : identifier[ws] [ identifier[i] ]= identifier[self] . identifier[speed] [ identifier[i] ][ identifier[q] ] keyword[else] : identifier[t1] = identifier[self] . identifier[time] [ identifier[i] ][ identifier[q_prev] ] identifier[s1] = identifier[self] . identifier[speed] [ identifier[i] ][ identifier[q_prev] ] identifier[s2] = identifier[self] . identifier[speed] [ identifier[i] ][ identifier[q] ] identifier[ws] [ identifier[i] ]= identifier[s1] +( identifier[t] - identifier[t1] )*( identifier[s2] - identifier[s1] )/ identifier[self] . identifier[dt] [ identifier[i] ] keyword[return] identifier[matrix] ( identifier[ws] )
def windspeed(self, t): """Return the wind speed list at time `t`""" ws = [0] * self.n for i in range(self.n): q = ceil(t / self.dt[i]) q_prev = 0 if q == 0 else q - 1 r = t % self.dt[i] r = 0 if abs(r) < 1e-06 else r if r == 0: ws[i] = self.speed[i][q] # depends on [control=['if'], data=[]] else: t1 = self.time[i][q_prev] s1 = self.speed[i][q_prev] s2 = self.speed[i][q] ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i] # depends on [control=['for'], data=['i']] return matrix(ws)
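The per-series arithmetic is a piecewise-linear interpolation between samples spaced dt apart. A standalone sketch of that computation, under the assumption (not visible in the snippet) that the time grid is uniform, i.e. time[q] = q * dt:

from math import ceil

def speed_at(t, dt, speed):
    # Interpolate a sampled speed series at time t, as windspeed() does per series.
    q = ceil(t / dt)
    q_prev = 0 if q == 0 else q - 1
    r = t % dt
    if abs(r) < 1e-6:
        return speed[q]           # exactly on a sample point
    t1 = q_prev * dt              # stands in for self.time[i][q_prev] on a uniform grid
    return speed[q_prev] + (t - t1) * (speed[q] - speed[q_prev]) / dt

speeds = [5.0, 7.0, 6.0]           # samples at t = 0, 1, 2 with dt = 1
print(speed_at(0.5, 1.0, speeds))  # 6.0, halfway between 5.0 and 7.0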
def _loop_no_cache(self, helper_function, num, fragment): """ Synthesize all fragments without using the cache """ self.log([u"Examining fragment %d (no cache)...", num]) # synthesize and get the duration of the output file voice_code = self._language_to_voice_code(fragment.language) self.log(u"Calling helper function") succeeded, data = helper_function( text=fragment.filtered_text, voice_code=voice_code, output_file_path=None, return_audio_data=True ) # check output if not succeeded: self.log_crit(u"An unexpected error occurred in helper_function") return (False, None) self.log([u"Examining fragment %d (no cache)... done", num]) return (True, data)
def function[_loop_no_cache, parameter[self, helper_function, num, fragment]]: constant[ Synthesize all fragments without using the cache ] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da18dc982e0>, <ast.Name object at 0x7da18dc99390>]]]] variable[voice_code] assign[=] call[name[self]._language_to_voice_code, parameter[name[fragment].language]] call[name[self].log, parameter[constant[Calling helper function]]] <ast.Tuple object at 0x7da1b18a2440> assign[=] call[name[helper_function], parameter[]] if <ast.UnaryOp object at 0x7da1b18a28c0> begin[:] call[name[self].log_crit, parameter[constant[An unexpected error occurred in helper_function]]] return[tuple[[<ast.Constant object at 0x7da1b18a2710>, <ast.Constant object at 0x7da1b18a1570>]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b18a35b0>, <ast.Name object at 0x7da1b18a1810>]]]] return[tuple[[<ast.Constant object at 0x7da1b18a0070>, <ast.Name object at 0x7da1b18a1660>]]]
keyword[def] identifier[_loop_no_cache] ( identifier[self] , identifier[helper_function] , identifier[num] , identifier[fragment] ): literal[string] identifier[self] . identifier[log] ([ literal[string] , identifier[num] ]) identifier[voice_code] = identifier[self] . identifier[_language_to_voice_code] ( identifier[fragment] . identifier[language] ) identifier[self] . identifier[log] ( literal[string] ) identifier[succeeded] , identifier[data] = identifier[helper_function] ( identifier[text] = identifier[fragment] . identifier[filtered_text] , identifier[voice_code] = identifier[voice_code] , identifier[output_file_path] = keyword[None] , identifier[return_audio_data] = keyword[True] ) keyword[if] keyword[not] identifier[succeeded] : identifier[self] . identifier[log_crit] ( literal[string] ) keyword[return] ( keyword[False] , keyword[None] ) identifier[self] . identifier[log] ([ literal[string] , identifier[num] ]) keyword[return] ( keyword[True] , identifier[data] )
def _loop_no_cache(self, helper_function, num, fragment): """ Synthesize all fragments without using the cache """ self.log([u'Examining fragment %d (no cache)...', num]) # synthesize and get the duration of the output file voice_code = self._language_to_voice_code(fragment.language) self.log(u'Calling helper function') (succeeded, data) = helper_function(text=fragment.filtered_text, voice_code=voice_code, output_file_path=None, return_audio_data=True) # check output if not succeeded: self.log_crit(u'An unexpected error occurred in helper_function') return (False, None) # depends on [control=['if'], data=[]] self.log([u'Examining fragment %d (no cache)... done', num]) return (True, data)
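The helper_function contract is only implied above: it must accept these keyword arguments and return a (succeeded, data) pair. A hypothetical stub that satisfies the contract (every name here is a stand-in, not a real helper from the library):

def fake_helper(text, voice_code, output_file_path=None, return_audio_data=True):
    try:
        data = b"\x00" * max(len(text), 1)  # placeholder for real synthesized audio
        return (True, data)
    except Exception:
        return (False, None)

ok, audio = fake_helper("hello world", "en")
assert ok and isinstance(audio, bytes)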
def from_dict(cls, copula_dict): """Set attributes with provided values.""" instance = cls() instance.fitted = copula_dict['fitted'] instance.constant_value = copula_dict['constant_value'] if instance.fitted and not instance.constant_value: instance.model = scipy.stats.gaussian_kde([-1, 0, 0]) for key in ['dataset', 'covariance', 'inv_cov']: copula_dict[key] = np.array(copula_dict[key]) attributes = ['d', 'n', 'dataset', 'covariance', 'factor', 'inv_cov'] for name in attributes: setattr(instance.model, name, copula_dict[name]) return instance
def function[from_dict, parameter[cls, copula_dict]]: constant[Set attributes with provided values.] variable[instance] assign[=] call[name[cls], parameter[]] name[instance].fitted assign[=] call[name[copula_dict]][constant[fitted]] name[instance].constant_value assign[=] call[name[copula_dict]][constant[constant_value]] if <ast.BoolOp object at 0x7da1b1eef130> begin[:] name[instance].model assign[=] call[name[scipy].stats.gaussian_kde, parameter[list[[<ast.UnaryOp object at 0x7da1b1eed6f0>, <ast.Constant object at 0x7da1b1eed810>, <ast.Constant object at 0x7da1b1eed690>]]]] for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da1b1eecbb0>, <ast.Constant object at 0x7da1b1eee2f0>, <ast.Constant object at 0x7da1b1eecdf0>]]] begin[:] call[name[copula_dict]][name[key]] assign[=] call[name[np].array, parameter[call[name[copula_dict]][name[key]]]] variable[attributes] assign[=] list[[<ast.Constant object at 0x7da1b1eed570>, <ast.Constant object at 0x7da1b1eed8a0>, <ast.Constant object at 0x7da1b1eece20>, <ast.Constant object at 0x7da1b1eee770>, <ast.Constant object at 0x7da1b1eeefb0>, <ast.Constant object at 0x7da1b1eee380>]] for taget[name[name]] in starred[name[attributes]] begin[:] call[name[setattr], parameter[name[instance].model, name[name], call[name[copula_dict]][name[name]]]] return[name[instance]]
keyword[def] identifier[from_dict] ( identifier[cls] , identifier[copula_dict] ): literal[string] identifier[instance] = identifier[cls] () identifier[instance] . identifier[fitted] = identifier[copula_dict] [ literal[string] ] identifier[instance] . identifier[constant_value] = identifier[copula_dict] [ literal[string] ] keyword[if] identifier[instance] . identifier[fitted] keyword[and] keyword[not] identifier[instance] . identifier[constant_value] : identifier[instance] . identifier[model] = identifier[scipy] . identifier[stats] . identifier[gaussian_kde] ([- literal[int] , literal[int] , literal[int] ]) keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] ]: identifier[copula_dict] [ identifier[key] ]= identifier[np] . identifier[array] ( identifier[copula_dict] [ identifier[key] ]) identifier[attributes] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[for] identifier[name] keyword[in] identifier[attributes] : identifier[setattr] ( identifier[instance] . identifier[model] , identifier[name] , identifier[copula_dict] [ identifier[name] ]) keyword[return] identifier[instance]
def from_dict(cls, copula_dict): """Set attributes with provided values.""" instance = cls() instance.fitted = copula_dict['fitted'] instance.constant_value = copula_dict['constant_value'] if instance.fitted and (not instance.constant_value): instance.model = scipy.stats.gaussian_kde([-1, 0, 0]) for key in ['dataset', 'covariance', 'inv_cov']: copula_dict[key] = np.array(copula_dict[key]) # depends on [control=['for'], data=['key']] attributes = ['d', 'n', 'dataset', 'covariance', 'factor', 'inv_cov'] for name in attributes: setattr(instance.model, name, copula_dict[name]) # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]] return instance
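One detail worth isolating: the serialized dict stores arrays as plain lists, so from_dict converts the array-valued keys back to ndarrays before restoring them onto the model. A self-contained sketch of just that step:

import numpy as np

copula_dict = {'dataset': [[0.1, 0.4], [0.2, 0.8]],
               'covariance': [[1.0, 0.3], [0.3, 1.0]],
               'inv_cov': [[1.1, -0.33], [-0.33, 1.1]]}
for key in ['dataset', 'covariance', 'inv_cov']:
    copula_dict[key] = np.array(copula_dict[key])
print(copula_dict['covariance'].shape)  # (2, 2)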
def rm_watch(self, wd, rec=False, quiet=True): """ Removes watch(s). @param wd: Watch Descriptor of the file or directory to unwatch. Also accepts a list of WDs. @type wd: int or list of int. @param rec: Recursively removes watches on every already watched subdirectories and subfiles. @type rec: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully removed, False otherwise. @rtype: dict of {int: bool} """ lwd = self.__format_param(wd) if rec: lwd = self.__get_sub_rec(lwd) ret_ = {} # return {wd: bool, ...} for awd in lwd: # remove watch wd_ = self._inotify_wrapper.inotify_rm_watch(self._fd, awd) if wd_ < 0: ret_[awd] = False err = ('rm_watch: cannot remove WD=%d, %s' % \ (awd, self._inotify_wrapper.str_errno())) if quiet: log.error(err) continue raise WatchManagerError(err, ret_) # Remove watch from our dictionary if awd in self._wmd: del self._wmd[awd] ret_[awd] = True log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd)) return ret_
def function[rm_watch, parameter[self, wd, rec, quiet]]: constant[ Removes watch(s). @param wd: Watch Descriptor of the file or directory to unwatch. Also accepts a list of WDs. @type wd: int or list of int. @param rec: Recursively removes watches on every already watched subdirectories and subfiles. @type rec: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully removed, False otherwise. @rtype: dict of {int: bool} ] variable[lwd] assign[=] call[name[self].__format_param, parameter[name[wd]]] if name[rec] begin[:] variable[lwd] assign[=] call[name[self].__get_sub_rec, parameter[name[lwd]]] variable[ret_] assign[=] dictionary[[], []] for taget[name[awd]] in starred[name[lwd]] begin[:] variable[wd_] assign[=] call[name[self]._inotify_wrapper.inotify_rm_watch, parameter[name[self]._fd, name[awd]]] if compare[name[wd_] less[<] constant[0]] begin[:] call[name[ret_]][name[awd]] assign[=] constant[False] variable[err] assign[=] binary_operation[constant[rm_watch: cannot remove WD=%d, %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a7ca0>, <ast.Call object at 0x7da2054a6ef0>]]] if name[quiet] begin[:] call[name[log].error, parameter[name[err]]] continue <ast.Raise object at 0x7da2054a6080> if compare[name[awd] in name[self]._wmd] begin[:] <ast.Delete object at 0x7da2054a7280> call[name[ret_]][name[awd]] assign[=] constant[True] call[name[log].debug, parameter[constant[Watch WD=%d (%s) removed], name[awd], call[name[self].get_path, parameter[name[awd]]]]] return[name[ret_]]
keyword[def] identifier[rm_watch] ( identifier[self] , identifier[wd] , identifier[rec] = keyword[False] , identifier[quiet] = keyword[True] ): literal[string] identifier[lwd] = identifier[self] . identifier[__format_param] ( identifier[wd] ) keyword[if] identifier[rec] : identifier[lwd] = identifier[self] . identifier[__get_sub_rec] ( identifier[lwd] ) identifier[ret_] ={} keyword[for] identifier[awd] keyword[in] identifier[lwd] : identifier[wd_] = identifier[self] . identifier[_inotify_wrapper] . identifier[inotify_rm_watch] ( identifier[self] . identifier[_fd] , identifier[awd] ) keyword[if] identifier[wd_] < literal[int] : identifier[ret_] [ identifier[awd] ]= keyword[False] identifier[err] =( literal[string] %( identifier[awd] , identifier[self] . identifier[_inotify_wrapper] . identifier[str_errno] ())) keyword[if] identifier[quiet] : identifier[log] . identifier[error] ( identifier[err] ) keyword[continue] keyword[raise] identifier[WatchManagerError] ( identifier[err] , identifier[ret_] ) keyword[if] identifier[awd] keyword[in] identifier[self] . identifier[_wmd] : keyword[del] identifier[self] . identifier[_wmd] [ identifier[awd] ] identifier[ret_] [ identifier[awd] ]= keyword[True] identifier[log] . identifier[debug] ( literal[string] , identifier[awd] , identifier[self] . identifier[get_path] ( identifier[awd] )) keyword[return] identifier[ret_]
def rm_watch(self, wd, rec=False, quiet=True): """ Removes watch(s). @param wd: Watch Descriptor of the file or directory to unwatch. Also accepts a list of WDs. @type wd: int or list of int. @param rec: Recursively removes watches on every already watched subdirectories and subfiles. @type rec: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully removed, False otherwise. @rtype: dict of {int: bool} """ lwd = self.__format_param(wd) if rec: lwd = self.__get_sub_rec(lwd) # depends on [control=['if'], data=[]] ret_ = {} # return {wd: bool, ...} for awd in lwd: # remove watch wd_ = self._inotify_wrapper.inotify_rm_watch(self._fd, awd) if wd_ < 0: ret_[awd] = False err = 'rm_watch: cannot remove WD=%d, %s' % (awd, self._inotify_wrapper.str_errno()) if quiet: log.error(err) continue # depends on [control=['if'], data=[]] raise WatchManagerError(err, ret_) # depends on [control=['if'], data=[]] # Remove watch from our dictionary if awd in self._wmd: del self._wmd[awd] # depends on [control=['if'], data=['awd']] ret_[awd] = True log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd)) # depends on [control=['for'], data=['awd']] return ret_
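A hedged usage sketch against pyinotify's WatchManager, the class this method belongs to; it assumes the usual API where add_watch returns a {path: wd} mapping (Linux only, pyinotify installed):

import pyinotify

wm = pyinotify.WatchManager()
wdd = wm.add_watch('/tmp', pyinotify.IN_CREATE, rec=True)  # assumed {path: wd, ...}
result = wm.rm_watch(wdd['/tmp'], rec=True)                # {wd: True, ...} on success
print(all(result.values()))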
def get_osm_filter(network_type): """ Create a filter to query OSM for the specified network type. Parameters ---------- network_type : string {'walk', 'bike', 'drive', 'drive_service', 'all', 'all_private', 'none'} what type of street or other network to get Returns ------- string """ filters = {} # driving: filter out un-drivable roads, service roads, private ways, and # anything specifying motor=no. also filter out any non-service roads that # are tagged as providing parking, driveway, private, or emergency-access # services filters['drive'] = ('["area"!~"yes"]["highway"!~"cycleway|footway|path|pedestrian|steps|track|corridor|' 'proposed|construction|bridleway|abandoned|platform|raceway|service"]' '["motor_vehicle"!~"no"]["motorcar"!~"no"]{}' '["service"!~"parking|parking_aisle|driveway|private|emergency_access"]').format(settings.default_access) # drive+service: allow ways tagged 'service' but filter out certain types of # service ways filters['drive_service'] = ('["area"!~"yes"]["highway"!~"cycleway|footway|path|pedestrian|steps|track|corridor|' 'proposed|construction|bridleway|abandoned|platform|raceway"]' '["motor_vehicle"!~"no"]["motorcar"!~"no"]{}' '["service"!~"parking|parking_aisle|private|emergency_access"]').format(settings.default_access) # walking: filter out cycle ways, motor ways, private ways, and anything # specifying foot=no. allow service roads, permitting things like parking # lot lanes, alleys, etc that you *can* walk on even if they're not exactly # pleasant walks. some cycleways may allow pedestrians, but this filter ignores # such cycleways. filters['walk'] = ('["area"!~"yes"]["highway"!~"cycleway|motor|proposed|construction|abandoned|platform|raceway"]' '["foot"!~"no"]["service"!~"private"]{}').format(settings.default_access) # biking: filter out foot ways, motor ways, private ways, and anything # specifying biking=no filters['bike'] = ('["area"!~"yes"]["highway"!~"footway|steps|corridor|motor|proposed|construction|abandoned|platform|raceway"]' '["bicycle"!~"no"]["service"!~"private"]{}').format(settings.default_access) # to download all ways, just filter out everything not currently in use or # that is private-access only filters['all'] = ('["area"!~"yes"]["highway"!~"proposed|construction|abandoned|platform|raceway"]' '["service"!~"private"]{}').format(settings.default_access) # to download all ways, including private-access ones, just filter out # everything not currently in use filters['all_private'] = '["area"!~"yes"]["highway"!~"proposed|construction|abandoned|platform|raceway"]' # no filter, needed for infrastructures other than "highway" filters['none'] = '' if network_type in filters: osm_filter = filters[network_type] else: raise UnknownNetworkType('unknown network_type "{}"'.format(network_type)) return osm_filter
def function[get_osm_filter, parameter[network_type]]: constant[ Create a filter to query OSM for the specified network type. Parameters ---------- network_type : string {'walk', 'bike', 'drive', 'drive_service', 'all', 'all_private', 'none'} what type of street or other network to get Returns ------- string ] variable[filters] assign[=] dictionary[[], []] call[name[filters]][constant[drive]] assign[=] call[constant[["area"!~"yes"]["highway"!~"cycleway|footway|path|pedestrian|steps|track|corridor|proposed|construction|bridleway|abandoned|platform|raceway|service"]["motor_vehicle"!~"no"]["motorcar"!~"no"]{}["service"!~"parking|parking_aisle|driveway|private|emergency_access"]].format, parameter[name[settings].default_access]] call[name[filters]][constant[drive_service]] assign[=] call[constant[["area"!~"yes"]["highway"!~"cycleway|footway|path|pedestrian|steps|track|corridor|proposed|construction|bridleway|abandoned|platform|raceway"]["motor_vehicle"!~"no"]["motorcar"!~"no"]{}["service"!~"parking|parking_aisle|private|emergency_access"]].format, parameter[name[settings].default_access]] call[name[filters]][constant[walk]] assign[=] call[constant[["area"!~"yes"]["highway"!~"cycleway|motor|proposed|construction|abandoned|platform|raceway"]["foot"!~"no"]["service"!~"private"]{}].format, parameter[name[settings].default_access]] call[name[filters]][constant[bike]] assign[=] call[constant[["area"!~"yes"]["highway"!~"footway|steps|corridor|motor|proposed|construction|abandoned|platform|raceway"]["bicycle"!~"no"]["service"!~"private"]{}].format, parameter[name[settings].default_access]] call[name[filters]][constant[all]] assign[=] call[constant[["area"!~"yes"]["highway"!~"proposed|construction|abandoned|platform|raceway"]["service"!~"private"]{}].format, parameter[name[settings].default_access]] call[name[filters]][constant[all_private]] assign[=] constant[["area"!~"yes"]["highway"!~"proposed|construction|abandoned|platform|raceway"]] call[name[filters]][constant[none]] assign[=] constant[] if compare[name[network_type] in name[filters]] begin[:] variable[osm_filter] assign[=] call[name[filters]][name[network_type]] return[name[osm_filter]]
keyword[def] identifier[get_osm_filter] ( identifier[network_type] ): literal[string] identifier[filters] ={} identifier[filters] [ literal[string] ]=( literal[string] literal[string] literal[string] literal[string] ). identifier[format] ( identifier[settings] . identifier[default_access] ) identifier[filters] [ literal[string] ]=( literal[string] literal[string] literal[string] literal[string] ). identifier[format] ( identifier[settings] . identifier[default_access] ) identifier[filters] [ literal[string] ]=( literal[string] literal[string] ). identifier[format] ( identifier[settings] . identifier[default_access] ) identifier[filters] [ literal[string] ]=( literal[string] literal[string] ). identifier[format] ( identifier[settings] . identifier[default_access] ) identifier[filters] [ literal[string] ]=( literal[string] literal[string] ). identifier[format] ( identifier[settings] . identifier[default_access] ) identifier[filters] [ literal[string] ]= literal[string] identifier[filters] [ literal[string] ]= literal[string] keyword[if] identifier[network_type] keyword[in] identifier[filters] : identifier[osm_filter] = identifier[filters] [ identifier[network_type] ] keyword[else] : keyword[raise] identifier[UnknownNetworkType] ( literal[string] . identifier[format] ( identifier[network_type] )) keyword[return] identifier[osm_filter]
def get_osm_filter(network_type): """ Create a filter to query OSM for the specified network type. Parameters ---------- network_type : string {'walk', 'bike', 'drive', 'drive_service', 'all', 'all_private', 'none'} what type of street or other network to get Returns ------- string """ filters = {} # driving: filter out un-drivable roads, service roads, private ways, and # anything specifying motor=no. also filter out any non-service roads that # are tagged as providing parking, driveway, private, or emergency-access # services filters['drive'] = '["area"!~"yes"]["highway"!~"cycleway|footway|path|pedestrian|steps|track|corridor|proposed|construction|bridleway|abandoned|platform|raceway|service"]["motor_vehicle"!~"no"]["motorcar"!~"no"]{}["service"!~"parking|parking_aisle|driveway|private|emergency_access"]'.format(settings.default_access) # drive+service: allow ways tagged 'service' but filter out certain types of # service ways filters['drive_service'] = '["area"!~"yes"]["highway"!~"cycleway|footway|path|pedestrian|steps|track|corridor|proposed|construction|bridleway|abandoned|platform|raceway"]["motor_vehicle"!~"no"]["motorcar"!~"no"]{}["service"!~"parking|parking_aisle|private|emergency_access"]'.format(settings.default_access) # walking: filter out cycle ways, motor ways, private ways, and anything # specifying foot=no. allow service roads, permitting things like parking # lot lanes, alleys, etc that you *can* walk on even if they're not exactly # pleasant walks. some cycleways may allow pedestrians, but this filter ignores # such cycleways. filters['walk'] = '["area"!~"yes"]["highway"!~"cycleway|motor|proposed|construction|abandoned|platform|raceway"]["foot"!~"no"]["service"!~"private"]{}'.format(settings.default_access) # biking: filter out foot ways, motor ways, private ways, and anything # specifying biking=no filters['bike'] = '["area"!~"yes"]["highway"!~"footway|steps|corridor|motor|proposed|construction|abandoned|platform|raceway"]["bicycle"!~"no"]["service"!~"private"]{}'.format(settings.default_access) # to download all ways, just filter out everything not currently in use or # that is private-access only filters['all'] = '["area"!~"yes"]["highway"!~"proposed|construction|abandoned|platform|raceway"]["service"!~"private"]{}'.format(settings.default_access) # to download all ways, including private-access ones, just filter out # everything not currently in use filters['all_private'] = '["area"!~"yes"]["highway"!~"proposed|construction|abandoned|platform|raceway"]' # no filter, needed for infrastructures other than "highway" filters['none'] = '' if network_type in filters: osm_filter = filters[network_type] # depends on [control=['if'], data=['network_type', 'filters']] else: raise UnknownNetworkType('unknown network_type "{}"'.format(network_type)) return osm_filter
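A quick check of both paths, run inside the defining module (settings.default_access and UnknownNetworkType come from there):

drive_filter = get_osm_filter('drive')
print('"motor_vehicle"!~"no"' in drive_filter)  # True: ways tagged motor_vehicle=no are excluded

try:
    get_osm_filter('hovercraft')
except UnknownNetworkType as err:
    print(err)  # unknown network_type "hovercraft"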
def plot_nodes_pcolor_to_ax(self, ax, nid, **kwargs): """Plot node data to an axes object Parameters ---------- ax : axes object axes to plot to nid : int node id pointing to the respective data set cmap : string, optional color map to use. Default: jet vmin : float, optional Minimum colorbar value vmax : float, optional Maximum colorbar value Returns ------- """ fig = ax.get_figure() x = self.grid.nodes['presort'][:, 1] z = self.grid.nodes['presort'][:, 2] ax.scatter(x, z) xz = np.vstack((x, z)).T # generate grid X, Z = np.meshgrid( np.linspace(x.min(), x.max(), 100), np.linspace(z.min(), z.max(), 100), ) values = np.array(self.nodeman.nodevals[nid]) # linear # cubic cint = scipy.interpolate.griddata( xz, values, (X, Z), method='linear', # method='linear', # method='nearest', fill_value=np.nan, ) cint_ma = np.ma.masked_invalid(cint) pc = ax.pcolormesh( X, Z, cint_ma, cmap=kwargs.get('cmap', 'jet'), vmin=kwargs.get('vmin', None), vmax=kwargs.get('vmax', None), ) if kwargs.get('plot_colorbar', False): divider = make_axes_locatable(ax) cbposition = kwargs.get('cbposition', 'vertical') if cbposition == 'horizontal': ax_cb = divider.new_vertical( size=0.1, pad=0.4, pack_start=True ) elif cbposition == 'vertical': ax_cb = divider.new_horizontal( size=0.1, pad=0.4, ) else: raise Exception('cbposition not recognized') ax.get_figure().add_axes(ax_cb) cb = fig.colorbar( pc, cax=ax_cb, orientation=cbposition, label=kwargs.get('cblabel', ''), ticks=mpl.ticker.MaxNLocator(kwargs.get('cbnrticks', 3)), format=kwargs.get('cbformat', None), extend='both', ) no_elecs = kwargs.get('no_elecs', False) if self.grid.electrodes is not None and no_elecs is not True: ax.scatter( self.grid.electrodes[:, 1], self.grid.electrodes[:, 2], color=self.grid.props['electrode_color'], # clip_on=False, ) return fig, ax, pc, cb return fig, ax, pc
def function[plot_nodes_pcolor_to_ax, parameter[self, ax, nid]]: constant[Plot node data to an axes object Parameters ---------- ax : axes object axes to plot to nid : int node id pointing to the respective data set cmap : string, optional color map to use. Default: jet vmin : float, optional Minimum colorbar value vmax : float, optional Maximum colorbar value Returns ------- ] variable[fig] assign[=] call[name[ax].get_figure, parameter[]] variable[x] assign[=] call[call[name[self].grid.nodes][constant[presort]]][tuple[[<ast.Slice object at 0x7da1b242b880>, <ast.Constant object at 0x7da1b2361c00>]]] variable[z] assign[=] call[call[name[self].grid.nodes][constant[presort]]][tuple[[<ast.Slice object at 0x7da1b23616c0>, <ast.Constant object at 0x7da1b2362cb0>]]] call[name[ax].scatter, parameter[name[x], name[z]]] variable[xz] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Name object at 0x7da1b2361d20>, <ast.Name object at 0x7da1b2360670>]]]].T <ast.Tuple object at 0x7da1b2361150> assign[=] call[name[np].meshgrid, parameter[call[name[np].linspace, parameter[call[name[x].min, parameter[]], call[name[x].max, parameter[]], constant[100]]], call[name[np].linspace, parameter[call[name[z].min, parameter[]], call[name[z].max, parameter[]], constant[100]]]]] variable[values] assign[=] call[name[np].array, parameter[call[name[self].nodeman.nodevals][name[nid]]]] variable[cint] assign[=] call[name[scipy].interpolate.griddata, parameter[name[xz], name[values], tuple[[<ast.Name object at 0x7da1b2346a70>, <ast.Name object at 0x7da1b2345a20>]]]] variable[cint_ma] assign[=] call[name[np].ma.masked_invalid, parameter[name[cint]]] variable[pc] assign[=] call[name[ax].pcolormesh, parameter[name[X], name[Z], name[cint_ma]]] if call[name[kwargs].get, parameter[constant[plot_colorbar], constant[False]]] begin[:] variable[divider] assign[=] call[name[make_axes_locatable], parameter[name[ax]]] variable[cbposition] assign[=] call[name[kwargs].get, parameter[constant[cbposition], constant[vertical]]] if compare[name[cbposition] equal[==] constant[horizontal]] begin[:] variable[ax_cb] assign[=] call[name[divider].new_vertical, parameter[]] call[call[name[ax].get_figure, parameter[]].add_axes, parameter[name[ax_cb]]] variable[cb] assign[=] call[name[fig].colorbar, parameter[name[pc]]] variable[no_elecs] assign[=] call[name[kwargs].get, parameter[constant[no_elecs], constant[False]]] if <ast.BoolOp object at 0x7da1b225ef80> begin[:] call[name[ax].scatter, parameter[call[name[self].grid.electrodes][tuple[[<ast.Slice object at 0x7da1b225d510>, <ast.Constant object at 0x7da1b225e980>]]], call[name[self].grid.electrodes][tuple[[<ast.Slice object at 0x7da1b225f730>, <ast.Constant object at 0x7da1b225dc90>]]]]] return[tuple[[<ast.Name object at 0x7da1b225e380>, <ast.Name object at 0x7da1b225d540>, <ast.Name object at 0x7da1b225e020>, <ast.Name object at 0x7da1b225cee0>]]] return[tuple[[<ast.Name object at 0x7da1b225db40>, <ast.Name object at 0x7da1b225e260>, <ast.Name object at 0x7da1b225fca0>]]]
keyword[def] identifier[plot_nodes_pcolor_to_ax] ( identifier[self] , identifier[ax] , identifier[nid] ,** identifier[kwargs] ): literal[string] identifier[fig] = identifier[ax] . identifier[get_figure] () identifier[x] = identifier[self] . identifier[grid] . identifier[nodes] [ literal[string] ][:, literal[int] ] identifier[z] = identifier[self] . identifier[grid] . identifier[nodes] [ literal[string] ][:, literal[int] ] identifier[ax] . identifier[scatter] ( identifier[x] , identifier[z] ) identifier[xz] = identifier[np] . identifier[vstack] (( identifier[x] , identifier[z] )). identifier[T] identifier[X] , identifier[Z] = identifier[np] . identifier[meshgrid] ( identifier[np] . identifier[linspace] ( identifier[x] . identifier[min] (), identifier[x] . identifier[max] (), literal[int] ), identifier[np] . identifier[linspace] ( identifier[z] . identifier[min] (), identifier[z] . identifier[max] (), literal[int] ), ) identifier[values] = identifier[np] . identifier[array] ( identifier[self] . identifier[nodeman] . identifier[nodevals] [ identifier[nid] ]) identifier[cint] = identifier[scipy] . identifier[interpolate] . identifier[griddata] ( identifier[xz] , identifier[values] , ( identifier[X] , identifier[Z] ), identifier[method] = literal[string] , identifier[fill_value] = identifier[np] . identifier[nan] , ) identifier[cint_ma] = identifier[np] . identifier[ma] . identifier[masked_invalid] ( identifier[cint] ) identifier[pc] = identifier[ax] . identifier[pcolormesh] ( identifier[X] , identifier[Z] , identifier[cint_ma] , identifier[cmap] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ), identifier[vmin] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ), identifier[vmax] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ), ) keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ): identifier[divider] = identifier[make_axes_locatable] ( identifier[ax] ) identifier[cbposition] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[cbposition] == literal[string] : identifier[ax_cb] = identifier[divider] . identifier[new_vertical] ( identifier[size] = literal[int] , identifier[pad] = literal[int] , identifier[pack_start] = keyword[True] ) keyword[elif] identifier[cbposition] == literal[string] : identifier[ax_cb] = identifier[divider] . identifier[new_horizontal] ( identifier[size] = literal[int] , identifier[pad] = literal[int] , ) keyword[else] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[ax] . identifier[get_figure] (). identifier[add_axes] ( identifier[ax_cb] ) identifier[cb] = identifier[fig] . identifier[colorbar] ( identifier[pc] , identifier[cax] = identifier[ax_cb] , identifier[orientation] = identifier[cbposition] , identifier[label] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ), identifier[ticks] = identifier[mpl] . identifier[ticker] . identifier[MaxNLocator] ( identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )), identifier[format] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ), identifier[extend] = literal[string] , ) identifier[no_elecs] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ) keyword[if] identifier[self] . identifier[grid] . identifier[electrodes] keyword[is] keyword[not] keyword[None] keyword[and] identifier[no_elecs] keyword[is] keyword[not] keyword[True] : identifier[ax] . 
identifier[scatter] ( identifier[self] . identifier[grid] . identifier[electrodes] [:, literal[int] ], identifier[self] . identifier[grid] . identifier[electrodes] [:, literal[int] ], identifier[color] = identifier[self] . identifier[grid] . identifier[props] [ literal[string] ], ) keyword[return] identifier[fig] , identifier[ax] , identifier[pc] , identifier[cb] keyword[return] identifier[fig] , identifier[ax] , identifier[pc]
def plot_nodes_pcolor_to_ax(self, ax, nid, **kwargs): """Plot node data to an axes object Parameters ---------- ax : axes object axes to plot to nid : int node id pointing to the respective data set cmap : string, optional color map to use. Default: jet vmin : float, optional Minimum colorbar value vmax : float, optional Maximum colorbar value Returns ------- """ fig = ax.get_figure() x = self.grid.nodes['presort'][:, 1] z = self.grid.nodes['presort'][:, 2] ax.scatter(x, z) xz = np.vstack((x, z)).T # generate grid (X, Z) = np.meshgrid(np.linspace(x.min(), x.max(), 100), np.linspace(z.min(), z.max(), 100)) values = np.array(self.nodeman.nodevals[nid]) # linear # cubic # method='linear', # method='nearest', cint = scipy.interpolate.griddata(xz, values, (X, Z), method='linear', fill_value=np.nan) cint_ma = np.ma.masked_invalid(cint) pc = ax.pcolormesh(X, Z, cint_ma, cmap=kwargs.get('cmap', 'jet'), vmin=kwargs.get('vmin', None), vmax=kwargs.get('vmax', None)) if kwargs.get('plot_colorbar', False): divider = make_axes_locatable(ax) cbposition = kwargs.get('cbposition', 'vertical') if cbposition == 'horizontal': ax_cb = divider.new_vertical(size=0.1, pad=0.4, pack_start=True) # depends on [control=['if'], data=[]] elif cbposition == 'vertical': ax_cb = divider.new_horizontal(size=0.1, pad=0.4) # depends on [control=['if'], data=[]] else: raise Exception('cbposition not recognized') ax.get_figure().add_axes(ax_cb) cb = fig.colorbar(pc, cax=ax_cb, orientation=cbposition, label=kwargs.get('cblabel', ''), ticks=mpl.ticker.MaxNLocator(kwargs.get('cbnrticks', 3)), format=kwargs.get('cbformat', None), extend='both') # depends on [control=['if'], data=[]] no_elecs = kwargs.get('no_elecs', False) if self.grid.electrodes is not None and no_elecs is not True: # clip_on=False, ax.scatter(self.grid.electrodes[:, 1], self.grid.electrodes[:, 2], color=self.grid.props['electrode_color']) return (fig, ax, pc, cb) # depends on [control=['if'], data=[]] return (fig, ax, pc)
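At its core the routine interpolates scattered node values onto a regular mesh with griddata, masks the NaNs outside the convex hull, and draws a pcolormesh. A self-contained rerun of that pipeline on synthetic nodes:

import numpy as np
import scipy.interpolate
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
x, z = rng.uniform(0, 1, 50), rng.uniform(0, 1, 50)
values = np.sin(3 * x) * np.cos(3 * z)
X, Z = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))
cint = scipy.interpolate.griddata(np.c_[x, z], values, (X, Z),
                                  method='linear', fill_value=np.nan)
fig, ax = plt.subplots()
pc = ax.pcolormesh(X, Z, np.ma.masked_invalid(cint), cmap='viridis')
fig.colorbar(pc, ax=ax, label='value')
fig.savefig('nodes.png', dpi=150)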
def template_exists_db(self, template): """ Receives a template and checks if it exists in the database using the template name and language """ name = utils.camel_to_snake(template[0]).upper() language = utils.camel_to_snake(template[3]) try: models.EmailTemplate.objects.get(name=name, language=language) except models.EmailTemplate.DoesNotExist: return False return True
def function[template_exists_db, parameter[self, template]]: constant[ Receives a template and checks if it exists in the database using the template name and language ] variable[name] assign[=] call[call[name[utils].camel_to_snake, parameter[call[name[template]][constant[0]]]].upper, parameter[]] variable[language] assign[=] call[name[utils].camel_to_snake, parameter[call[name[template]][constant[3]]]] <ast.Try object at 0x7da18dc05f90> return[constant[True]]
keyword[def] identifier[template_exists_db] ( identifier[self] , identifier[template] ): literal[string] identifier[name] = identifier[utils] . identifier[camel_to_snake] ( identifier[template] [ literal[int] ]). identifier[upper] () identifier[language] = identifier[utils] . identifier[camel_to_snake] ( identifier[template] [ literal[int] ]) keyword[try] : identifier[models] . identifier[EmailTemplate] . identifier[objects] . identifier[get] ( identifier[name] = identifier[name] , identifier[language] = identifier[language] ) keyword[except] identifier[models] . identifier[EmailTemplate] . identifier[DoesNotExist] : keyword[return] keyword[False] keyword[return] keyword[True]
def template_exists_db(self, template): """ Receives a template and checks if it exists in the database using the template name and language """ name = utils.camel_to_snake(template[0]).upper() language = utils.camel_to_snake(template[3]) try: models.EmailTemplate.objects.get(name=name, language=language) # depends on [control=['try'], data=[]] except models.EmailTemplate.DoesNotExist: return False # depends on [control=['except'], data=[]] return True
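The lookup key is built by snake-casing and upper-casing the template name; both utils.camel_to_snake's exact behavior and the tuple layout (name at index 0, language at index 3) are inferred from the indexing above. A hypothetical stand-in:

import re

def camel_to_snake(text):
    # Common implementation: insert '_' before every non-leading capital.
    return re.sub(r'(?<!^)(?=[A-Z])', '_', text).lower()

template = ('WelcomeEmail', None, None, 'english')
name = camel_to_snake(template[0]).upper()  # 'WELCOME_EMAIL'
language = camel_to_snake(template[3])      # 'english'
print(name, language)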
def _set_weight(self, v, load=False): """ Setter method for weight, mapped from YANG variable /routing_system/route_map/content/set/weight (container) If this variable is read-only (config: false) in the source YANG file, then _set_weight is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_weight() directly. YANG Description: BGP weight for routing table """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=weight.weight, is_container='container', presence=False, yang_name="weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP weight for routing table', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """weight must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=weight.weight, is_container='container', presence=False, yang_name="weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP weight for routing table', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""", }) self.__weight = t if hasattr(self, '_set'): self._set()
def function[_set_weight, parameter[self, v, load]]: constant[ Setter method for weight, mapped from YANG variable /routing_system/route_map/content/set/weight (container) If this variable is read-only (config: false) in the source YANG file, then _set_weight is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_weight() directly. YANG Description: BGP weight for routing table ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da1b25965f0> name[self].__weight assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_weight] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[weight] . identifier[weight] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__weight] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_weight(self, v, load=False): """ Setter method for weight, mapped from YANG variable /routing_system/route_map/content/set/weight (container) If this variable is read-only (config: false) in the source YANG file, then _set_weight is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_weight() directly. YANG Description: BGP weight for routing table """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=weight.weight, is_container='container', presence=False, yang_name='weight', rest_name='weight', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP weight for routing table', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'weight must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=weight.weight, is_container=\'container\', presence=False, yang_name="weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'BGP weight for routing table\', u\'cli-full-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-ip-policy\', defining_module=\'brocade-ip-policy\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__weight = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
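On a type mismatch the setter raises ValueError whose single argument is a dict; a standalone illustration of unpacking that payload, with the keys taken verbatim from the except branch above:

try:
    raise ValueError({'error-string': 'weight must be of a type compatible with container',
                      'defined-type': 'container',
                      'generated-type': '...'})
except ValueError as err:
    payload = err.args[0]
    print(payload['defined-type'])  # container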
def seek(self, offset, whence=os.SEEK_SET): """Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed. OSError: if the seek failed. """ if not self._is_open: raise IOError('Not opened.') # For a yet unknown reason a Python file-like object on Windows allows for # invalid whence values to be passed to the seek function. This check # makes sure the behavior of the function is the same on all platforms. if whence not in [os.SEEK_SET, os.SEEK_CUR, os.SEEK_END]: raise IOError('Unsupported whence.') self._file_object.seek(offset, whence)
def function[seek, parameter[self, offset, whence]]: constant[Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed. OSError: if the seek failed. ] if <ast.UnaryOp object at 0x7da1b07a9780> begin[:] <ast.Raise object at 0x7da1b07abc10> if compare[name[whence] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Attribute object at 0x7da1b07aba60>, <ast.Attribute object at 0x7da1b07abe50>, <ast.Attribute object at 0x7da1b07a87c0>]]] begin[:] <ast.Raise object at 0x7da1b07a8190> call[name[self]._file_object.seek, parameter[name[offset], name[whence]]]
keyword[def] identifier[seek] ( identifier[self] , identifier[offset] , identifier[whence] = identifier[os] . identifier[SEEK_SET] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_is_open] : keyword[raise] identifier[IOError] ( literal[string] ) keyword[if] identifier[whence] keyword[not] keyword[in] [ identifier[os] . identifier[SEEK_SET] , identifier[os] . identifier[SEEK_CUR] , identifier[os] . identifier[SEEK_END] ]: keyword[raise] identifier[IOError] ( literal[string] ) identifier[self] . identifier[_file_object] . identifier[seek] ( identifier[offset] , identifier[whence] )
def seek(self, offset, whence=os.SEEK_SET): """Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed. OSError: if the seek failed. """ if not self._is_open: raise IOError('Not opened.') # depends on [control=['if'], data=[]] # For a yet unknown reason a Python file-like object on Windows allows for # invalid whence values to be passed to the seek function. This check # makes sure the behavior of the function is the same on all platforms. if whence not in [os.SEEK_SET, os.SEEK_CUR, os.SEEK_END]: raise IOError('Unsupported whence.') # depends on [control=['if'], data=[]] self._file_object.seek(offset, whence)
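A quick demonstration of the three whence modes the wrapper accepts, using a plain BytesIO in place of the wrapped file object:

import io
import os

f = io.BytesIO(b'abcdefgh')
f.seek(3, os.SEEK_SET); print(f.tell())   # 3: absolute offset
f.seek(2, os.SEEK_CUR); print(f.tell())   # 5: relative to current position
f.seek(-1, os.SEEK_END); print(f.tell())  # 7: relative to end of stream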
def _add_call_site(self, call_site_addr, call_target_addr, retn_addr): """ Registers a basic block as calling a function and returning somewhere. :param call_site_addr: The address of a basic block that ends in a call. :param call_target_addr: The address of the target of said call. :param retn_addr: The address that said call will return to. """ self._call_sites[call_site_addr] = (call_target_addr, retn_addr)
def function[_add_call_site, parameter[self, call_site_addr, call_target_addr, retn_addr]]: constant[ Registers a basic block as calling a function and returning somewhere. :param call_site_addr: The address of a basic block that ends in a call. :param call_target_addr: The address of the target of said call. :param retn_addr: The address that said call will return to. ] call[name[self]._call_sites][name[call_site_addr]] assign[=] tuple[[<ast.Name object at 0x7da207f03be0>, <ast.Name object at 0x7da207f01420>]]
keyword[def] identifier[_add_call_site] ( identifier[self] , identifier[call_site_addr] , identifier[call_target_addr] , identifier[retn_addr] ): literal[string] identifier[self] . identifier[_call_sites] [ identifier[call_site_addr] ]=( identifier[call_target_addr] , identifier[retn_addr] )
def _add_call_site(self, call_site_addr, call_target_addr, retn_addr): """ Registers a basic block as calling a function and returning somewhere. :param call_site_addr: The address of a basic block that ends in a call. :param call_target_addr: The address of the target of said call. :param retn_addr: The address that said call will return to. """ self._call_sites[call_site_addr] = (call_target_addr, retn_addr)
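The bookkeeping reduces to a dict keyed by the calling block's address, mapping to the (target, return address) pair. A toy illustration with made-up addresses:

call_sites = {}

def add_call_site(call_site_addr, call_target_addr, retn_addr):
    call_sites[call_site_addr] = (call_target_addr, retn_addr)

add_call_site(0x400510, 0x400800, 0x400515)
print(hex(call_sites[0x400510][1]))  # 0x400515, where the call returns to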
def check_password_readable(self, section, fields): """Check if there is a readable configuration file and print a warning.""" if not fields: return # The information which of the configuration files # included which option is not available. To avoid false positives, # a warning is only printed if exactly one file has been read. if len(self.read_ok) != 1: return fn = self.read_ok[0] if fileutil.is_accessable_by_others(fn): log.warn(LOG_CHECK, "The configuration file %s contains password information (in section [%s] and options %s) and the file is readable by others. Please make the file only readable by you.", fn, section, fields) if os.name == 'posix': log.warn(LOG_CHECK, _("For example execute 'chmod go-rw %s'.") % fn) elif os.name == 'nt': log.warn(LOG_CHECK, _("See http://support.microsoft.com/kb/308419 for more info on setting file permissions."))
def function[check_password_readable, parameter[self, section, fields]]: constant[Check if there is a readable configuration file and print a warning.] if <ast.UnaryOp object at 0x7da20e954f40> begin[:] return[None] if compare[call[name[len], parameter[name[self].read_ok]] not_equal[!=] constant[1]] begin[:] return[None] variable[fn] assign[=] call[name[self].read_ok][constant[0]] if call[name[fileutil].is_accessable_by_others, parameter[name[fn]]] begin[:] call[name[log].warn, parameter[name[LOG_CHECK], constant[The configuration file %s contains password information (in section [%s] and options %s) and the file is readable by others. Please make the file only readable by you.], name[fn], name[section], name[fields]]] if compare[name[os].name equal[==] constant[posix]] begin[:] call[name[log].warn, parameter[name[LOG_CHECK], binary_operation[call[name[_], parameter[constant[For example execute 'chmod go-rw %s'.]]] <ast.Mod object at 0x7da2590d6920> name[fn]]]]
keyword[def] identifier[check_password_readable] ( identifier[self] , identifier[section] , identifier[fields] ): literal[string] keyword[if] keyword[not] identifier[fields] : keyword[return] keyword[if] identifier[len] ( identifier[self] . identifier[read_ok] )!= literal[int] : keyword[return] identifier[fn] = identifier[self] . identifier[read_ok] [ literal[int] ] keyword[if] identifier[fileutil] . identifier[is_accessable_by_others] ( identifier[fn] ): identifier[log] . identifier[warn] ( identifier[LOG_CHECK] , literal[string] , identifier[fn] , identifier[section] , identifier[fields] ) keyword[if] identifier[os] . identifier[name] == literal[string] : identifier[log] . identifier[warn] ( identifier[LOG_CHECK] , identifier[_] ( literal[string] )% identifier[fn] ) keyword[elif] identifier[os] . identifier[name] == literal[string] : identifier[log] . identifier[warn] ( identifier[LOG_CHECK] , identifier[_] ( literal[string] ))
def check_password_readable(self, section, fields): """Check if there is a readable configuration file and print a warning.""" if not fields: return # depends on [control=['if'], data=[]] # The information which of the configuration files # included which option is not available. To avoid false positives, # a warning is only printed if exactly one file has been read. if len(self.read_ok) != 1: return # depends on [control=['if'], data=[]] fn = self.read_ok[0] if fileutil.is_accessable_by_others(fn): log.warn(LOG_CHECK, 'The configuration file %s contains password information (in section [%s] and options %s) and the file is readable by others. Please make the file only readable by you.', fn, section, fields) if os.name == 'posix': log.warn(LOG_CHECK, _("For example execute 'chmod go-rw %s'.") % fn) # depends on [control=['if'], data=[]] elif os.name == 'nt': log.warn(LOG_CHECK, _('See http://support.microsoft.com/kb/308419 for more info on setting file permissions.')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
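A plausible reading of fileutil.is_accessable_by_others (an assumption, since that helper's source is not shown), plus the suggested 'chmod go-rw' fix expressed as a chmod call:

import os
import stat

def readable_by_others(path):
    # True if any group/other read bit is set on the file.
    return bool(os.stat(path).st_mode & (stat.S_IRGRP | stat.S_IROTH))

def restrict_to_owner(path):
    # Equivalent of 'chmod go-rw': drop group/other read and write bits.
    mode = stat.S_IMODE(os.stat(path).st_mode)
    os.chmod(path, mode & ~(stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH))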
def get_master_url(request, image_id): ''' get image's master url ... :param request: http GET request /renderer/master/url/<image_id>/ :param image_id: the master image primary key :return: master url in a json dictionary ''' im = get_object_or_404(MasterImage, pk=image_id) return JsonResponse({'url': im.get_master_url()})
def function[get_master_url, parameter[request, image_id]]: constant[ get image's master url ... :param request: http GET request /renderer/master/url/<image_id>/ :param image_id: the master image primary key :return: master url in a json dictionary ] variable[im] assign[=] call[name[get_object_or_404], parameter[name[MasterImage]]] return[call[name[JsonResponse], parameter[dictionary[[<ast.Constant object at 0x7da1b179c7c0>], [<ast.Call object at 0x7da1b179cb50>]]]]]
keyword[def] identifier[get_master_url] ( identifier[request] , identifier[image_id] ): literal[string] identifier[im] = identifier[get_object_or_404] ( identifier[MasterImage] , identifier[pk] = identifier[image_id] ) keyword[return] identifier[JsonResponse] ({ literal[string] : identifier[im] . identifier[get_master_url] ()})
def get_master_url(request, image_id): """ get image's master url ... :param request: http GET request /renderer/master/url/<image_id>/ :param image_id: the master image primary key :return: master url in a json dictionary """ im = get_object_or_404(MasterImage, pk=image_id) return JsonResponse({'url': im.get_master_url()})
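A minimal sketch of wiring the view above into a Django URLconf. The module path renderer.views and the route name are hypothetical; only the /renderer/master/url/<image_id>/ path comes from the docstring.

# Hypothetical urls.py for the view above; 'renderer.views' is an assumed
# module path, the route pattern mirrors the docstring.
from django.conf.urls import url

from renderer.views import get_master_url

urlpatterns = [
    url(r'^renderer/master/url/(?P<image_id>\d+)/$', get_master_url,
        name='master-url'),
]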
def calc_search_range(url, match_type, surt_ordered=True, url_canon=None):
    """
    Canonicalize a url (either with custom canonicalizer or
    standard canonicalizer with or without surt)

    Then, compute the start and end urls of the search range
    for a given match type.

    Supported match types:
    * exact
    * prefix
    * host
    * domain (only available with surt ordering)

    Examples below:

    # surt ranges
    >>> calc_search_range('http://example.com/path/file.html', 'exact')
    ('com,example)/path/file.html', 'com,example)/path/file.html!')

    >>> calc_search_range('http://example.com/path/file.html', 'prefix')
    ('com,example)/path/file.html', 'com,example)/path/file.htmm')

    # slash and ?
    >>> calc_search_range('http://example.com/path/', 'prefix')
    ('com,example)/path/', 'com,example)/path0')

    >>> calc_search_range('http://example.com/path?', 'prefix')
    ('com,example)/path?', 'com,example)/path@')

    >>> calc_search_range('http://example.com/path/?', 'prefix')
    ('com,example)/path?', 'com,example)/path@')

    >>> calc_search_range('http://example.com/path/file.html', 'host')
    ('com,example)/', 'com,example*')

    >>> calc_search_range('http://example.com/path/file.html', 'domain')
    ('com,example)/', 'com,example-')

    special case for tld domain range
    >>> calc_search_range('com', 'domain')
    ('com,', 'com-')

    # non-surt ranges
    >>> calc_search_range('http://example.com/path/file.html', 'exact', False)
    ('example.com/path/file.html', 'example.com/path/file.html!')

    >>> calc_search_range('http://example.com/path/file.html', 'prefix', False)
    ('example.com/path/file.html', 'example.com/path/file.htmm')

    >>> calc_search_range('http://example.com/path/file.html', 'host', False)
    ('example.com/', 'example.com0')

    # errors: domain range not supported
    >>> calc_search_range('http://example.com/path/file.html', 'domain', False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    UrlCanonicalizeException: matchType=domain unsupported for non-surt

    >>> calc_search_range('http://example.com/path/file.html', 'blah', False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    UrlCanonicalizeException: Invalid match_type: blah
    """
    def inc_last_char(x):
        return x[0:-1] + chr(ord(x[-1]) + 1)

    if not url_canon:
        # make new canon
        url_canon = UrlCanonicalizer(surt_ordered)
    else:
        # ensure surt order matches url_canon
        surt_ordered = url_canon.surt_ordered

    start_key = url_canon(url)

    if match_type == 'exact':
        end_key = start_key + '!'
    elif match_type == 'prefix':
        # add trailing slash if url has it
        if url.endswith('/') and not start_key.endswith('/'):
            start_key += '/'
        if url.endswith('?') and not start_key.endswith('?'):
            start_key += '?'
        end_key = inc_last_char(start_key)
    elif match_type == 'host':
        if surt_ordered:
            host = start_key.split(')/')[0]
            start_key = host + ')/'
            end_key = host + '*'
        else:
            host = urlparse.urlsplit(url).netloc
            start_key = host + '/'
            end_key = host + '0'
    elif match_type == 'domain':
        if not surt_ordered:
            msg = 'matchType=domain unsupported for non-surt'
            raise UrlCanonicalizeException(msg)
        host = start_key.split(')/')[0]
        # if tld, use com, as start_key
        # otherwise, stick with com,example)/
        if ',' not in host:
            start_key = host + ','
        else:
            start_key = host + ')/'
        end_key = host + '-'
    else:
        raise UrlCanonicalizeException('Invalid match_type: ' + match_type)

    return (start_key, end_key)
def function[calc_search_range, parameter[url, match_type, surt_ordered, url_canon]]:
    constant[
    Canonicalize a url (either with custom canonicalizer or
    standard canonicalizer with or without surt)

    Then, compute the start and end urls of the search range
    for a given match type.

    Supported match types:
    * exact
    * prefix
    * host
    * domain (only available with surt ordering)

    Examples below:

    # surt ranges
    >>> calc_search_range('http://example.com/path/file.html', 'exact')
    ('com,example)/path/file.html', 'com,example)/path/file.html!')

    >>> calc_search_range('http://example.com/path/file.html', 'prefix')
    ('com,example)/path/file.html', 'com,example)/path/file.htmm')

    # slash and ?
    >>> calc_search_range('http://example.com/path/', 'prefix')
    ('com,example)/path/', 'com,example)/path0')

    >>> calc_search_range('http://example.com/path?', 'prefix')
    ('com,example)/path?', 'com,example)/path@')

    >>> calc_search_range('http://example.com/path/?', 'prefix')
    ('com,example)/path?', 'com,example)/path@')

    >>> calc_search_range('http://example.com/path/file.html', 'host')
    ('com,example)/', 'com,example*')

    >>> calc_search_range('http://example.com/path/file.html', 'domain')
    ('com,example)/', 'com,example-')

    special case for tld domain range
    >>> calc_search_range('com', 'domain')
    ('com,', 'com-')

    # non-surt ranges
    >>> calc_search_range('http://example.com/path/file.html', 'exact', False)
    ('example.com/path/file.html', 'example.com/path/file.html!')

    >>> calc_search_range('http://example.com/path/file.html', 'prefix', False)
    ('example.com/path/file.html', 'example.com/path/file.htmm')

    >>> calc_search_range('http://example.com/path/file.html', 'host', False)
    ('example.com/', 'example.com0')

    # errors: domain range not supported
    >>> calc_search_range('http://example.com/path/file.html', 'domain', False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    UrlCanonicalizeException: matchType=domain unsupported for non-surt

    >>> calc_search_range('http://example.com/path/file.html', 'blah', False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    UrlCanonicalizeException: Invalid match_type: blah
    ]
    def function[inc_last_char, parameter[x]]:
        return[binary_operation[call[name[x]][<ast.Slice object at 0x7da204565ff0>] + call[name[chr], parameter[binary_operation[call[name[ord], parameter[call[name[x]][<ast.UnaryOp object at 0x7da2045651e0>]]] + constant[1]]]]]]
    if <ast.UnaryOp object at 0x7da204567250> begin[:]
        variable[url_canon] assign[=] call[name[UrlCanonicalizer], parameter[name[surt_ordered]]]
    variable[start_key] assign[=] call[name[url_canon], parameter[name[url]]]
    if compare[name[match_type] equal[==] constant[exact]] begin[:]
        variable[end_key] assign[=] binary_operation[name[start_key] + constant[!]]
    return[tuple[[<ast.Name object at 0x7da204963130>, <ast.Name object at 0x7da204960ee0>]]]
keyword[def] identifier[calc_search_range] ( identifier[url] , identifier[match_type] , identifier[surt_ordered] = keyword[True] , identifier[url_canon] = keyword[None] ): literal[string] keyword[def] identifier[inc_last_char] ( identifier[x] ): keyword[return] identifier[x] [ literal[int] :- literal[int] ]+ identifier[chr] ( identifier[ord] ( identifier[x] [- literal[int] ])+ literal[int] ) keyword[if] keyword[not] identifier[url_canon] : identifier[url_canon] = identifier[UrlCanonicalizer] ( identifier[surt_ordered] ) keyword[else] : identifier[surt_ordered] = identifier[url_canon] . identifier[surt_ordered] identifier[start_key] = identifier[url_canon] ( identifier[url] ) keyword[if] identifier[match_type] == literal[string] : identifier[end_key] = identifier[start_key] + literal[string] keyword[elif] identifier[match_type] == literal[string] : keyword[if] identifier[url] . identifier[endswith] ( literal[string] ) keyword[and] keyword[not] identifier[start_key] . identifier[endswith] ( literal[string] ): identifier[start_key] += literal[string] keyword[if] identifier[url] . identifier[endswith] ( literal[string] ) keyword[and] keyword[not] identifier[start_key] . identifier[endswith] ( literal[string] ): identifier[start_key] += literal[string] identifier[end_key] = identifier[inc_last_char] ( identifier[start_key] ) keyword[elif] identifier[match_type] == literal[string] : keyword[if] identifier[surt_ordered] : identifier[host] = identifier[start_key] . identifier[split] ( literal[string] )[ literal[int] ] identifier[start_key] = identifier[host] + literal[string] identifier[end_key] = identifier[host] + literal[string] keyword[else] : identifier[host] = identifier[urlparse] . identifier[urlsplit] ( identifier[url] ). identifier[netloc] identifier[start_key] = identifier[host] + literal[string] identifier[end_key] = identifier[host] + literal[string] keyword[elif] identifier[match_type] == literal[string] : keyword[if] keyword[not] identifier[surt_ordered] : identifier[msg] = literal[string] keyword[raise] identifier[UrlCanonicalizeException] ( identifier[msg] ) identifier[host] = identifier[start_key] . identifier[split] ( literal[string] )[ literal[int] ] keyword[if] literal[string] keyword[not] keyword[in] identifier[host] : identifier[start_key] = identifier[host] + literal[string] keyword[else] : identifier[start_key] = identifier[host] + literal[string] identifier[end_key] = identifier[host] + literal[string] keyword[else] : keyword[raise] identifier[UrlCanonicalizeException] ( literal[string] + identifier[match_type] ) keyword[return] ( identifier[start_key] , identifier[end_key] )
def calc_search_range(url, match_type, surt_ordered=True, url_canon=None):
    """
    Canonicalize a url (either with custom canonicalizer or
    standard canonicalizer with or without surt)

    Then, compute the start and end urls of the search range
    for a given match type.

    Supported match types:
    * exact
    * prefix
    * host
    * domain (only available with surt ordering)

    Examples below:

    # surt ranges
    >>> calc_search_range('http://example.com/path/file.html', 'exact')
    ('com,example)/path/file.html', 'com,example)/path/file.html!')

    >>> calc_search_range('http://example.com/path/file.html', 'prefix')
    ('com,example)/path/file.html', 'com,example)/path/file.htmm')

    # slash and ?
    >>> calc_search_range('http://example.com/path/', 'prefix')
    ('com,example)/path/', 'com,example)/path0')

    >>> calc_search_range('http://example.com/path?', 'prefix')
    ('com,example)/path?', 'com,example)/path@')

    >>> calc_search_range('http://example.com/path/?', 'prefix')
    ('com,example)/path?', 'com,example)/path@')

    >>> calc_search_range('http://example.com/path/file.html', 'host')
    ('com,example)/', 'com,example*')

    >>> calc_search_range('http://example.com/path/file.html', 'domain')
    ('com,example)/', 'com,example-')

    special case for tld domain range
    >>> calc_search_range('com', 'domain')
    ('com,', 'com-')

    # non-surt ranges
    >>> calc_search_range('http://example.com/path/file.html', 'exact', False)
    ('example.com/path/file.html', 'example.com/path/file.html!')

    >>> calc_search_range('http://example.com/path/file.html', 'prefix', False)
    ('example.com/path/file.html', 'example.com/path/file.htmm')

    >>> calc_search_range('http://example.com/path/file.html', 'host', False)
    ('example.com/', 'example.com0')

    # errors: domain range not supported
    >>> calc_search_range('http://example.com/path/file.html', 'domain', False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    UrlCanonicalizeException: matchType=domain unsupported for non-surt

    >>> calc_search_range('http://example.com/path/file.html', 'blah', False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    UrlCanonicalizeException: Invalid match_type: blah
    """

    def inc_last_char(x):
        return x[0:-1] + chr(ord(x[-1]) + 1)
    if not url_canon:
        # make new canon
        url_canon = UrlCanonicalizer(surt_ordered) # depends on [control=['if'], data=[]]
    else:
        # ensure surt order matches url_canon
        surt_ordered = url_canon.surt_ordered
    start_key = url_canon(url)
    if match_type == 'exact':
        end_key = start_key + '!' # depends on [control=['if'], data=[]]
    elif match_type == 'prefix':
        # add trailing slash if url has it
        if url.endswith('/') and (not start_key.endswith('/')):
            start_key += '/' # depends on [control=['if'], data=[]]
        if url.endswith('?') and (not start_key.endswith('?')):
            start_key += '?' # depends on [control=['if'], data=[]]
        end_key = inc_last_char(start_key) # depends on [control=['if'], data=[]]
    elif match_type == 'host':
        if surt_ordered:
            host = start_key.split(')/')[0]
            start_key = host + ')/'
            end_key = host + '*' # depends on [control=['if'], data=[]]
        else:
            host = urlparse.urlsplit(url).netloc
            start_key = host + '/'
            end_key = host + '0' # depends on [control=['if'], data=[]]
    elif match_type == 'domain':
        if not surt_ordered:
            msg = 'matchType=domain unsupported for non-surt'
            raise UrlCanonicalizeException(msg) # depends on [control=['if'], data=[]]
        host = start_key.split(')/')[0]
        # if tld, use com, as start_key
        # otherwise, stick with com,example)/
        if ',' not in host:
            start_key = host + ',' # depends on [control=['if'], data=['host']]
        else:
            start_key = host + ')/'
        end_key = host + '-' # depends on [control=['if'], data=[]]
    else:
        raise UrlCanonicalizeException('Invalid match_type: ' + match_type)
    return (start_key, end_key)
def from_payload(self, payload): """Init frame from binary data.""" self._software_version = payload[0:6] self.hardware_version = payload[6] self.product_group = payload[7] self.product_type = payload[8]
def function[from_payload, parameter[self, payload]]: constant[Init frame from binary data.] name[self]._software_version assign[=] call[name[payload]][<ast.Slice object at 0x7da20c6c64d0>] name[self].hardware_version assign[=] call[name[payload]][constant[6]] name[self].product_group assign[=] call[name[payload]][constant[7]] name[self].product_type assign[=] call[name[payload]][constant[8]]
keyword[def] identifier[from_payload] ( identifier[self] , identifier[payload] ): literal[string] identifier[self] . identifier[_software_version] = identifier[payload] [ literal[int] : literal[int] ] identifier[self] . identifier[hardware_version] = identifier[payload] [ literal[int] ] identifier[self] . identifier[product_group] = identifier[payload] [ literal[int] ] identifier[self] . identifier[product_type] = identifier[payload] [ literal[int] ]
def from_payload(self, payload): """Init frame from binary data.""" self._software_version = payload[0:6] self.hardware_version = payload[6] self.product_group = payload[7] self.product_type = payload[8]
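A small illustration of the byte layout this method decodes; the frame instance and the payload values below are made up, only the offsets (bytes 0-5 for the version, then 6/7/8) come from the method itself.

# Hypothetical 9-byte payload: 6 version bytes, then hardware version,
# product group and product type at offsets 6, 7 and 8.
payload = bytes([0, 2, 0, 0, 71, 0, 5, 14, 3])

frame.from_payload(payload)          # 'frame' is an assumed instance
assert frame.hardware_version == 5   # payload[6]
assert frame.product_group == 14     # payload[7]
assert frame.product_type == 3       # payload[8]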
def activities(self, *args, **kwargs): """Retrieve activities belonging to this scope. See :class:`pykechain.Client.activities` for available parameters. """ if self._client.match_app_version(label='wim', version='<2.0.0', default=True): return self._client.activities(*args, scope=self.id, **kwargs) else: return self._client.activities(*args, scope_id=self.id, **kwargs)
def function[activities, parameter[self]]: constant[Retrieve activities belonging to this scope. See :class:`pykechain.Client.activities` for available parameters. ] if call[name[self]._client.match_app_version, parameter[]] begin[:] return[call[name[self]._client.activities, parameter[<ast.Starred object at 0x7da1b24e5420>]]]
keyword[def] identifier[activities] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[self] . identifier[_client] . identifier[match_app_version] ( identifier[label] = literal[string] , identifier[version] = literal[string] , identifier[default] = keyword[True] ): keyword[return] identifier[self] . identifier[_client] . identifier[activities] (* identifier[args] , identifier[scope] = identifier[self] . identifier[id] ,** identifier[kwargs] ) keyword[else] : keyword[return] identifier[self] . identifier[_client] . identifier[activities] (* identifier[args] , identifier[scope_id] = identifier[self] . identifier[id] ,** identifier[kwargs] )
def activities(self, *args, **kwargs): """Retrieve activities belonging to this scope. See :class:`pykechain.Client.activities` for available parameters. """ if self._client.match_app_version(label='wim', version='<2.0.0', default=True): return self._client.activities(*args, scope=self.id, **kwargs) # depends on [control=['if'], data=[]] else: return self._client.activities(*args, scope_id=self.id, **kwargs)
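A hedged usage sketch: from the caller's side the version dispatch is invisible, the method simply forwards keyword filters. The client construction, credentials and scope name below are placeholders, not taken from this record.

# Assuming a pykechain Client and an existing Scope; depending on the
# server's 'wim' app version the scope is passed as scope= or scope_id=.
from pykechain import Client

client = Client(url='https://kec.example.com')    # hypothetical server
client.login(username='demo', password='secret')  # hypothetical credentials
scope = client.scope(name='Bike Project')         # hypothetical scope
design_tasks = scope.activities(name__contains='Design')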
def setExtension(self, ext): """ Set a new file extension for the sequence. Note: A leading period will be added if none is provided. Args: ext (str): the new file extension """ if ext[0] != ".": ext = "." + ext self._ext = utils.asString(ext)
def function[setExtension, parameter[self, ext]]: constant[ Set a new file extension for the sequence. Note: A leading period will be added if none is provided. Args: ext (str): the new file extension ] if compare[call[name[ext]][constant[0]] not_equal[!=] constant[.]] begin[:] variable[ext] assign[=] binary_operation[constant[.] + name[ext]] name[self]._ext assign[=] call[name[utils].asString, parameter[name[ext]]]
keyword[def] identifier[setExtension] ( identifier[self] , identifier[ext] ): literal[string] keyword[if] identifier[ext] [ literal[int] ]!= literal[string] : identifier[ext] = literal[string] + identifier[ext] identifier[self] . identifier[_ext] = identifier[utils] . identifier[asString] ( identifier[ext] )
def setExtension(self, ext): """ Set a new file extension for the sequence. Note: A leading period will be added if none is provided. Args: ext (str): the new file extension """ if ext[0] != '.': ext = '.' + ext # depends on [control=['if'], data=[]] self._ext = utils.asString(ext)
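A short, hedged example of the setter's period handling; the FileSequence constructor and the sequence path are assumptions about the fileseq-style API this method appears to belong to.

from fileseq import FileSequence   # assumed package providing this method

seq = FileSequence('/shots/sc01/plate.1-24#.exr')   # hypothetical sequence
seq.setExtension('jpg')    # the leading period is added automatically
seq.setExtension('.jpg')   # equivalent; an existing period is kept as-is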
def _getitem(self, key, validate=False): """Return specified page from cache or file.""" key = int(key) pages = self.pages if key < 0: key %= len(self) elif self._indexed and key >= len(pages): raise IndexError('index out of range') if key < len(pages): page = pages[key] if self._cache: if not isinstance(page, inttypes): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') return page elif isinstance(page, (TiffPage, self._tiffpage)): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') return page self._seek(key) page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe) if validate and validate != page.hash: raise RuntimeError('page hash mismatch') if self._cache: pages[key] = page return page
def function[_getitem, parameter[self, key, validate]]: constant[Return specified page from cache or file.] variable[key] assign[=] call[name[int], parameter[name[key]]] variable[pages] assign[=] name[self].pages if compare[name[key] less[<] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b189f160> if compare[name[key] less[<] call[name[len], parameter[name[pages]]]] begin[:] variable[page] assign[=] call[name[pages]][name[key]] if name[self]._cache begin[:] if <ast.UnaryOp object at 0x7da1b189f940> begin[:] if <ast.BoolOp object at 0x7da1b189ebc0> begin[:] <ast.Raise object at 0x7da1b189eb00> return[name[page]] call[name[self]._seek, parameter[name[key]]] variable[page] assign[=] call[name[self]._tiffpage, parameter[name[self].parent]] if <ast.BoolOp object at 0x7da1b189f6d0> begin[:] <ast.Raise object at 0x7da1b189dae0> if name[self]._cache begin[:] call[name[pages]][name[key]] assign[=] name[page] return[name[page]]
keyword[def] identifier[_getitem] ( identifier[self] , identifier[key] , identifier[validate] = keyword[False] ): literal[string] identifier[key] = identifier[int] ( identifier[key] ) identifier[pages] = identifier[self] . identifier[pages] keyword[if] identifier[key] < literal[int] : identifier[key] %= identifier[len] ( identifier[self] ) keyword[elif] identifier[self] . identifier[_indexed] keyword[and] identifier[key] >= identifier[len] ( identifier[pages] ): keyword[raise] identifier[IndexError] ( literal[string] ) keyword[if] identifier[key] < identifier[len] ( identifier[pages] ): identifier[page] = identifier[pages] [ identifier[key] ] keyword[if] identifier[self] . identifier[_cache] : keyword[if] keyword[not] identifier[isinstance] ( identifier[page] , identifier[inttypes] ): keyword[if] identifier[validate] keyword[and] identifier[validate] != identifier[page] . identifier[hash] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[return] identifier[page] keyword[elif] identifier[isinstance] ( identifier[page] ,( identifier[TiffPage] , identifier[self] . identifier[_tiffpage] )): keyword[if] identifier[validate] keyword[and] identifier[validate] != identifier[page] . identifier[hash] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[return] identifier[page] identifier[self] . identifier[_seek] ( identifier[key] ) identifier[page] = identifier[self] . identifier[_tiffpage] ( identifier[self] . identifier[parent] , identifier[index] = identifier[key] , identifier[keyframe] = identifier[self] . identifier[_keyframe] ) keyword[if] identifier[validate] keyword[and] identifier[validate] != identifier[page] . identifier[hash] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[if] identifier[self] . identifier[_cache] : identifier[pages] [ identifier[key] ]= identifier[page] keyword[return] identifier[page]
def _getitem(self, key, validate=False): """Return specified page from cache or file.""" key = int(key) pages = self.pages if key < 0: key %= len(self) # depends on [control=['if'], data=['key']] elif self._indexed and key >= len(pages): raise IndexError('index out of range') # depends on [control=['if'], data=[]] if key < len(pages): page = pages[key] if self._cache: if not isinstance(page, inttypes): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') # depends on [control=['if'], data=[]] return page # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif isinstance(page, (TiffPage, self._tiffpage)): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') # depends on [control=['if'], data=[]] return page # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['key']] self._seek(key) page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe) if validate and validate != page.hash: raise RuntimeError('page hash mismatch') # depends on [control=['if'], data=[]] if self._cache: pages[key] = page # depends on [control=['if'], data=[]] return page
def order_param(dts, axis=2): """Order parameter of phase synchronization""" return np.abs(np.exp(1.0j * dts).mean(axis=axis))
def function[order_param, parameter[dts, axis]]: constant[Order parameter of phase synchronization] return[call[name[np].abs, parameter[call[call[name[np].exp, parameter[binary_operation[constant[1j] * name[dts]]]].mean, parameter[]]]]]
keyword[def] identifier[order_param] ( identifier[dts] , identifier[axis] = literal[int] ): literal[string] keyword[return] identifier[np] . identifier[abs] ( identifier[np] . identifier[exp] ( literal[int] * identifier[dts] ). identifier[mean] ( identifier[axis] = identifier[axis] ))
def order_param(dts, axis=2): """Order parameter of phase synchronization""" return np.abs(np.exp(1j * dts).mean(axis=axis))
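This is the standard phase-locking statistic R = |mean(exp(i*dtheta))|: identical phase differences give R = 1, uniformly random ones give R near 0. A self-contained check (the one-liner is repeated so the snippet runs on its own; the array shapes are purely illustrative):

import numpy as np

def order_param(dts, axis=2):
    """Order parameter of phase synchronization"""
    return np.abs(np.exp(1.0j * dts).mean(axis=axis))

rng = np.random.default_rng(0)
locked = np.zeros((2, 3, 1000))                    # constant phase differences
random = rng.uniform(-np.pi, np.pi, (2, 3, 1000))  # uniformly random phases

print(order_param(locked))   # all 1.0     -> perfect synchronization
print(order_param(random))   # around 0.03 -> no synchronization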
def _add_record(self, record_set_class, name, values, ttl=60, weight=None,
                region=None, set_identifier=None, alias_hosted_zone_id=None,
                alias_dns_name=None):
    """
    Convenience method for creating ResourceRecordSets. Most of the calls
    are basically the same, this saves on repetition.

    :rtype: tuple
    :returns: A tuple in the form of ``(rrset, change_info)``, where
        ``rrset`` is the newly created ResourceRecordSet sub-class
        instance.
    """

    self._halt_if_already_deleted()

    rrset_kwargs = dict(
        connection=self.connection,
        zone_id=self.id,
        name=name,
        ttl=ttl,
        records=values,
        weight=weight,
        region=region,
        set_identifier=set_identifier,
    )

    if alias_hosted_zone_id or alias_dns_name:
        rrset_kwargs.update(dict(
            alias_hosted_zone_id=alias_hosted_zone_id,
            alias_dns_name=alias_dns_name
        ))

    rrset = record_set_class(**rrset_kwargs)

    cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
    cset.add_change('CREATE', rrset)
    change_info = self.connection._change_resource_record_sets(cset)

    return rrset, change_info
def function[_add_record, parameter[self, record_set_class, name, values, ttl, weight, region, set_identifier, alias_hosted_zone_id, alias_dns_name]]: constant[ Convenience method for creating ResourceRecordSets. Most of the calls are basically the same, this saves on repetition. :rtype: tuple :returns: A tuple in the form of ``(rrset, change_info)``, where ``rrset`` is the newly created ResourceRecordSet sub-class instance. ] call[name[self]._halt_if_already_deleted, parameter[]] variable[rrset_kwargs] assign[=] call[name[dict], parameter[]] if <ast.BoolOp object at 0x7da1b0370700> begin[:] call[name[rrset_kwargs].update, parameter[call[name[dict], parameter[]]]] variable[rrset] assign[=] call[name[record_set_class], parameter[]] variable[cset] assign[=] call[name[ChangeSet], parameter[]] call[name[cset].add_change, parameter[constant[CREATE], name[rrset]]] variable[change_info] assign[=] call[name[self].connection._change_resource_record_sets, parameter[name[cset]]] return[tuple[[<ast.Name object at 0x7da1b03716f0>, <ast.Name object at 0x7da1b0373220>]]]
keyword[def] identifier[_add_record] ( identifier[self] , identifier[record_set_class] , identifier[name] , identifier[values] , identifier[ttl] = literal[int] , identifier[weight] = keyword[None] , identifier[region] = keyword[None] , identifier[set_identifier] = keyword[None] , identifier[alias_hosted_zone_id] = keyword[None] , identifier[alias_dns_name] = keyword[None] ): literal[string] identifier[self] . identifier[_halt_if_already_deleted] () identifier[rrset_kwargs] = identifier[dict] ( identifier[connection] = identifier[self] . identifier[connection] , identifier[zone_id] = identifier[self] . identifier[id] , identifier[name] = identifier[name] , identifier[ttl] = identifier[ttl] , identifier[records] = identifier[values] , identifier[weight] = identifier[weight] , identifier[region] = identifier[region] , identifier[set_identifier] = identifier[set_identifier] , ) keyword[if] identifier[alias_hosted_zone_id] keyword[or] identifier[alias_dns_name] : identifier[rrset_kwargs] . identifier[update] ( identifier[dict] ( identifier[alias_hosted_zone_id] = identifier[alias_hosted_zone_id] , identifier[alias_dns_name] = identifier[alias_dns_name] )) identifier[rrset] = identifier[record_set_class] (** identifier[rrset_kwargs] ) identifier[cset] = identifier[ChangeSet] ( identifier[connection] = identifier[self] . identifier[connection] , identifier[hosted_zone_id] = identifier[self] . identifier[id] ) identifier[cset] . identifier[add_change] ( literal[string] , identifier[rrset] ) identifier[change_info] = identifier[self] . identifier[connection] . identifier[_change_resource_record_sets] ( identifier[cset] ) keyword[return] identifier[rrset] , identifier[change_info]
def _add_record(self, record_set_class, name, values, ttl=60, weight=None, region=None, set_identifier=None, alias_hosted_zone_id=None, alias_dns_name=None): """ Convenience method for creating ResourceRecordSets. Most of the calls are basically the same, this saves on repetition. :rtype: tuple :returns: A tuple in the form of ``(rrset, change_info)``, where ``rrset`` is the newly created ResourceRecordSet sub-class instance. """ self._halt_if_already_deleted() rrset_kwargs = dict(connection=self.connection, zone_id=self.id, name=name, ttl=ttl, records=values, weight=weight, region=region, set_identifier=set_identifier) if alias_hosted_zone_id or alias_dns_name: rrset_kwargs.update(dict(alias_hosted_zone_id=alias_hosted_zone_id, alias_dns_name=alias_dns_name)) # depends on [control=['if'], data=[]] rrset = record_set_class(**rrset_kwargs) cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id) cset.add_change('CREATE', rrset) change_info = self.connection._change_resource_record_sets(cset) return (rrset, change_info)
def get_common_cbc_transforms(requested_params, variable_args, valid_params=None):
    """Determines if any additional parameters from the InferenceFile are
    needed to get derived parameters that user has asked for.

    First it will try to add any base parameters that are required to
    calculate the derived parameters. Then it will add any sampling
    parameters that are required to calculate the base parameters needed.

    Parameters
    ----------
    requested_params : list
        List of parameters that user wants.
    variable_args : list
        List of parameters that InferenceFile has.
    valid_params : list
        List of parameters that can be accepted.

    Returns
    -------
    requested_params : list
        Updated list of parameters that user wants.
    all_c : list
        List of BaseTransforms to apply.
    """
    variable_args = set(variable_args) if not isinstance(variable_args, set) \
        else variable_args

    # try to parse any equations by putting all strings together
    # this will get some garbage but ensures all alphanumeric/underscored
    # parameter names are added
    new_params = []
    for opt in requested_params:
        s = ""
        for ch in opt:
            s += ch if ch.isalnum() or ch == "_" else " "
        new_params += s.split(" ")
    requested_params = set(list(requested_params) + list(new_params))

    # can pass a list of valid parameters to remove garbage from parsing above
    if valid_params:
        valid_params = set(valid_params)
        requested_params = requested_params.intersection(valid_params)

    # find all the transforms for the requested derived parameters
    # calculated from base parameters
    from_base_c = []
    for converter in common_cbc_inverse_transforms:
        if (converter.outputs.issubset(variable_args) or
                converter.outputs.isdisjoint(requested_params)):
            continue
        intersect = converter.outputs.intersection(requested_params)
        if (not intersect or
                intersect.issubset(converter.inputs) or
                intersect.issubset(variable_args)):
            continue
        requested_params.update(converter.inputs)
        from_base_c.append(converter)

    # find all the transforms for the required base parameters
    # calculated from sampling parameters
    to_base_c = []
    for converter in common_cbc_forward_transforms:
        if (converter.inputs.issubset(variable_args) and
                len(converter.outputs.intersection(requested_params)) > 0):
            requested_params.update(converter.inputs)
            to_base_c.append(converter)
            variable_args.update(converter.outputs)

    # get list of transforms that converts sampling parameters to the base
    # parameters and then converts base parameters to the derived parameters
    all_c = to_base_c + from_base_c

    return list(requested_params), all_c
def function[get_common_cbc_transforms, parameter[requested_params, variable_args, valid_params]]: constant[Determines if any additional parameters from the InferenceFile are needed to get derived parameters that user has asked for. First it will try to add any base parameters that are required to calculate the derived parameters. Then it will add any sampling parameters that are required to calculate the base parameters needed. Parameters ---------- requested_params : list List of parameters that user wants. variable_args : list List of parameters that InferenceFile has. valid_params : list List of parameters that can be accepted. Returns ------- requested_params : list Updated list of parameters that user wants. all_c : list List of BaseTransforms to apply. ] variable[variable_args] assign[=] <ast.IfExp object at 0x7da2044c2b30> variable[new_params] assign[=] list[[]] for taget[name[opt]] in starred[name[requested_params]] begin[:] variable[s] assign[=] constant[] for taget[name[ch]] in starred[name[opt]] begin[:] <ast.AugAssign object at 0x7da2044c0700> <ast.AugAssign object at 0x7da2044c1ab0> variable[requested_params] assign[=] call[name[set], parameter[binary_operation[call[name[list], parameter[name[requested_params]]] + call[name[list], parameter[name[new_params]]]]]] if name[valid_params] begin[:] variable[valid_params] assign[=] call[name[set], parameter[name[valid_params]]] variable[requested_params] assign[=] call[name[requested_params].intersection, parameter[name[valid_params]]] variable[from_base_c] assign[=] list[[]] for taget[name[converter]] in starred[name[common_cbc_inverse_transforms]] begin[:] if <ast.BoolOp object at 0x7da2044c0640> begin[:] continue variable[intersect] assign[=] call[name[converter].outputs.intersection, parameter[name[requested_params]]] if <ast.BoolOp object at 0x7da2044c1a80> begin[:] continue call[name[requested_params].update, parameter[name[converter].inputs]] call[name[from_base_c].append, parameter[name[converter]]] variable[to_base_c] assign[=] list[[]] for taget[name[converter]] in starred[name[common_cbc_forward_transforms]] begin[:] if <ast.BoolOp object at 0x7da2044c15a0> begin[:] call[name[requested_params].update, parameter[name[converter].inputs]] call[name[to_base_c].append, parameter[name[converter]]] call[name[variable_args].update, parameter[name[converter].outputs]] variable[all_c] assign[=] binary_operation[name[to_base_c] + name[from_base_c]] return[tuple[[<ast.Call object at 0x7da207f01f90>, <ast.Name object at 0x7da207f03520>]]]
keyword[def] identifier[get_common_cbc_transforms] ( identifier[requested_params] , identifier[variable_args] , identifier[valid_params] = keyword[None] ): literal[string] identifier[variable_args] = identifier[set] ( identifier[variable_args] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[variable_args] , identifier[set] ) keyword[else] identifier[variable_args] identifier[new_params] =[] keyword[for] identifier[opt] keyword[in] identifier[requested_params] : identifier[s] = literal[string] keyword[for] identifier[ch] keyword[in] identifier[opt] : identifier[s] += identifier[ch] keyword[if] identifier[ch] . identifier[isalnum] () keyword[or] identifier[ch] == literal[string] keyword[else] literal[string] identifier[new_params] += identifier[s] . identifier[split] ( literal[string] ) identifier[requested_params] = identifier[set] ( identifier[list] ( identifier[requested_params] )+ identifier[list] ( identifier[new_params] )) keyword[if] identifier[valid_params] : identifier[valid_params] = identifier[set] ( identifier[valid_params] ) identifier[requested_params] = identifier[requested_params] . identifier[intersection] ( identifier[valid_params] ) identifier[from_base_c] =[] keyword[for] identifier[converter] keyword[in] identifier[common_cbc_inverse_transforms] : keyword[if] ( identifier[converter] . identifier[outputs] . identifier[issubset] ( identifier[variable_args] ) keyword[or] identifier[converter] . identifier[outputs] . identifier[isdisjoint] ( identifier[requested_params] )): keyword[continue] identifier[intersect] = identifier[converter] . identifier[outputs] . identifier[intersection] ( identifier[requested_params] ) keyword[if] ( keyword[not] identifier[intersect] keyword[or] identifier[intersect] . identifier[issubset] ( identifier[converter] . identifier[inputs] ) keyword[or] identifier[intersect] . identifier[issubset] ( identifier[variable_args] )): keyword[continue] identifier[requested_params] . identifier[update] ( identifier[converter] . identifier[inputs] ) identifier[from_base_c] . identifier[append] ( identifier[converter] ) identifier[to_base_c] =[] keyword[for] identifier[converter] keyword[in] identifier[common_cbc_forward_transforms] : keyword[if] ( identifier[converter] . identifier[inputs] . identifier[issubset] ( identifier[variable_args] ) keyword[and] identifier[len] ( identifier[converter] . identifier[outputs] . identifier[intersection] ( identifier[requested_params] ))> literal[int] ): identifier[requested_params] . identifier[update] ( identifier[converter] . identifier[inputs] ) identifier[to_base_c] . identifier[append] ( identifier[converter] ) identifier[variable_args] . identifier[update] ( identifier[converter] . identifier[outputs] ) identifier[all_c] = identifier[to_base_c] + identifier[from_base_c] keyword[return] identifier[list] ( identifier[requested_params] ), identifier[all_c]
def get_common_cbc_transforms(requested_params, variable_args, valid_params=None):
    """Determines if any additional parameters from the InferenceFile are
    needed to get derived parameters that user has asked for.

    First it will try to add any base parameters that are required to
    calculate the derived parameters. Then it will add any sampling
    parameters that are required to calculate the base parameters needed.

    Parameters
    ----------
    requested_params : list
        List of parameters that user wants.
    variable_args : list
        List of parameters that InferenceFile has.
    valid_params : list
        List of parameters that can be accepted.

    Returns
    -------
    requested_params : list
        Updated list of parameters that user wants.
    all_c : list
        List of BaseTransforms to apply.
    """
    variable_args = set(variable_args) if not isinstance(variable_args, set) else variable_args
    # try to parse any equations by putting all strings together
    # this will get some garbage but ensures all alphanumeric/underscored
    # parameter names are added
    new_params = []
    for opt in requested_params:
        s = ''
        for ch in opt:
            s += ch if ch.isalnum() or ch == '_' else ' ' # depends on [control=['for'], data=['ch']]
        new_params += s.split(' ') # depends on [control=['for'], data=['opt']]
    requested_params = set(list(requested_params) + list(new_params))
    # can pass a list of valid parameters to remove garbage from parsing above
    if valid_params:
        valid_params = set(valid_params)
        requested_params = requested_params.intersection(valid_params) # depends on [control=['if'], data=[]]
    # find all the transforms for the requested derived parameters
    # calculated from base parameters
    from_base_c = []
    for converter in common_cbc_inverse_transforms:
        if converter.outputs.issubset(variable_args) or converter.outputs.isdisjoint(requested_params):
            continue # depends on [control=['if'], data=[]]
        intersect = converter.outputs.intersection(requested_params)
        if not intersect or intersect.issubset(converter.inputs) or intersect.issubset(variable_args):
            continue # depends on [control=['if'], data=[]]
        requested_params.update(converter.inputs)
        from_base_c.append(converter) # depends on [control=['for'], data=['converter']]
    # find all the transforms for the required base parameters
    # calculated from sampling parameters
    to_base_c = []
    for converter in common_cbc_forward_transforms:
        if converter.inputs.issubset(variable_args) and len(converter.outputs.intersection(requested_params)) > 0:
            requested_params.update(converter.inputs)
            to_base_c.append(converter)
            variable_args.update(converter.outputs) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['converter']]
    # get list of transforms that converts sampling parameters to the base
    # parameters and then converts base parameters to the derived parameters
    all_c = to_base_c + from_base_c
    return (list(requested_params), all_c)
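A hedged driver sketch. It assumes this function lives in pycbc.transforms next to the module-level transform lists it references, and the parameter names below (chirp mass and mass ratio derived from component masses) are illustrative.

# Assumed import location; common_cbc_*_transforms referenced above are
# module-level lists in the same package.
from pycbc.transforms import get_common_cbc_transforms

params, chain = get_common_cbc_transforms(
    requested_params=['mchirp', 'q'],
    variable_args=['mass1', 'mass2', 'distance'])

# 'chain' applies sampling -> base -> derived parameters, in order.
for t in chain:
    print(type(t).__name__, sorted(t.inputs), '->', sorted(t.outputs))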
def _valid_locales(locales, normalize):
    """
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : list
        A list of locale names to validate.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    if normalize:
        normalizer = lambda x: locale.normalize(x.strip())
    else:
        normalizer = lambda x: x.strip()

    return list(filter(can_set_locale, map(normalizer, locales)))
def function[_valid_locales, parameter[locales, normalize]]:
    constant[
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : list
        A list of locale names to validate.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    ]
    if name[normalize] begin[:]
        variable[normalizer] assign[=] <ast.Lambda object at 0x7da1b1e77e20>
    return[call[name[list], parameter[call[name[filter], parameter[name[can_set_locale], call[name[map], parameter[name[normalizer], name[locales]]]]]]]]
keyword[def] identifier[_valid_locales] ( identifier[locales] , identifier[normalize] ): literal[string] keyword[if] identifier[normalize] : identifier[normalizer] = keyword[lambda] identifier[x] : identifier[locale] . identifier[normalize] ( identifier[x] . identifier[strip] ()) keyword[else] : identifier[normalizer] = keyword[lambda] identifier[x] : identifier[x] . identifier[strip] () keyword[return] identifier[list] ( identifier[filter] ( identifier[can_set_locale] , identifier[map] ( identifier[normalizer] , identifier[locales] )))
def _valid_locales(locales, normalize):
    """
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : list
        A list of locale names to validate.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    if normalize:
        normalizer = lambda x: locale.normalize(x.strip()) # depends on [control=['if'], data=[]]
    else:
        normalizer = lambda x: x.strip()
    return list(filter(can_set_locale, map(normalizer, locales)))
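A small, hypothetical driver showing the intended input shape (a sequence of locale names, per the docstring fix above) and the effect of the normalize flag; which names survive depends on the locales installed on the machine, since can_set_locale actually tries to set each one.

candidates = ['en_US.UTF-8 ', 'de_DE.UTF-8', 'xx_XX']  # hypothetical names

print(_valid_locales(candidates, normalize=False))
# e.g. ['en_US.UTF-8', 'de_DE.UTF-8'] if both are installed; each name is
# stripped of surrounding whitespace before being tried.

print(_valid_locales(candidates, normalize=True))
# same, but each name first goes through locale.normalize()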
def build_source_namespace_filter(namespaces: Strings) -> EdgePredicate: """Pass for edges whose source nodes have the given namespace or one of the given namespaces. :param namespaces: The namespace or namespaces to filter by """ if isinstance(namespaces, str): def source_namespace_filter(_, u: BaseEntity, __, ___) -> bool: return node_has_namespace(u, namespaces) elif isinstance(namespaces, Iterable): namespaces = set(namespaces) def source_namespace_filter(_, u: BaseEntity, __, ___) -> bool: return node_has_namespaces(u, namespaces) else: raise TypeError return source_namespace_filter
def function[build_source_namespace_filter, parameter[namespaces]]: constant[Pass for edges whose source nodes have the given namespace or one of the given namespaces. :param namespaces: The namespace or namespaces to filter by ] if call[name[isinstance], parameter[name[namespaces], name[str]]] begin[:] def function[source_namespace_filter, parameter[_, u, __, ___]]: return[call[name[node_has_namespace], parameter[name[u], name[namespaces]]]] return[name[source_namespace_filter]]
keyword[def] identifier[build_source_namespace_filter] ( identifier[namespaces] : identifier[Strings] )-> identifier[EdgePredicate] : literal[string] keyword[if] identifier[isinstance] ( identifier[namespaces] , identifier[str] ): keyword[def] identifier[source_namespace_filter] ( identifier[_] , identifier[u] : identifier[BaseEntity] , identifier[__] , identifier[___] )-> identifier[bool] : keyword[return] identifier[node_has_namespace] ( identifier[u] , identifier[namespaces] ) keyword[elif] identifier[isinstance] ( identifier[namespaces] , identifier[Iterable] ): identifier[namespaces] = identifier[set] ( identifier[namespaces] ) keyword[def] identifier[source_namespace_filter] ( identifier[_] , identifier[u] : identifier[BaseEntity] , identifier[__] , identifier[___] )-> identifier[bool] : keyword[return] identifier[node_has_namespaces] ( identifier[u] , identifier[namespaces] ) keyword[else] : keyword[raise] identifier[TypeError] keyword[return] identifier[source_namespace_filter]
def build_source_namespace_filter(namespaces: Strings) -> EdgePredicate: """Pass for edges whose source nodes have the given namespace or one of the given namespaces. :param namespaces: The namespace or namespaces to filter by """ if isinstance(namespaces, str): def source_namespace_filter(_, u: BaseEntity, __, ___) -> bool: return node_has_namespace(u, namespaces) # depends on [control=['if'], data=[]] elif isinstance(namespaces, Iterable): namespaces = set(namespaces) def source_namespace_filter(_, u: BaseEntity, __, ___) -> bool: return node_has_namespaces(u, namespaces) # depends on [control=['if'], data=[]] else: raise TypeError return source_namespace_filter
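A hedged application sketch; only the (graph, u, v, key) predicate signature comes from the code above, while the graph itself is a placeholder for a pybel.BELGraph-style multigraph.

# Keep edges whose source node carries an HGNC or MGI identifier;
# 'graph' is a hypothetical BELGraph-like object.
keep_hgnc_or_mgi = build_source_namespace_filter({'HGNC', 'MGI'})

selected = [(u, v, k)
            for u, v, k in graph.edges(keys=True)
            if keep_hgnc_or_mgi(graph, u, v, k)]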
def mmInformation(NetworkName_presence=0, NetworkName_presence1=0, TimeZone_presence=0, TimeZoneAndTime_presence=0, LsaIdentifier_presence=0): """MM INFORMATION Section 9.2.15a""" a = TpPd(pd=0x5) b = MessageType(mesType=0x32) # 00110010 packet = a / b if NetworkName_presence is 1: c = NetworkNameHdr(ieiNN=0x43, eightBitNN=0x0) packet = packet / c if NetworkName_presence1 is 1: d = NetworkNameHdr(ieiNN=0x45, eightBitNN=0x0) packet = packet / d if TimeZone_presence is 1: e = TimeZoneHdr(ieiTZ=0x46, eightBitTZ=0x0) packet = packet / e if TimeZoneAndTime_presence is 1: f = TimeZoneAndTimeHdr(ieiTZAT=0x47, eightBitTZAT=0x0) packet = packet / f if LsaIdentifier_presence is 1: g = LsaIdentifierHdr(ieiLI=0x48, eightBitLI=0x0) packet = packet / g return packet
def function[mmInformation, parameter[NetworkName_presence, NetworkName_presence1, TimeZone_presence, TimeZoneAndTime_presence, LsaIdentifier_presence]]: constant[MM INFORMATION Section 9.2.15a] variable[a] assign[=] call[name[TpPd], parameter[]] variable[b] assign[=] call[name[MessageType], parameter[]] variable[packet] assign[=] binary_operation[name[a] / name[b]] if compare[name[NetworkName_presence] is constant[1]] begin[:] variable[c] assign[=] call[name[NetworkNameHdr], parameter[]] variable[packet] assign[=] binary_operation[name[packet] / name[c]] if compare[name[NetworkName_presence1] is constant[1]] begin[:] variable[d] assign[=] call[name[NetworkNameHdr], parameter[]] variable[packet] assign[=] binary_operation[name[packet] / name[d]] if compare[name[TimeZone_presence] is constant[1]] begin[:] variable[e] assign[=] call[name[TimeZoneHdr], parameter[]] variable[packet] assign[=] binary_operation[name[packet] / name[e]] if compare[name[TimeZoneAndTime_presence] is constant[1]] begin[:] variable[f] assign[=] call[name[TimeZoneAndTimeHdr], parameter[]] variable[packet] assign[=] binary_operation[name[packet] / name[f]] if compare[name[LsaIdentifier_presence] is constant[1]] begin[:] variable[g] assign[=] call[name[LsaIdentifierHdr], parameter[]] variable[packet] assign[=] binary_operation[name[packet] / name[g]] return[name[packet]]
keyword[def] identifier[mmInformation] ( identifier[NetworkName_presence] = literal[int] , identifier[NetworkName_presence1] = literal[int] , identifier[TimeZone_presence] = literal[int] , identifier[TimeZoneAndTime_presence] = literal[int] , identifier[LsaIdentifier_presence] = literal[int] ): literal[string] identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] ) identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] ) identifier[packet] = identifier[a] / identifier[b] keyword[if] identifier[NetworkName_presence] keyword[is] literal[int] : identifier[c] = identifier[NetworkNameHdr] ( identifier[ieiNN] = literal[int] , identifier[eightBitNN] = literal[int] ) identifier[packet] = identifier[packet] / identifier[c] keyword[if] identifier[NetworkName_presence1] keyword[is] literal[int] : identifier[d] = identifier[NetworkNameHdr] ( identifier[ieiNN] = literal[int] , identifier[eightBitNN] = literal[int] ) identifier[packet] = identifier[packet] / identifier[d] keyword[if] identifier[TimeZone_presence] keyword[is] literal[int] : identifier[e] = identifier[TimeZoneHdr] ( identifier[ieiTZ] = literal[int] , identifier[eightBitTZ] = literal[int] ) identifier[packet] = identifier[packet] / identifier[e] keyword[if] identifier[TimeZoneAndTime_presence] keyword[is] literal[int] : identifier[f] = identifier[TimeZoneAndTimeHdr] ( identifier[ieiTZAT] = literal[int] , identifier[eightBitTZAT] = literal[int] ) identifier[packet] = identifier[packet] / identifier[f] keyword[if] identifier[LsaIdentifier_presence] keyword[is] literal[int] : identifier[g] = identifier[LsaIdentifierHdr] ( identifier[ieiLI] = literal[int] , identifier[eightBitLI] = literal[int] ) identifier[packet] = identifier[packet] / identifier[g] keyword[return] identifier[packet]
def mmInformation(NetworkName_presence=0, NetworkName_presence1=0, TimeZone_presence=0, TimeZoneAndTime_presence=0, LsaIdentifier_presence=0): """MM INFORMATION Section 9.2.15a""" a = TpPd(pd=5) b = MessageType(mesType=50) # 00110010 packet = a / b if NetworkName_presence is 1: c = NetworkNameHdr(ieiNN=67, eightBitNN=0) packet = packet / c # depends on [control=['if'], data=[]] if NetworkName_presence1 is 1: d = NetworkNameHdr(ieiNN=69, eightBitNN=0) packet = packet / d # depends on [control=['if'], data=[]] if TimeZone_presence is 1: e = TimeZoneHdr(ieiTZ=70, eightBitTZ=0) packet = packet / e # depends on [control=['if'], data=[]] if TimeZoneAndTime_presence is 1: f = TimeZoneAndTimeHdr(ieiTZAT=71, eightBitTZAT=0) packet = packet / f # depends on [control=['if'], data=[]] if LsaIdentifier_presence is 1: g = LsaIdentifierHdr(ieiLI=72, eightBitLI=0) packet = packet / g # depends on [control=['if'], data=[]] return packet
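A hedged build-and-inspect sketch; it only exercises the presence flags defined above and assumes the TpPd/TimeZoneHdr layer classes come from the same Scapy GSM module.

# Build an MM INFORMATION message carrying only the time zone IE; the
# other optional IEs stay absent because their flags default to 0.
pkt = mmInformation(TimeZone_presence=1)
pkt.show()          # TpPd / MessageType(0x32) / TimeZoneHdr(ieiTZ=0x46)
raw = bytes(pkt)    # serialize the layers for transmission or fuzzing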
def dispatch_webhook_request(url=None, method='GET', params=None, json=None, data=None, headers=None, timeout=5): """Task dispatching to an URL. :param url: The URL location of the HTTP callback task. :param method: Method to use when dispatching the callback. Usually `GET` or `POST`. :param params: Keyword arguments to pass on to the HTTP callback. :param json: JSON as body to pass on to the POST HTTP callback. :param headers: HTTP headers applied to callback. """ if method == 'GET': resp = urlopen(url, method, params=params, headers=headers) elif method in ('POST', 'DELETE', 'PUT'): resp = urlopen(url, method, json=json, data=data, headers=headers) else: raise NotImplementedError return extract_response(resp)
def function[dispatch_webhook_request, parameter[url, method, params, json, data, headers, timeout]]: constant[Task dispatching to an URL. :param url: The URL location of the HTTP callback task. :param method: Method to use when dispatching the callback. Usually `GET` or `POST`. :param params: Keyword arguments to pass on to the HTTP callback. :param json: JSON as body to pass on to the POST HTTP callback. :param headers: HTTP headers applied to callback. ] if compare[name[method] equal[==] constant[GET]] begin[:] variable[resp] assign[=] call[name[urlopen], parameter[name[url], name[method]]] return[call[name[extract_response], parameter[name[resp]]]]
keyword[def] identifier[dispatch_webhook_request] ( identifier[url] = keyword[None] , identifier[method] = literal[string] , identifier[params] = keyword[None] , identifier[json] = keyword[None] , identifier[data] = keyword[None] , identifier[headers] = keyword[None] , identifier[timeout] = literal[int] ): literal[string] keyword[if] identifier[method] == literal[string] : identifier[resp] = identifier[urlopen] ( identifier[url] , identifier[method] , identifier[params] = identifier[params] , identifier[headers] = identifier[headers] ) keyword[elif] identifier[method] keyword[in] ( literal[string] , literal[string] , literal[string] ): identifier[resp] = identifier[urlopen] ( identifier[url] , identifier[method] , identifier[json] = identifier[json] , identifier[data] = identifier[data] , identifier[headers] = identifier[headers] ) keyword[else] : keyword[raise] identifier[NotImplementedError] keyword[return] identifier[extract_response] ( identifier[resp] )
def dispatch_webhook_request(url=None, method='GET', params=None, json=None, data=None, headers=None, timeout=5): """Task dispatching to an URL. :param url: The URL location of the HTTP callback task. :param method: Method to use when dispatching the callback. Usually `GET` or `POST`. :param params: Keyword arguments to pass on to the HTTP callback. :param json: JSON as body to pass on to the POST HTTP callback. :param headers: HTTP headers applied to callback. """ if method == 'GET': resp = urlopen(url, method, params=params, headers=headers) # depends on [control=['if'], data=['method']] elif method in ('POST', 'DELETE', 'PUT'): resp = urlopen(url, method, json=json, data=data, headers=headers) # depends on [control=['if'], data=['method']] else: raise NotImplementedError return extract_response(resp)
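A hedged call-site example; the endpoint URLs and payloads are placeholders, and urlopen/extract_response are assumed to be helpers defined alongside this function.

# GET with query parameters
reply = dispatch_webhook_request(
    url='https://hooks.example.com/ping',      # hypothetical endpoint
    method='GET',
    params={'source': 'ci'})

# POST with a JSON body; verbs other than GET/POST/DELETE/PUT raise
# NotImplementedError before any request is made.
reply = dispatch_webhook_request(
    url='https://hooks.example.com/events',    # hypothetical endpoint
    method='POST',
    json={'event': 'build_finished', 'ok': True})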
def cleanup(self): """ Destructive finishing up after execution stopped. """ for actor in self.actors: if actor.skip: continue actor.cleanup() super(ActorHandler, self).cleanup()
def function[cleanup, parameter[self]]: constant[ Destructive finishing up after execution stopped. ] for taget[name[actor]] in starred[name[self].actors] begin[:] if name[actor].skip begin[:] continue call[name[actor].cleanup, parameter[]] call[call[name[super], parameter[name[ActorHandler], name[self]]].cleanup, parameter[]]
keyword[def] identifier[cleanup] ( identifier[self] ): literal[string] keyword[for] identifier[actor] keyword[in] identifier[self] . identifier[actors] : keyword[if] identifier[actor] . identifier[skip] : keyword[continue] identifier[actor] . identifier[cleanup] () identifier[super] ( identifier[ActorHandler] , identifier[self] ). identifier[cleanup] ()
def cleanup(self): """ Destructive finishing up after execution stopped. """ for actor in self.actors: if actor.skip: continue # depends on [control=['if'], data=[]] actor.cleanup() # depends on [control=['for'], data=['actor']] super(ActorHandler, self).cleanup()
def parse(self, hcl, canonicalize=False): """ Parse a HCL Job file. Returns a dict with the JSON formatted job. This API endpoint is only supported from Nomad version 0.8.3. https://www.nomadproject.io/api/jobs.html#parse-job returns: dict raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException """ return self.request("parse", json={"JobHCL": hcl, "Canonicalize": canonicalize}, method="post", allow_redirects=True).json()
def function[parse, parameter[self, hcl, canonicalize]]: constant[ Parse a HCL Job file. Returns a dict with the JSON formatted job. This API endpoint is only supported from Nomad version 0.8.3. https://www.nomadproject.io/api/jobs.html#parse-job returns: dict raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException ] return[call[call[name[self].request, parameter[constant[parse]]].json, parameter[]]]
keyword[def] identifier[parse] ( identifier[self] , identifier[hcl] , identifier[canonicalize] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[request] ( literal[string] , identifier[json] ={ literal[string] : identifier[hcl] , literal[string] : identifier[canonicalize] }, identifier[method] = literal[string] , identifier[allow_redirects] = keyword[True] ). identifier[json] ()
def parse(self, hcl, canonicalize=False): """ Parse a HCL Job file. Returns a dict with the JSON formatted job. This API endpoint is only supported from Nomad version 0.8.3. https://www.nomadproject.io/api/jobs.html#parse-job returns: dict raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException """ return self.request('parse', json={'JobHCL': hcl, 'Canonicalize': canonicalize}, method='post', allow_redirects=True).json()
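A hedged usage sketch assuming a python-nomad style client exposes this method as n.jobs.parse; the agent address and the HCL snippet are placeholders, and per the docstring the endpoint needs Nomad 0.8.3 or later.

import nomad

n = nomad.Nomad(host='127.0.0.1', port=4646)   # hypothetical agent address

hcl = 'job "example" { datacenters = ["dc1"] }'
job_json = n.jobs.parse(hcl)    # dict with the JSON-formatted job
print(job_json['ID'])           # e.g. 'example'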
def handle_post_request(self):
    """Handle incoming POST request to an Endpoint and marshal the request
    data via the specified RequestHandler. :meth:`.CreateMixin.save_object`
    is then called and must be implemented by mixins implementing this
    interface.

    .. seealso::

        :meth:`CreateMixin.save_object`
        :meth:`Endpoint.post`
    """

    self.request = self.get_request_handler()
    self.obj = self.request.process().data

    self.save_object(self.obj)

    return self.create_response()
def function[handle_post_request, parameter[self]]:
    constant[Handle incoming POST request to an Endpoint and marshal the request
    data via the specified RequestHandler. :meth:`.CreateMixin.save_object`
    is then called and must be implemented by mixins implementing this
    interface.

    .. seealso::

        :meth:`CreateMixin.save_object`
        :meth:`Endpoint.post`
    ]
    name[self].request assign[=] call[name[self].get_request_handler, parameter[]]
    name[self].obj assign[=] call[name[self].request.process, parameter[]].data
    call[name[self].save_object, parameter[name[self].obj]]
    return[call[name[self].create_response, parameter[]]]
keyword[def] identifier[handle_post_request] ( identifier[self] ): literal[string] identifier[self] . identifier[request] = identifier[self] . identifier[get_request_handler] () identifier[self] . identifier[obj] = identifier[self] . identifier[request] . identifier[process] (). identifier[data] identifier[self] . identifier[save_object] ( identifier[self] . identifier[obj] ) keyword[return] identifier[self] . identifier[create_response] ()
def handle_post_request(self): """Handle incoming POST request to an Endpoint and marshal the request data via the specified RequestHandler. :meth:`.CreateMixin.save_object` is then called and must be implemented by mixins implementing this interface. .. seealso:: :meth:`CreateMixin.save_object` :meth:`Endpoint.post` """ self.request = self.get_request_handler() self.obj = self.request.process().data self.save_object(self.obj) return self.create_response()
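handle_post_request() above defers persistence to save_object(), which the docstring says implementing mixins must supply. A hypothetical sketch of such a mixin, assuming a SQLAlchemy-style session attribute (none of the names below come from the source library):

class SQLAlchemyCreateMixin:
    """Hypothetical mixin supplying the save_object() hook."""

    def save_object(self, obj):
        # Persist the object marshalled by the RequestHandler.
        self.session.add(obj)
        self.session.commit()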
def load(self, path): """Load from disk Parameters ---------- path : str path to the directory which typically contains a config.pkl file and a model.bin file Returns ------- DepParser parser itself """ config = _Config.load(os.path.join(path, 'config.pkl')) config.save_dir = path # redirect root path to what user specified self._vocab = vocab = ParserVocabulary.load(config.save_vocab_path) with mx.Context(mxnet_prefer_gpu()): self._parser = BiaffineParser(vocab, config.word_dims, config.tag_dims, config.dropout_emb, config.lstm_layers, config.lstm_hiddens, config.dropout_lstm_input, config.dropout_lstm_hidden, config.mlp_arc_size, config.mlp_rel_size, config.dropout_mlp, config.debug) self._parser.load(config.save_model_path) return self
def function[load, parameter[self, path]]: constant[Load from disk Parameters ---------- path : str path to the directory which typically contains a config.pkl file and a model.bin file Returns ------- DepParser parser itself ] variable[config] assign[=] call[name[_Config].load, parameter[call[name[os].path.join, parameter[name[path], constant[config.pkl]]]]] name[config].save_dir assign[=] name[path] name[self]._vocab assign[=] call[name[ParserVocabulary].load, parameter[name[config].save_vocab_path]] with call[name[mx].Context, parameter[call[name[mxnet_prefer_gpu], parameter[]]]] begin[:] name[self]._parser assign[=] call[name[BiaffineParser], parameter[name[vocab], name[config].word_dims, name[config].tag_dims, name[config].dropout_emb, name[config].lstm_layers, name[config].lstm_hiddens, name[config].dropout_lstm_input, name[config].dropout_lstm_hidden, name[config].mlp_arc_size, name[config].mlp_rel_size, name[config].dropout_mlp, name[config].debug]] call[name[self]._parser.load, parameter[name[config].save_model_path]] return[name[self]]
keyword[def] identifier[load] ( identifier[self] , identifier[path] ): literal[string] identifier[config] = identifier[_Config] . identifier[load] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] )) identifier[config] . identifier[save_dir] = identifier[path] identifier[self] . identifier[_vocab] = identifier[vocab] = identifier[ParserVocabulary] . identifier[load] ( identifier[config] . identifier[save_vocab_path] ) keyword[with] identifier[mx] . identifier[Context] ( identifier[mxnet_prefer_gpu] ()): identifier[self] . identifier[_parser] = identifier[BiaffineParser] ( identifier[vocab] , identifier[config] . identifier[word_dims] , identifier[config] . identifier[tag_dims] , identifier[config] . identifier[dropout_emb] , identifier[config] . identifier[lstm_layers] , identifier[config] . identifier[lstm_hiddens] , identifier[config] . identifier[dropout_lstm_input] , identifier[config] . identifier[dropout_lstm_hidden] , identifier[config] . identifier[mlp_arc_size] , identifier[config] . identifier[mlp_rel_size] , identifier[config] . identifier[dropout_mlp] , identifier[config] . identifier[debug] ) identifier[self] . identifier[_parser] . identifier[load] ( identifier[config] . identifier[save_model_path] ) keyword[return] identifier[self]
def load(self, path): """Load from disk Parameters ---------- path : str path to the directory which typically contains a config.pkl file and a model.bin file Returns ------- DepParser parser itself """ config = _Config.load(os.path.join(path, 'config.pkl')) config.save_dir = path # redirect root path to what user specified self._vocab = vocab = ParserVocabulary.load(config.save_vocab_path) with mx.Context(mxnet_prefer_gpu()): self._parser = BiaffineParser(vocab, config.word_dims, config.tag_dims, config.dropout_emb, config.lstm_layers, config.lstm_hiddens, config.dropout_lstm_input, config.dropout_lstm_hidden, config.mlp_arc_size, config.mlp_rel_size, config.dropout_mlp, config.debug) self._parser.load(config.save_model_path) # depends on [control=['with'], data=[]] return self
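A short usage sketch for the loader above; the model directory is hypothetical but, per the docstring, must contain the config.pkl and saved weights produced by training:

parser = DepParser()
parser.load('data/biaffine/model')  # load() returns the parser itself, so chaining also works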
def get_completed_tasks(self): """Return a list of all completed tasks in this project. :return: A list of all completed tasks in this project. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('[email protected]', 'password') >>> project = user.get_project('PyTodoist') >>> task = project.add_task('Install PyTodoist') >>> task.complete() >>> completed_tasks = project.get_completed_tasks() >>> for task in completed_tasks: ... task.uncomplete() """ self.owner.sync() tasks = [] offset = 0 while True: response = API.get_all_completed_tasks(self.owner.api_token, limit=_PAGE_LIMIT, offset=offset, project_id=self.id) _fail_if_contains_errors(response) response_json = response.json() tasks_json = response_json['items'] if len(tasks_json) == 0: break # There are no more completed tasks to retrieve. for task_json in tasks_json: project = self.owner.projects[task_json['project_id']] tasks.append(Task(task_json, project)) offset += _PAGE_LIMIT return tasks
def function[get_completed_tasks, parameter[self]]: constant[Return a list of all completed tasks in this project. :return: A list of all completed tasks in this project. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('[email protected]', 'password') >>> project = user.get_project('PyTodoist') >>> task = project.add_task('Install PyTodoist') >>> task.complete() >>> completed_tasks = project.get_completed_tasks() >>> for task in completed_tasks: ... task.uncomplete() ] call[name[self].owner.sync, parameter[]] variable[tasks] assign[=] list[[]] variable[offset] assign[=] constant[0] while constant[True] begin[:] variable[response] assign[=] call[name[API].get_all_completed_tasks, parameter[name[self].owner.api_token]] call[name[_fail_if_contains_errors], parameter[name[response]]] variable[response_json] assign[=] call[name[response].json, parameter[]] variable[tasks_json] assign[=] call[name[response_json]][constant[items]] if compare[call[name[len], parameter[name[tasks_json]]] equal[==] constant[0]] begin[:] break for taget[name[task_json]] in starred[name[tasks_json]] begin[:] variable[project] assign[=] call[name[self].owner.projects][call[name[task_json]][constant[project_id]]] call[name[tasks].append, parameter[call[name[Task], parameter[name[task_json], name[project]]]]] <ast.AugAssign object at 0x7da1b0ffba90> return[name[tasks]]
keyword[def] identifier[get_completed_tasks] ( identifier[self] ): literal[string] identifier[self] . identifier[owner] . identifier[sync] () identifier[tasks] =[] identifier[offset] = literal[int] keyword[while] keyword[True] : identifier[response] = identifier[API] . identifier[get_all_completed_tasks] ( identifier[self] . identifier[owner] . identifier[api_token] , identifier[limit] = identifier[_PAGE_LIMIT] , identifier[offset] = identifier[offset] , identifier[project_id] = identifier[self] . identifier[id] ) identifier[_fail_if_contains_errors] ( identifier[response] ) identifier[response_json] = identifier[response] . identifier[json] () identifier[tasks_json] = identifier[response_json] [ literal[string] ] keyword[if] identifier[len] ( identifier[tasks_json] )== literal[int] : keyword[break] keyword[for] identifier[task_json] keyword[in] identifier[tasks_json] : identifier[project] = identifier[self] . identifier[owner] . identifier[projects] [ identifier[task_json] [ literal[string] ]] identifier[tasks] . identifier[append] ( identifier[Task] ( identifier[task_json] , identifier[project] )) identifier[offset] += identifier[_PAGE_LIMIT] keyword[return] identifier[tasks]
def get_completed_tasks(self): """Return a list of all completed tasks in this project. :return: A list of all completed tasks in this project. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('[email protected]', 'password') >>> project = user.get_project('PyTodoist') >>> task = project.add_task('Install PyTodoist') >>> task.complete() >>> completed_tasks = project.get_completed_tasks() >>> for task in completed_tasks: ... task.uncomplete() """ self.owner.sync() tasks = [] offset = 0 while True: response = API.get_all_completed_tasks(self.owner.api_token, limit=_PAGE_LIMIT, offset=offset, project_id=self.id) _fail_if_contains_errors(response) response_json = response.json() tasks_json = response_json['items'] if len(tasks_json) == 0: break # There are no more completed tasks to retrieve. # depends on [control=['if'], data=[]] for task_json in tasks_json: project = self.owner.projects[task_json['project_id']] tasks.append(Task(task_json, project)) # depends on [control=['for'], data=['task_json']] offset += _PAGE_LIMIT # depends on [control=['while'], data=[]] return tasks
def _verify_part_size(self): """ Verifies that the part size is smaller than the maximum part size, which is 5GB. """ if self._part_size > PartSize.MAXIMUM_UPLOAD_SIZE: self._status = TransferState.FAILED raise SbgError('Part size = {}b. Maximum part size is {}b'.format( self._part_size, PartSize.MAXIMUM_UPLOAD_SIZE) )
def function[_verify_part_size, parameter[self]]: constant[ Verifies that the part size is smaller than the maximum part size, which is 5GB. ] if compare[name[self]._part_size greater[>] name[PartSize].MAXIMUM_UPLOAD_SIZE] begin[:] name[self]._status assign[=] name[TransferState].FAILED <ast.Raise object at 0x7da1b26ae830>
keyword[def] identifier[_verify_part_size] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_part_size] > identifier[PartSize] . identifier[MAXIMUM_UPLOAD_SIZE] : identifier[self] . identifier[_status] = identifier[TransferState] . identifier[FAILED] keyword[raise] identifier[SbgError] ( literal[string] . identifier[format] ( identifier[self] . identifier[_part_size] , identifier[PartSize] . identifier[MAXIMUM_UPLOAD_SIZE] ) )
def _verify_part_size(self): """ Verifies that the part size is smaller than the maximum part size, which is 5GB. """ if self._part_size > PartSize.MAXIMUM_UPLOAD_SIZE: self._status = TransferState.FAILED raise SbgError('Part size = {}b. Maximum part size is {}b'.format(self._part_size, PartSize.MAXIMUM_UPLOAD_SIZE)) # depends on [control=['if'], data=[]]
def current_song(self, song): """Set the current song: add it to the playlist and emit the song_changed signal .. note:: In theory, this method should only be called by the Player object. """ self._last_song = self.current_song if song is None: self._current_song = None # add it to playlist if song not in playlist elif song in self._songs: self._current_song = song else: self.insert(song) self._current_song = song self.song_changed.emit(song)
def function[current_song, parameter[self, song]]: constant[Set the current song: add it to the playlist and emit the song_changed signal .. note:: In theory, this method should only be called by the Player object. ] name[self]._last_song assign[=] name[self].current_song if compare[name[song] is constant[None]] begin[:] name[self]._current_song assign[=] constant[None] call[name[self].song_changed.emit, parameter[name[song]]]
keyword[def] identifier[current_song] ( identifier[self] , identifier[song] ): literal[string] identifier[self] . identifier[_last_song] = identifier[self] . identifier[current_song] keyword[if] identifier[song] keyword[is] keyword[None] : identifier[self] . identifier[_current_song] = keyword[None] keyword[elif] identifier[song] keyword[in] identifier[self] . identifier[_songs] : identifier[self] . identifier[_current_song] = identifier[song] keyword[else] : identifier[self] . identifier[insert] ( identifier[song] ) identifier[self] . identifier[_current_song] = identifier[song] identifier[self] . identifier[song_changed] . identifier[emit] ( identifier[song] )
def current_song(self, song): """Set the current song: add it to the playlist and emit the song_changed signal .. note:: In theory, this method should only be called by the Player object. """ self._last_song = self.current_song if song is None: self._current_song = None # depends on [control=['if'], data=[]] # add it to playlist if song not in playlist elif song in self._songs: self._current_song = song # depends on [control=['if'], data=['song']] else: self.insert(song) self._current_song = song self.song_changed.emit(song)
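Given the (self, song) signature and the self.current_song read at the top, this reads like the body of a property setter on a playlist class. A minimal sketch of that wiring (the Playlist name is an assumption, not from the source):

class Playlist:
    @property
    def current_song(self):
        return self._current_song

    @current_song.setter
    def current_song(self, song):
        ...  # body as above: remember the last song, insert if missing, emit the signal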
def _build_model(self): """ Build the model. """ if "input_dim" not in self.settings: raise ValueError("Model parameter input_dim cannot be None.") self.sparse_linear = SparseLinear( self.settings["input_dim"], self.cardinality, self.settings["bias"] )
def function[_build_model, parameter[self]]: constant[ Build the model. ] if compare[constant[input_dim] <ast.NotIn object at 0x7da2590d7190> name[self].settings] begin[:] <ast.Raise object at 0x7da18f00feb0> name[self].sparse_linear assign[=] call[name[SparseLinear], parameter[call[name[self].settings][constant[input_dim]], name[self].cardinality, call[name[self].settings][constant[bias]]]]
keyword[def] identifier[_build_model] ( identifier[self] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[settings] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[sparse_linear] = identifier[SparseLinear] ( identifier[self] . identifier[settings] [ literal[string] ], identifier[self] . identifier[cardinality] , identifier[self] . identifier[settings] [ literal[string] ] )
def _build_model(self): """ Build the model. """ if 'input_dim' not in self.settings: raise ValueError('Model parameter input_dim cannot be None.') # depends on [control=['if'], data=[]] self.sparse_linear = SparseLinear(self.settings['input_dim'], self.cardinality, self.settings['bias'])
def str_def(self): """ :term:`string`: The exception as a string in a Python definition-style format, e.g. for parsing by scripts: .. code-block:: text classname={}; connect_retries={}; message={}; """ return "classname={!r}; connect_retries={!r}; message={!r};". \ format(self.__class__.__name__, self.connect_retries, self.args[0])
def function[str_def, parameter[self]]: constant[ :term:`string`: The exception as a string in a Python definition-style format, e.g. for parsing by scripts: .. code-block:: text classname={}; connect_retries={}; message={}; ] return[call[constant[classname={!r}; connect_retries={!r}; message={!r};].format, parameter[name[self].__class__.__name__, name[self].connect_retries, call[name[self].args][constant[0]]]]]
keyword[def] identifier[str_def] ( identifier[self] ): literal[string] keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[self] . identifier[connect_retries] , identifier[self] . identifier[args] [ literal[int] ])
def str_def(self): """ :term:`string`: The exception as a string in a Python definition-style format, e.g. for parsing by scripts: .. code-block:: text classname={}; connect_retries={}; message={}; """ return 'classname={!r}; connect_retries={!r}; message={!r};'.format(self.__class__.__name__, self.connect_retries, self.args[0])
def process(self, data): """ Update the state based on the incoming data This function updates the state of the DeviceSpec object, giving values for each axis [x,y,z,roll,pitch,yaw] in range [-1.0, 1.0] The state tuple is only set when all 6 DoF have been read correctly. The timestamp (in fractional seconds since the start of the program) is written as element "t" If callback is provided, it is called with a copy of the current state tuple. If button_callback is provided, it is called only on button state changes with the argument (state, button_state). Parameters: data The data for this HID event, as returned by the HID callback """ button_changed = False for name,(chan,b1,b2,flip) in self.mappings.items(): if data[0] == chan: self.dict_state[name] = flip * to_int16(data[b1], data[b2])/float(self.axis_scale) for button_index, (chan, byte, bit) in enumerate(self.button_mapping): if data[0] == chan: button_changed = True # update the button vector mask = 1<<bit self.dict_state["buttons"][button_index] = 1 if (data[byte] & mask) != 0 else 0 self.dict_state["t"] = high_acc_clock() # must receive both parts of the 6DOF state before we return the state dictionary if len(self.dict_state)==8: self.tuple_state = SpaceNavigator(**self.dict_state) # call any attached callbacks if self.callback: self.callback(self.tuple_state) # only call the button callback if the button state actually changed if self.button_callback and button_changed: self.button_callback(self.tuple_state, self.tuple_state.buttons)
def function[process, parameter[self, data]]: constant[ Update the state based on the incoming data This function updates the state of the DeviceSpec object, giving values for each axis [x,y,z,roll,pitch,yaw] in range [-1.0, 1.0] The state tuple is only set when all 6 DoF have been read correctly. The timestamp (in fractional seconds since the start of the program) is written as element "t" If callback is provided, it is called with a copy of the current state tuple. If button_callback is provided, it is called only on button state changes with the argument (state, button_state). Parameters: data The data for this HID event, as returned by the HID callback ] variable[button_changed] assign[=] constant[False] for taget[tuple[[<ast.Name object at 0x7da1b2652cb0>, <ast.Tuple object at 0x7da1b2652200>]]] in starred[call[name[self].mappings.items, parameter[]]] begin[:] if compare[call[name[data]][constant[0]] equal[==] name[chan]] begin[:] call[name[self].dict_state][name[name]] assign[=] binary_operation[binary_operation[name[flip] * call[name[to_int16], parameter[call[name[data]][name[b1]], call[name[data]][name[b2]]]]] / call[name[float], parameter[name[self].axis_scale]]] for taget[tuple[[<ast.Name object at 0x7da1b2653d30>, <ast.Tuple object at 0x7da18f00d3c0>]]] in starred[call[name[enumerate], parameter[name[self].button_mapping]]] begin[:] if compare[call[name[data]][constant[0]] equal[==] name[chan]] begin[:] variable[button_changed] assign[=] constant[True] variable[mask] assign[=] binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[bit]] call[call[name[self].dict_state][constant[buttons]]][name[button_index]] assign[=] <ast.IfExp object at 0x7da18f00d000> call[name[self].dict_state][constant[t]] assign[=] call[name[high_acc_clock], parameter[]] if compare[call[name[len], parameter[name[self].dict_state]] equal[==] constant[8]] begin[:] name[self].tuple_state assign[=] call[name[SpaceNavigator], parameter[]] if name[self].callback begin[:] call[name[self].callback, parameter[name[self].tuple_state]] if <ast.BoolOp object at 0x7da20e957610> begin[:] call[name[self].button_callback, parameter[name[self].tuple_state, name[self].tuple_state.buttons]]
keyword[def] identifier[process] ( identifier[self] , identifier[data] ): literal[string] identifier[button_changed] = keyword[False] keyword[for] identifier[name] ,( identifier[chan] , identifier[b1] , identifier[b2] , identifier[flip] ) keyword[in] identifier[self] . identifier[mappings] . identifier[items] (): keyword[if] identifier[data] [ literal[int] ]== identifier[chan] : identifier[self] . identifier[dict_state] [ identifier[name] ]= identifier[flip] * identifier[to_int16] ( identifier[data] [ identifier[b1] ], identifier[data] [ identifier[b2] ])/ identifier[float] ( identifier[self] . identifier[axis_scale] ) keyword[for] identifier[button_index] ,( identifier[chan] , identifier[byte] , identifier[bit] ) keyword[in] identifier[enumerate] ( identifier[self] . identifier[button_mapping] ): keyword[if] identifier[data] [ literal[int] ]== identifier[chan] : identifier[button_changed] = keyword[True] identifier[mask] = literal[int] << identifier[bit] identifier[self] . identifier[dict_state] [ literal[string] ][ identifier[button_index] ]= literal[int] keyword[if] ( identifier[data] [ identifier[byte] ]& identifier[mask] )!= literal[int] keyword[else] literal[int] identifier[self] . identifier[dict_state] [ literal[string] ]= identifier[high_acc_clock] () keyword[if] identifier[len] ( identifier[self] . identifier[dict_state] )== literal[int] : identifier[self] . identifier[tuple_state] = identifier[SpaceNavigator] (** identifier[self] . identifier[dict_state] ) keyword[if] identifier[self] . identifier[callback] : identifier[self] . identifier[callback] ( identifier[self] . identifier[tuple_state] ) keyword[if] identifier[self] . identifier[button_callback] keyword[and] identifier[button_changed] : identifier[self] . identifier[button_callback] ( identifier[self] . identifier[tuple_state] , identifier[self] . identifier[tuple_state] . identifier[buttons] )
def process(self, data): """ Update the state based on the incoming data This function updates the state of the DeviceSpec object, giving values for each axis [x,y,z,roll,pitch,yaw] in range [-1.0, 1.0] The state tuple is only set when all 6 DoF have been read correctly. The timestamp (in fractional seconds since the start of the program) is written as element "t" If callback is provided, it is called with a copy of the current state tuple. If button_callback is provided, it is called only on button state changes with the argument (state, button_state). Parameters: data The data for this HID event, as returned by the HID callback """ button_changed = False for (name, (chan, b1, b2, flip)) in self.mappings.items(): if data[0] == chan: self.dict_state[name] = flip * to_int16(data[b1], data[b2]) / float(self.axis_scale) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for (button_index, (chan, byte, bit)) in enumerate(self.button_mapping): if data[0] == chan: button_changed = True # update the button vector mask = 1 << bit self.dict_state['buttons'][button_index] = 1 if data[byte] & mask != 0 else 0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] self.dict_state['t'] = high_acc_clock() # must receive both parts of the 6DOF state before we return the state dictionary if len(self.dict_state) == 8: self.tuple_state = SpaceNavigator(**self.dict_state) # depends on [control=['if'], data=[]] # call any attached callbacks if self.callback: self.callback(self.tuple_state) # depends on [control=['if'], data=[]] # only call the button callback if the button state actually changed if self.button_callback and button_changed: self.button_callback(self.tuple_state, self.tuple_state.buttons) # depends on [control=['if'], data=[]]
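The axis decode above hinges on to_int16(), which is assumed to merge a low and a high byte into a signed 16-bit value; a sketch of that two's-complement decode (the byte order is an assumption):

def to_int16(y1, y2):
    # Combine low byte y1 and high byte y2, then sign-extend.
    x = y1 | (y2 << 8)
    if x >= 32768:
        x -= 65536
    return x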
def sys_dup(self, fd): """ Duplicates an open file descriptor :rtype: int :param fd: the open file descriptor to duplicate. :return: the new file descriptor. """ if not self._is_fd_open(fd): logger.info("DUP: Passed fd is not open. Returning EBADF") return -errno.EBADF newfd = self._dup(fd) return newfd
def function[sys_dup, parameter[self, fd]]: constant[ Duplicates an open file descriptor :rtype: int :param fd: the open file descriptor to duplicate. :return: the new file descriptor. ] if <ast.UnaryOp object at 0x7da1b000ef50> begin[:] call[name[logger].info, parameter[constant[DUP: Passed fd is not open. Returning EBADF]]] return[<ast.UnaryOp object at 0x7da1b000d8d0>] variable[newfd] assign[=] call[name[self]._dup, parameter[name[fd]]] return[name[newfd]]
keyword[def] identifier[sys_dup] ( identifier[self] , identifier[fd] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_is_fd_open] ( identifier[fd] ): identifier[logger] . identifier[info] ( literal[string] ) keyword[return] - identifier[errno] . identifier[EBADF] identifier[newfd] = identifier[self] . identifier[_dup] ( identifier[fd] ) keyword[return] identifier[newfd]
def sys_dup(self, fd): """ Duplicates an open file descriptor :rtype: int :param fd: the open file descriptor to duplicate. :return: the new file descriptor. """ if not self._is_fd_open(fd): logger.info('DUP: Passed fd is not open. Returning EBADF') return -errno.EBADF # depends on [control=['if'], data=[]] newfd = self._dup(fd) return newfd
def add_paths(self, paths): """ Adds entries to the Python path. The given paths are normalized before being added to the left of the list :param paths: New paths to add """ if paths: # New paths take priority self._paths = list(paths) + self._paths
def function[add_paths, parameter[self, paths]]: constant[ Adds entries to the Python path. The given paths are normalized before being added to the left of the list :param paths: New paths to add ] if name[paths] begin[:] name[self]._paths assign[=] binary_operation[call[name[list], parameter[name[paths]]] + name[self]._paths]
keyword[def] identifier[add_paths] ( identifier[self] , identifier[paths] ): literal[string] keyword[if] identifier[paths] : identifier[self] . identifier[_paths] = identifier[list] ( identifier[paths] )+ identifier[self] . identifier[_paths]
def add_paths(self, paths): """ Adds entries to the Python path. The given paths are normalized before being added to the left of the list :param paths: New paths to add """ if paths: # New paths take priority self._paths = list(paths) + self._paths # depends on [control=['if'], data=[]]
def info(self): """return info about server as dict object""" proc_info = {"name": self.name, "params": self.cfg, "alive": self.is_alive, "optfile": self.config_path} if self.is_alive: proc_info['pid'] = self.proc.pid logger.debug("proc_info: {proc_info}".format(**locals())) mongodb_uri = '' server_info = {} status_info = {} if self.hostname and self.cfg.get('port', None): try: c = self.connection server_info = c.server_info() logger.debug("server_info: {server_info}".format(**locals())) mongodb_uri = 'mongodb://' + self.hostname status_info = {"primary": c.is_primary, "mongos": c.is_mongos} logger.debug("status_info: {status_info}".format(**locals())) except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure, pymongo.errors.ConnectionFailure): server_info = {} status_info = {} result = {"mongodb_uri": mongodb_uri, "statuses": status_info, "serverInfo": server_info, "procInfo": proc_info, "orchestration": 'servers'} if self.login: result['mongodb_auth_uri'] = self.mongodb_auth_uri(self.hostname) logger.debug("return {result}".format(result=result)) return result
def function[info, parameter[self]]: constant[return info about server as dict object] variable[proc_info] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9b700>, <ast.Constant object at 0x7da18dc9a230>, <ast.Constant object at 0x7da18dc991e0>, <ast.Constant object at 0x7da18dc99d50>], [<ast.Attribute object at 0x7da18dc99db0>, <ast.Attribute object at 0x7da18dc9b790>, <ast.Attribute object at 0x7da18dc99660>, <ast.Attribute object at 0x7da18dc99180>]] if name[self].is_alive begin[:] call[name[proc_info]][constant[pid]] assign[=] name[self].proc.pid call[name[logger].debug, parameter[call[constant[proc_info: {proc_info}].format, parameter[]]]] variable[mongodb_uri] assign[=] constant[] variable[server_info] assign[=] dictionary[[], []] variable[status_info] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da18dc9abc0> begin[:] <ast.Try object at 0x7da18dc9b7f0> variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da18dc98a30>, <ast.Constant object at 0x7da18dc9a4d0>, <ast.Constant object at 0x7da18dc98340>, <ast.Constant object at 0x7da18dc99d80>, <ast.Constant object at 0x7da18dc99300>], [<ast.Name object at 0x7da18dc9a650>, <ast.Name object at 0x7da18dc995d0>, <ast.Name object at 0x7da18dc9b8e0>, <ast.Name object at 0x7da18dc9a3e0>, <ast.Constant object at 0x7da18dc9bd30>]] if name[self].login begin[:] call[name[result]][constant[mongodb_auth_uri]] assign[=] call[name[self].mongodb_auth_uri, parameter[name[self].hostname]] call[name[logger].debug, parameter[call[constant[return {result}].format, parameter[]]]] return[name[result]]
keyword[def] identifier[info] ( identifier[self] ): literal[string] identifier[proc_info] ={ literal[string] : identifier[self] . identifier[name] , literal[string] : identifier[self] . identifier[cfg] , literal[string] : identifier[self] . identifier[is_alive] , literal[string] : identifier[self] . identifier[config_path] } keyword[if] identifier[self] . identifier[is_alive] : identifier[proc_info] [ literal[string] ]= identifier[self] . identifier[proc] . identifier[pid] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (** identifier[locals] ())) identifier[mongodb_uri] = literal[string] identifier[server_info] ={} identifier[status_info] ={} keyword[if] identifier[self] . identifier[hostname] keyword[and] identifier[self] . identifier[cfg] . identifier[get] ( literal[string] , keyword[None] ): keyword[try] : identifier[c] = identifier[self] . identifier[connection] identifier[server_info] = identifier[c] . identifier[server_info] () identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (** identifier[locals] ())) identifier[mongodb_uri] = literal[string] + identifier[self] . identifier[hostname] identifier[status_info] ={ literal[string] : identifier[c] . identifier[is_primary] , literal[string] : identifier[c] . identifier[is_mongos] } identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (** identifier[locals] ())) keyword[except] ( identifier[pymongo] . identifier[errors] . identifier[AutoReconnect] , identifier[pymongo] . identifier[errors] . identifier[OperationFailure] , identifier[pymongo] . identifier[errors] . identifier[ConnectionFailure] ): identifier[server_info] ={} identifier[status_info] ={} identifier[result] ={ literal[string] : identifier[mongodb_uri] , literal[string] : identifier[status_info] , literal[string] : identifier[server_info] , literal[string] : identifier[proc_info] , literal[string] : literal[string] } keyword[if] identifier[self] . identifier[login] : identifier[result] [ literal[string] ]= identifier[self] . identifier[mongodb_auth_uri] ( identifier[self] . identifier[hostname] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[result] = identifier[result] )) keyword[return] identifier[result]
def info(self): """return info about server as dict object""" proc_info = {'name': self.name, 'params': self.cfg, 'alive': self.is_alive, 'optfile': self.config_path} if self.is_alive: proc_info['pid'] = self.proc.pid # depends on [control=['if'], data=[]] logger.debug('proc_info: {proc_info}'.format(**locals())) mongodb_uri = '' server_info = {} status_info = {} if self.hostname and self.cfg.get('port', None): try: c = self.connection server_info = c.server_info() logger.debug('server_info: {server_info}'.format(**locals())) mongodb_uri = 'mongodb://' + self.hostname status_info = {'primary': c.is_primary, 'mongos': c.is_mongos} logger.debug('status_info: {status_info}'.format(**locals())) # depends on [control=['try'], data=[]] except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure, pymongo.errors.ConnectionFailure): server_info = {} status_info = {} # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] result = {'mongodb_uri': mongodb_uri, 'statuses': status_info, 'serverInfo': server_info, 'procInfo': proc_info, 'orchestration': 'servers'} if self.login: result['mongodb_auth_uri'] = self.mongodb_auth_uri(self.hostname) # depends on [control=['if'], data=[]] logger.debug('return {result}'.format(result=result)) return result
def get_matchers(): """ Get matcher functions from treeherder.autoclassify.matchers We classify matchers as any function in treeherder.autoclassify.matchers with a name ending in _matcher. This is currently overkill but protects against the unwary engineer adding new functions to the matchers module that shouldn't be treated as matchers. """ from . import matchers def is_matcher_func(member): return inspect.isfunction(member) and member.__name__.endswith("_matcher") members = inspect.getmembers(matchers, is_matcher_func) for name, func in members: yield func
def function[get_matchers, parameter[]]: constant[ Get matcher functions from treeherder.autoclassify.matchers We classify matchers as any function in treeherder.autoclassify.matchers with a name ending in _matcher. This is currently overkill but protects against the unwary engineer adding new functions to the matchers module that shouldn't be treated as matchers. ] from relative_module[None] import module[matchers] def function[is_matcher_func, parameter[member]]: return[<ast.BoolOp object at 0x7da1b08a4e80>] variable[members] assign[=] call[name[inspect].getmembers, parameter[name[matchers], name[is_matcher_func]]] for taget[tuple[[<ast.Name object at 0x7da1b08a76a0>, <ast.Name object at 0x7da1b08a71f0>]]] in starred[name[members]] begin[:] <ast.Yield object at 0x7da1b08a4790>
keyword[def] identifier[get_matchers] (): literal[string] keyword[from] . keyword[import] identifier[matchers] keyword[def] identifier[is_matcher_func] ( identifier[member] ): keyword[return] identifier[inspect] . identifier[isfunction] ( identifier[member] ) keyword[and] identifier[member] . identifier[__name__] . identifier[endswith] ( literal[string] ) identifier[members] = identifier[inspect] . identifier[getmembers] ( identifier[matchers] , identifier[is_matcher_func] ) keyword[for] identifier[name] , identifier[func] keyword[in] identifier[members] : keyword[yield] identifier[func]
def get_matchers(): """ Get matcher functions from treeherder.autoclassify.matchers We classify matchers as any function in treeherder.autoclassify.matchers with a name ending in _matcher. This is currently overkill but protects against the unwary engineer adding new functions to the matchers module that shouldn't be treated as matchers. """ from . import matchers def is_matcher_func(member): return inspect.isfunction(member) and member.__name__.endswith('_matcher') members = inspect.getmembers(matchers, is_matcher_func) for (name, func) in members: yield func # depends on [control=['for'], data=[]]
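The same discover-by-suffix pattern in a self-contained form: inspect.getmembers() with a predicate keeps only functions whose names end in _matcher. The throwaway module below is purely illustrative:

import inspect
import types

mod = types.ModuleType("matchers")
exec("def precise_matcher(t): return t\ndef helper(t): return t", mod.__dict__)

funcs = inspect.getmembers(
    mod, lambda m: inspect.isfunction(m) and m.__name__.endswith("_matcher")
)
print([name for name, _ in funcs])  # ['precise_matcher']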
def _AddCredentialConfiguration( self, path_spec, credential_type, credential_data): """Adds a credential configuration. Args: path_spec (dfvfs.PathSpec): path specification. credential_type (str): credential type. credential_data (bytes): credential data. """ credential_configuration = configurations.CredentialConfiguration( credential_data=credential_data, credential_type=credential_type, path_spec=path_spec) self._credential_configurations.append(credential_configuration)
def function[_AddCredentialConfiguration, parameter[self, path_spec, credential_type, credential_data]]: constant[Adds a credential configuration. Args: path_spec (dfvfs.PathSpec): path specification. credential_type (str): credential type. credential_data (bytes): credential data. ] variable[credential_configuration] assign[=] call[name[configurations].CredentialConfiguration, parameter[]] call[name[self]._credential_configurations.append, parameter[name[credential_configuration]]]
keyword[def] identifier[_AddCredentialConfiguration] ( identifier[self] , identifier[path_spec] , identifier[credential_type] , identifier[credential_data] ): literal[string] identifier[credential_configuration] = identifier[configurations] . identifier[CredentialConfiguration] ( identifier[credential_data] = identifier[credential_data] , identifier[credential_type] = identifier[credential_type] , identifier[path_spec] = identifier[path_spec] ) identifier[self] . identifier[_credential_configurations] . identifier[append] ( identifier[credential_configuration] )
def _AddCredentialConfiguration(self, path_spec, credential_type, credential_data): """Adds a credential configuration. Args: path_spec (dfvfs.PathSpec): path specification. credential_type (str): credential type. credential_data (bytes): credential data. """ credential_configuration = configurations.CredentialConfiguration(credential_data=credential_data, credential_type=credential_type, path_spec=path_spec) self._credential_configurations.append(credential_configuration)
def QA_fetch_index_min( code, start, end, format='numpy', frequence='1min', collections=DATABASE.index_min): 'Fetch stock minute-bar data' if frequence in ['1min', '1m']: frequence = '1min' elif frequence in ['5min', '5m']: frequence = '5min' elif frequence in ['15min', '15m']: frequence = '15min' elif frequence in ['30min', '30m']: frequence = '30min' elif frequence in ['60min', '60m']: frequence = '60min' __data = [] code = QA_util_code_tolist(code) cursor = collections.find({ 'code': {'$in': code}, "time_stamp": { "$gte": QA_util_time_stamp(start), "$lte": QA_util_time_stamp(end) }, 'type': frequence }, {"_id": 0}, batch_size=10000) if format in ['dict', 'json']: return [data for data in cursor] # for item in cursor: __data = pd.DataFrame([item for item in cursor]) __data = __data.assign(datetime=pd.to_datetime(__data['datetime'])) # __data.append([str(item['code']), float(item['open']), float(item['high']), float( # item['low']), float(item['close']), int(item['up_count']), int(item['down_count']), float(item['vol']), float(item['amount']), item['datetime'], item['time_stamp'], item['date'], item['type']]) # __data = DataFrame(__data, columns=[ # 'code', 'open', 'high', 'low', 'close', 'up_count', 'down_count', 'volume', 'amount', 'datetime', 'time_stamp', 'date', 'type']) # __data['datetime'] = pd.to_datetime(__data['datetime']) __data = __data.set_index('datetime', drop=False) if format in ['numpy', 'np', 'n']: return numpy.asarray(__data) elif format in ['list', 'l', 'L']: return numpy.asarray(__data).tolist() elif format in ['P', 'p', 'pandas', 'pd']: return __data
def function[QA_fetch_index_min, parameter[code, start, end, format, frequence, collections]]: constant[Fetch stock minute-bar data] if compare[name[frequence] in list[[<ast.Constant object at 0x7da1b1ff2b30>, <ast.Constant object at 0x7da1b1ff1000>]]] begin[:] variable[frequence] assign[=] constant[1min] variable[__data] assign[=] list[[]] variable[code] assign[=] call[name[QA_util_code_tolist], parameter[name[code]]] variable[cursor] assign[=] call[name[collections].find, parameter[dictionary[[<ast.Constant object at 0x7da1b1ff0bb0>, <ast.Constant object at 0x7da1b1ff1f00>, <ast.Constant object at 0x7da1b1ff0850>], [<ast.Dict object at 0x7da1b1ff09a0>, <ast.Dict object at 0x7da1b1ff02b0>, <ast.Name object at 0x7da1b1ff08e0>]], dictionary[[<ast.Constant object at 0x7da1b1ff26e0>], [<ast.Constant object at 0x7da1b1ff1fc0>]]]] if compare[name[format] in list[[<ast.Constant object at 0x7da1b1ff2ec0>, <ast.Constant object at 0x7da1b1ff2530>]]] begin[:] return[<ast.ListComp object at 0x7da1b1ff3610>] variable[__data] assign[=] call[name[pd].DataFrame, parameter[<ast.ListComp object at 0x7da1b1ff2290>]] variable[__data] assign[=] call[name[__data].assign, parameter[]] variable[__data] assign[=] call[name[__data].set_index, parameter[constant[datetime]]] if compare[name[format] in list[[<ast.Constant object at 0x7da1b1ff20b0>, <ast.Constant object at 0x7da1b1ff0d30>, <ast.Constant object at 0x7da1b1ff30d0>]]] begin[:] return[call[name[numpy].asarray, parameter[name[__data]]]]
keyword[def] identifier[QA_fetch_index_min] ( identifier[code] , identifier[start] , identifier[end] , identifier[format] = literal[string] , identifier[frequence] = literal[string] , identifier[collections] = identifier[DATABASE] . identifier[index_min] ): literal[string] keyword[if] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]: identifier[frequence] = literal[string] keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]: identifier[frequence] = literal[string] keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]: identifier[frequence] = literal[string] keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]: identifier[frequence] = literal[string] keyword[elif] identifier[frequence] keyword[in] [ literal[string] , literal[string] ]: identifier[frequence] = literal[string] identifier[__data] =[] identifier[code] = identifier[QA_util_code_tolist] ( identifier[code] ) identifier[cursor] = identifier[collections] . identifier[find] ({ literal[string] :{ literal[string] : identifier[code] }, literal[string] :{ literal[string] : identifier[QA_util_time_stamp] ( identifier[start] ), literal[string] : identifier[QA_util_time_stamp] ( identifier[end] ) }, literal[string] : identifier[frequence] },{ literal[string] : literal[int] }, identifier[batch_size] = literal[int] ) keyword[if] identifier[format] keyword[in] [ literal[string] , literal[string] ]: keyword[return] [ identifier[data] keyword[for] identifier[data] keyword[in] identifier[cursor] ] identifier[__data] = identifier[pd] . identifier[DataFrame] ([ identifier[item] keyword[for] identifier[item] keyword[in] identifier[cursor] ]) identifier[__data] = identifier[__data] . identifier[assign] ( identifier[datetime] = identifier[pd] . identifier[to_datetime] ( identifier[__data] [ literal[string] ])) identifier[__data] = identifier[__data] . identifier[set_index] ( literal[string] , identifier[drop] = keyword[False] ) keyword[if] identifier[format] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[return] identifier[numpy] . identifier[asarray] ( identifier[__data] ) keyword[elif] identifier[format] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[return] identifier[numpy] . identifier[asarray] ( identifier[__data] ). identifier[tolist] () keyword[elif] identifier[format] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[return] identifier[__data]
def QA_fetch_index_min(code, start, end, format='numpy', frequence='1min', collections=DATABASE.index_min): """Fetch stock minute-bar data""" if frequence in ['1min', '1m']: frequence = '1min' # depends on [control=['if'], data=['frequence']] elif frequence in ['5min', '5m']: frequence = '5min' # depends on [control=['if'], data=['frequence']] elif frequence in ['15min', '15m']: frequence = '15min' # depends on [control=['if'], data=['frequence']] elif frequence in ['30min', '30m']: frequence = '30min' # depends on [control=['if'], data=['frequence']] elif frequence in ['60min', '60m']: frequence = '60min' # depends on [control=['if'], data=['frequence']] __data = [] code = QA_util_code_tolist(code) cursor = collections.find({'code': {'$in': code}, 'time_stamp': {'$gte': QA_util_time_stamp(start), '$lte': QA_util_time_stamp(end)}, 'type': frequence}, {'_id': 0}, batch_size=10000) if format in ['dict', 'json']: return [data for data in cursor] # depends on [control=['if'], data=[]] # for item in cursor: __data = pd.DataFrame([item for item in cursor]) __data = __data.assign(datetime=pd.to_datetime(__data['datetime'])) # __data.append([str(item['code']), float(item['open']), float(item['high']), float( # item['low']), float(item['close']), int(item['up_count']), int(item['down_count']), float(item['vol']), float(item['amount']), item['datetime'], item['time_stamp'], item['date'], item['type']]) # __data = DataFrame(__data, columns=[ # 'code', 'open', 'high', 'low', 'close', 'up_count', 'down_count', 'volume', 'amount', 'datetime', 'time_stamp', 'date', 'type']) # __data['datetime'] = pd.to_datetime(__data['datetime']) __data = __data.set_index('datetime', drop=False) if format in ['numpy', 'np', 'n']: return numpy.asarray(__data) # depends on [control=['if'], data=[]] elif format in ['list', 'l', 'L']: return numpy.asarray(__data).tolist() # depends on [control=['if'], data=[]] elif format in ['P', 'p', 'pandas', 'pd']: return __data # depends on [control=['if'], data=[]]
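Usage sketch for the fetcher above; the index code and date strings are illustrative, and a populated QUANTAXIS MongoDB is assumed:

df = QA_fetch_index_min(
    '000300',                 # hypothetical index code
    '2019-01-02 09:30:00',
    '2019-01-02 15:00:00',
    format='pd',
    frequence='5min',
)  # pandas DataFrame of 5-minute bars indexed by datetime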
def distance(self, other, config): """ Returns the genetic distance between this genome and the other. This distance value is used to compute genome compatibility for speciation. """ # Compute node gene distance component. node_distance = 0.0 if self.nodes or other.nodes: disjoint_nodes = 0 for k2 in iterkeys(other.nodes): if k2 not in self.nodes: disjoint_nodes += 1 for k1, n1 in iteritems(self.nodes): n2 = other.nodes.get(k1) if n2 is None: disjoint_nodes += 1 else: # Homologous genes compute their own distance value. node_distance += n1.distance(n2, config) max_nodes = max(len(self.nodes), len(other.nodes)) node_distance = (node_distance + config.compatibility_disjoint_coefficient * disjoint_nodes) / max_nodes # Compute connection gene differences. connection_distance = 0.0 if self.connections or other.connections: disjoint_connections = 0 for k2 in iterkeys(other.connections): if k2 not in self.connections: disjoint_connections += 1 for k1, c1 in iteritems(self.connections): c2 = other.connections.get(k1) if c2 is None: disjoint_connections += 1 else: # Homologous genes compute their own distance value. connection_distance += c1.distance(c2, config) max_conn = max(len(self.connections), len(other.connections)) connection_distance = (connection_distance + config.compatibility_disjoint_coefficient * disjoint_connections) / max_conn distance = node_distance + connection_distance return distance
def function[distance, parameter[self, other, config]]: constant[ Returns the genetic distance between this genome and the other. This distance value is used to compute genome compatibility for speciation. ] variable[node_distance] assign[=] constant[0.0] if <ast.BoolOp object at 0x7da1b19dbd60> begin[:] variable[disjoint_nodes] assign[=] constant[0] for taget[name[k2]] in starred[call[name[iterkeys], parameter[name[other].nodes]]] begin[:] if compare[name[k2] <ast.NotIn object at 0x7da2590d7190> name[self].nodes] begin[:] <ast.AugAssign object at 0x7da1b19db9a0> for taget[tuple[[<ast.Name object at 0x7da1b19db8b0>, <ast.Name object at 0x7da1b19db880>]]] in starred[call[name[iteritems], parameter[name[self].nodes]]] begin[:] variable[n2] assign[=] call[name[other].nodes.get, parameter[name[k1]]] if compare[name[n2] is constant[None]] begin[:] <ast.AugAssign object at 0x7da1b19db580> variable[max_nodes] assign[=] call[name[max], parameter[call[name[len], parameter[name[self].nodes]], call[name[len], parameter[name[other].nodes]]]] variable[node_distance] assign[=] binary_operation[binary_operation[name[node_distance] + binary_operation[name[config].compatibility_disjoint_coefficient * name[disjoint_nodes]]] / name[max_nodes]] variable[connection_distance] assign[=] constant[0.0] if <ast.BoolOp object at 0x7da1b19daec0> begin[:] variable[disjoint_connections] assign[=] constant[0] for taget[name[k2]] in starred[call[name[iterkeys], parameter[name[other].connections]]] begin[:] if compare[name[k2] <ast.NotIn object at 0x7da2590d7190> name[self].connections] begin[:] <ast.AugAssign object at 0x7da1b19dab00> for taget[tuple[[<ast.Name object at 0x7da1b19daa10>, <ast.Name object at 0x7da1b19da9e0>]]] in starred[call[name[iteritems], parameter[name[self].connections]]] begin[:] variable[c2] assign[=] call[name[other].connections.get, parameter[name[k1]]] if compare[name[c2] is constant[None]] begin[:] <ast.AugAssign object at 0x7da1b19da6e0> variable[max_conn] assign[=] call[name[max], parameter[call[name[len], parameter[name[self].connections]], call[name[len], parameter[name[other].connections]]]] variable[connection_distance] assign[=] binary_operation[binary_operation[name[connection_distance] + binary_operation[name[config].compatibility_disjoint_coefficient * name[disjoint_connections]]] / name[max_conn]] variable[distance] assign[=] binary_operation[name[node_distance] + name[connection_distance]] return[name[distance]]
keyword[def] identifier[distance] ( identifier[self] , identifier[other] , identifier[config] ): literal[string] identifier[node_distance] = literal[int] keyword[if] identifier[self] . identifier[nodes] keyword[or] identifier[other] . identifier[nodes] : identifier[disjoint_nodes] = literal[int] keyword[for] identifier[k2] keyword[in] identifier[iterkeys] ( identifier[other] . identifier[nodes] ): keyword[if] identifier[k2] keyword[not] keyword[in] identifier[self] . identifier[nodes] : identifier[disjoint_nodes] += literal[int] keyword[for] identifier[k1] , identifier[n1] keyword[in] identifier[iteritems] ( identifier[self] . identifier[nodes] ): identifier[n2] = identifier[other] . identifier[nodes] . identifier[get] ( identifier[k1] ) keyword[if] identifier[n2] keyword[is] keyword[None] : identifier[disjoint_nodes] += literal[int] keyword[else] : identifier[node_distance] += identifier[n1] . identifier[distance] ( identifier[n2] , identifier[config] ) identifier[max_nodes] = identifier[max] ( identifier[len] ( identifier[self] . identifier[nodes] ), identifier[len] ( identifier[other] . identifier[nodes] )) identifier[node_distance] =( identifier[node_distance] + identifier[config] . identifier[compatibility_disjoint_coefficient] * identifier[disjoint_nodes] )/ identifier[max_nodes] identifier[connection_distance] = literal[int] keyword[if] identifier[self] . identifier[connections] keyword[or] identifier[other] . identifier[connections] : identifier[disjoint_connections] = literal[int] keyword[for] identifier[k2] keyword[in] identifier[iterkeys] ( identifier[other] . identifier[connections] ): keyword[if] identifier[k2] keyword[not] keyword[in] identifier[self] . identifier[connections] : identifier[disjoint_connections] += literal[int] keyword[for] identifier[k1] , identifier[c1] keyword[in] identifier[iteritems] ( identifier[self] . identifier[connections] ): identifier[c2] = identifier[other] . identifier[connections] . identifier[get] ( identifier[k1] ) keyword[if] identifier[c2] keyword[is] keyword[None] : identifier[disjoint_connections] += literal[int] keyword[else] : identifier[connection_distance] += identifier[c1] . identifier[distance] ( identifier[c2] , identifier[config] ) identifier[max_conn] = identifier[max] ( identifier[len] ( identifier[self] . identifier[connections] ), identifier[len] ( identifier[other] . identifier[connections] )) identifier[connection_distance] =( identifier[connection_distance] + identifier[config] . identifier[compatibility_disjoint_coefficient] * identifier[disjoint_connections] )/ identifier[max_conn] identifier[distance] = identifier[node_distance] + identifier[connection_distance] keyword[return] identifier[distance]
def distance(self, other, config): """ Returns the genetic distance between this genome and the other. This distance value is used to compute genome compatibility for speciation. """ # Compute node gene distance component. node_distance = 0.0 if self.nodes or other.nodes: disjoint_nodes = 0 for k2 in iterkeys(other.nodes): if k2 not in self.nodes: disjoint_nodes += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k2']] for (k1, n1) in iteritems(self.nodes): n2 = other.nodes.get(k1) if n2 is None: disjoint_nodes += 1 # depends on [control=['if'], data=[]] else: # Homologous genes compute their own distance value. node_distance += n1.distance(n2, config) # depends on [control=['for'], data=[]] max_nodes = max(len(self.nodes), len(other.nodes)) node_distance = (node_distance + config.compatibility_disjoint_coefficient * disjoint_nodes) / max_nodes # depends on [control=['if'], data=[]] # Compute connection gene differences. connection_distance = 0.0 if self.connections or other.connections: disjoint_connections = 0 for k2 in iterkeys(other.connections): if k2 not in self.connections: disjoint_connections += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k2']] for (k1, c1) in iteritems(self.connections): c2 = other.connections.get(k1) if c2 is None: disjoint_connections += 1 # depends on [control=['if'], data=[]] else: # Homologous genes compute their own distance value. connection_distance += c1.distance(c2, config) # depends on [control=['for'], data=[]] max_conn = max(len(self.connections), len(other.connections)) connection_distance = (connection_distance + config.compatibility_disjoint_coefficient * disjoint_connections) / max_conn # depends on [control=['if'], data=[]] distance = node_distance + connection_distance return distance
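In formula form, the method above computes the NEAT-style compatibility distance, where $c$ is compatibility_disjoint_coefficient, $D$ counts disjoint genes, $d(\cdot,\cdot)$ is the per-gene distance, and each denominator is the larger genome's gene count:

$$\delta = \frac{\sum d(n_1, n_2) + c\,D_{\mathrm{nodes}}}{\max(N_1, N_2)} + \frac{\sum d(c_1, c_2) + c\,D_{\mathrm{conns}}}{\max(C_1, C_2)}$$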
def check_resource(resource): ''' Check a resource's availability against a linkchecker backend The linkchecker used can be configured on a resource basis by setting the `resource.extras['check:checker']` attribute with a key that points to a valid `udata.linkcheckers` entrypoint. If not set, it will fall back on the default linkchecker defined by the configuration variable `LINKCHECKING_DEFAULT_LINKCHECKER`. Returns ------- dict or (dict, int) Check results dict and status code (if error). ''' linkchecker_type = resource.extras.get('check:checker') LinkChecker = get_linkchecker(linkchecker_type) if not LinkChecker: return {'error': 'No linkchecker configured.'}, 503 if is_ignored(resource): return dummy_check_response() result = LinkChecker().check(resource) if not result: return {'error': 'No response from linkchecker'}, 503 elif result.get('check:error'): return {'error': result['check:error']}, 500 elif not result.get('check:status'): return {'error': 'No status in response from linkchecker'}, 503 # store the check result in the resource's extras # XXX maybe this logic should be in the `Resource` model? previous_status = resource.extras.get('check:available') check_keys = _get_check_keys(result, resource, previous_status) resource.extras.update(check_keys) resource.save(signal_kwargs={'ignores': ['post_save']}) # Prevent signal triggering on dataset return result
def function[check_resource, parameter[resource]]: constant[ Check a resource's availability against a linkchecker backend The linkchecker used can be configured on a resource basis by setting the `resource.extras['check:checker']` attribute with a key that points to a valid `udata.linkcheckers` entrypoint. If not set, it will fall back on the default linkchecker defined by the configuration variable `LINKCHECKING_DEFAULT_LINKCHECKER`. Returns ------- dict or (dict, int) Check results dict and status code (if error). ] variable[linkchecker_type] assign[=] call[name[resource].extras.get, parameter[constant[check:checker]]] variable[LinkChecker] assign[=] call[name[get_linkchecker], parameter[name[linkchecker_type]]] if <ast.UnaryOp object at 0x7da18f09cd00> begin[:] return[tuple[[<ast.Dict object at 0x7da18f09f520>, <ast.Constant object at 0x7da18f09c250>]]] if call[name[is_ignored], parameter[name[resource]]] begin[:] return[call[name[dummy_check_response], parameter[]]] variable[result] assign[=] call[call[name[LinkChecker], parameter[]].check, parameter[name[resource]]] if <ast.UnaryOp object at 0x7da18f09fd60> begin[:] return[tuple[[<ast.Dict object at 0x7da18f09ea10>, <ast.Constant object at 0x7da18f09d270>]]] variable[previous_status] assign[=] call[name[resource].extras.get, parameter[constant[check:available]]] variable[check_keys] assign[=] call[name[_get_check_keys], parameter[name[result], name[resource], name[previous_status]]] call[name[resource].extras.update, parameter[name[check_keys]]] call[name[resource].save, parameter[]] return[name[result]]
keyword[def] identifier[check_resource] ( identifier[resource] ): literal[string] identifier[linkchecker_type] = identifier[resource] . identifier[extras] . identifier[get] ( literal[string] ) identifier[LinkChecker] = identifier[get_linkchecker] ( identifier[linkchecker_type] ) keyword[if] keyword[not] identifier[LinkChecker] : keyword[return] { literal[string] : literal[string] }, literal[int] keyword[if] identifier[is_ignored] ( identifier[resource] ): keyword[return] identifier[dummy_check_response] () identifier[result] = identifier[LinkChecker] (). identifier[check] ( identifier[resource] ) keyword[if] keyword[not] identifier[result] : keyword[return] { literal[string] : literal[string] }, literal[int] keyword[elif] identifier[result] . identifier[get] ( literal[string] ): keyword[return] { literal[string] : identifier[result] [ literal[string] ]}, literal[int] keyword[elif] keyword[not] identifier[result] . identifier[get] ( literal[string] ): keyword[return] { literal[string] : literal[string] }, literal[int] identifier[previous_status] = identifier[resource] . identifier[extras] . identifier[get] ( literal[string] ) identifier[check_keys] = identifier[_get_check_keys] ( identifier[result] , identifier[resource] , identifier[previous_status] ) identifier[resource] . identifier[extras] . identifier[update] ( identifier[check_keys] ) identifier[resource] . identifier[save] ( identifier[signal_kwargs] ={ literal[string] :[ literal[string] ]}) keyword[return] identifier[result]
def check_resource(resource): """ Check a resource's availability against a linkchecker backend The linkchecker used can be configured on a resource basis by setting the `resource.extras['check:checker']` attribute with a key that points to a valid `udata.linkcheckers` entrypoint. If not set, it will fall back on the default linkchecker defined by the configuration variable `LINKCHECKING_DEFAULT_LINKCHECKER`. Returns ------- dict or (dict, int) Check results dict and status code (if error). """ linkchecker_type = resource.extras.get('check:checker') LinkChecker = get_linkchecker(linkchecker_type) if not LinkChecker: return ({'error': 'No linkchecker configured.'}, 503) # depends on [control=['if'], data=[]] if is_ignored(resource): return dummy_check_response() # depends on [control=['if'], data=[]] result = LinkChecker().check(resource) if not result: return ({'error': 'No response from linkchecker'}, 503) # depends on [control=['if'], data=[]] elif result.get('check:error'): return ({'error': result['check:error']}, 500) # depends on [control=['if'], data=[]] elif not result.get('check:status'): return ({'error': 'No status in response from linkchecker'}, 503) # depends on [control=['if'], data=[]] # store the check result in the resource's extras # XXX maybe this logic should be in the `Resource` model? previous_status = resource.extras.get('check:available') check_keys = _get_check_keys(result, resource, previous_status) resource.extras.update(check_keys) resource.save(signal_kwargs={'ignores': ['post_save']}) # Prevent signal triggering on dataset return result
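check_resource() only relies on the checker's check() returning a dict that carries 'check:status' (or 'check:error' on failure). A hypothetical minimal linkchecker honoring that contract, assuming the resource exposes a url attribute (requests-based; not part of udata):

import requests

class HeadLinkChecker:
    def check(self, resource):
        try:
            r = requests.head(resource.url, allow_redirects=True, timeout=10)
            return {'check:status': r.status_code}
        except requests.RequestException as exc:
            return {'check:error': str(exc)}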
def set_pulse_duration(self, duration):
        """
        Sets the pulse duration for events in milliseconds when activate_line
        is called
        """
        if duration > 4294967295:
            raise ValueError('Duration is too long. Please choose a value '
                             'less than 4294967296.')

        big_endian = hex(duration)[2:]
        if len(big_endian) % 2 != 0:
            big_endian = '0'+big_endian

        little_endian = []

        for i in range(0, len(big_endian), 2):
            little_endian.insert(0, big_endian[i:i+2])

        for i in range(0, 4-len(little_endian)):
            little_endian.append('00')

        command = 'mp'
        for i in little_endian:
            command += chr(int(i, 16))

        self.con.send_xid_command(command, 0)
def function[set_pulse_duration, parameter[self, duration]]: constant[ Sets the pulse duration for events in milliseconds when activate_line is called ] if compare[name[duration] greater[>] constant[4294967295]] begin[:] <ast.Raise object at 0x7da20e9b1c60> variable[big_endian] assign[=] call[call[name[hex], parameter[name[duration]]]][<ast.Slice object at 0x7da20e9b25f0>] if compare[binary_operation[call[name[len], parameter[name[big_endian]]] <ast.Mod object at 0x7da2590d6920> constant[2]] not_equal[!=] constant[0]] begin[:] variable[big_endian] assign[=] binary_operation[constant[0] + name[big_endian]] variable[little_endian] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[big_endian]]], constant[2]]]] begin[:] call[name[little_endian].insert, parameter[constant[0], call[name[big_endian]][<ast.Slice object at 0x7da20e9b3220>]]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[constant[4] - call[name[len], parameter[name[little_endian]]]]]]] begin[:] call[name[little_endian].append, parameter[constant[00]]] variable[command] assign[=] constant[mp] for taget[name[i]] in starred[name[little_endian]] begin[:] <ast.AugAssign object at 0x7da20e9b2b90> call[name[self].con.send_xid_command, parameter[name[command], constant[0]]]
keyword[def] identifier[set_pulse_duration] ( identifier[self] , identifier[duration] ): literal[string] keyword[if] identifier[duration] > literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[big_endian] = identifier[hex] ( identifier[duration] )[ literal[int] :] keyword[if] identifier[len] ( identifier[big_endian] )% literal[int] != literal[int] : identifier[big_endian] = literal[string] + identifier[big_endian] identifier[little_endian] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[big_endian] ), literal[int] ): identifier[little_endian] . identifier[insert] ( literal[int] , identifier[big_endian] [ identifier[i] : identifier[i] + literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] - identifier[len] ( identifier[little_endian] )): identifier[little_endian] . identifier[append] ( literal[string] ) identifier[command] = literal[string] keyword[for] identifier[i] keyword[in] identifier[little_endian] : identifier[command] += identifier[chr] ( identifier[int] ( identifier[i] , literal[int] )) identifier[self] . identifier[con] . identifier[send_xid_command] ( identifier[command] , literal[int] )
def set_pulse_duration(self, duration):
    """
    Sets the pulse duration for events in milliseconds when activate_line
    is called
    """
    if duration > 4294967295:
        raise ValueError('Duration is too long. Please choose a value less than 4294967296.') # depends on [control=['if'], data=[]]
    big_endian = hex(duration)[2:]
    if len(big_endian) % 2 != 0:
        big_endian = '0' + big_endian # depends on [control=['if'], data=[]]
    little_endian = []
    for i in range(0, len(big_endian), 2):
        little_endian.insert(0, big_endian[i:i + 2]) # depends on [control=['for'], data=['i']]
    for i in range(0, 4 - len(little_endian)):
        little_endian.append('00') # depends on [control=['for'], data=[]]
    command = 'mp'
    for i in little_endian:
        command += chr(int(i, 16)) # depends on [control=['for'], data=['i']]
    self.con.send_xid_command(command, 0)
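The loop above hand-rolls a 4-byte little-endian encoding; a sketch of the equivalent packing with struct (build_pulse_command is a hypothetical helper, and byte strings are assumed acceptable by the transport):

import struct

def build_pulse_command(duration_ms):
    # 'mp' followed by the duration as a 4-byte little-endian unsigned int.
    # struct.pack('<I', ...) rejects values outside 0..4294967295, which
    # mirrors the explicit range check in set_pulse_duration.
    return b'mp' + struct.pack('<I', duration_ms)

assert build_pulse_command(1000) == b'mp\xe8\x03\x00\x00'  # 1000 == 0x3e8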
def IPT_to_XYZ(cobj, *args, **kwargs):
    """
    Converts IPT to XYZ.
    """
    ipt_values = numpy.array(cobj.get_value_tuple())
    # Invert the LMS'-to-IPT matrix to recover the nonlinear cone response.
    lms_values = numpy.dot(
        numpy.linalg.inv(IPTColor.conversion_matrices['lms_to_ipt']),
        ipt_values)
    # Undo the 0.43 power compression; numpy.sign preserves negative responses.
    lms_prime = numpy.sign(lms_values) * numpy.abs(lms_values) ** (1 / 0.43)
    # Invert the XYZ-to-LMS matrix to map the linear cone response back to XYZ.
    xyz_values = numpy.dot(
        numpy.linalg.inv(IPTColor.conversion_matrices['xyz_to_lms']),
        lms_prime)
    return XYZColor(*xyz_values, observer='2', illuminant='d65')
def function[IPT_to_XYZ, parameter[cobj]]: constant[ Converts IPT to XYZ. ] variable[ipt_values] assign[=] call[name[numpy].array, parameter[call[name[cobj].get_value_tuple, parameter[]]]] variable[lms_values] assign[=] call[name[numpy].dot, parameter[call[name[numpy].linalg.inv, parameter[call[name[IPTColor].conversion_matrices][constant[lms_to_ipt]]]], name[ipt_values]]] variable[lms_prime] assign[=] binary_operation[call[name[numpy].sign, parameter[name[lms_values]]] * binary_operation[call[name[numpy].abs, parameter[name[lms_values]]] ** binary_operation[constant[1] / constant[0.43]]]] variable[xyz_values] assign[=] call[name[numpy].dot, parameter[call[name[numpy].linalg.inv, parameter[call[name[IPTColor].conversion_matrices][constant[xyz_to_lms]]]], name[lms_prime]]] return[call[name[XYZColor], parameter[<ast.Starred object at 0x7da207f9b550>]]]
keyword[def] identifier[IPT_to_XYZ] ( identifier[cobj] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[ipt_values] = identifier[numpy] . identifier[array] ( identifier[cobj] . identifier[get_value_tuple] ()) identifier[lms_values] = identifier[numpy] . identifier[dot] ( identifier[numpy] . identifier[linalg] . identifier[inv] ( identifier[IPTColor] . identifier[conversion_matrices] [ literal[string] ]), identifier[ipt_values] ) identifier[lms_prime] = identifier[numpy] . identifier[sign] ( identifier[lms_values] )* identifier[numpy] . identifier[abs] ( identifier[lms_values] )**( literal[int] / literal[int] ) identifier[xyz_values] = identifier[numpy] . identifier[dot] ( identifier[numpy] . identifier[linalg] . identifier[inv] ( identifier[IPTColor] . identifier[conversion_matrices] [ literal[string] ]), identifier[lms_prime] ) keyword[return] identifier[XYZColor] (* identifier[xyz_values] , identifier[observer] = literal[string] , identifier[illuminant] = literal[string] )
def IPT_to_XYZ(cobj, *args, **kwargs):
    """
    Converts IPT to XYZ.
    """
    ipt_values = numpy.array(cobj.get_value_tuple())
    # Invert the LMS'-to-IPT matrix to recover the nonlinear cone response.
    lms_values = numpy.dot(numpy.linalg.inv(IPTColor.conversion_matrices['lms_to_ipt']), ipt_values)
    # Undo the 0.43 power compression; numpy.sign preserves negative responses.
    lms_prime = numpy.sign(lms_values) * numpy.abs(lms_values) ** (1 / 0.43)
    # Invert the XYZ-to-LMS matrix to map the linear cone response back to XYZ.
    xyz_values = numpy.dot(numpy.linalg.inv(IPTColor.conversion_matrices['xyz_to_lms']), lms_prime)
    return XYZColor(*xyz_values, observer='2', illuminant='d65')
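A hedged round-trip sketch; the colormath-style import path is an assumption based on the classes used above:

from colormath.color_objects import IPTColor  # assumed import path

ipt = IPTColor(0.5, 0.0, 0.0)  # an achromatic, mid-lightness IPT value
xyz = IPT_to_XYZ(ipt)
print(xyz.xyz_x, xyz.xyz_y, xyz.xyz_z)  # tristimulus values under D65, 2-degree observer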
def _get_config_dirs(self):
        """Return a list of directories where config files may be located.

        The following directories are returned, in search order::

          ./
          /etc/zvmsdk/
          /etc/
          ~/
          ../etc/

        """
        _cwd = os.path.split(os.path.abspath(__file__))[0]
        _pdir = os.path.split(_cwd)[0]
        _etcdir = ''.join((_pdir, '/', 'etc/'))

        cfg_dirs = [
            self._fixpath(_cwd),
            self._fixpath('/etc/zvmsdk/'),
            self._fixpath('/etc/'),
            self._fixpath('~'),
            self._fixpath(_etcdir),
        ]

        return [x for x in cfg_dirs if x]
def function[_get_config_dirs, parameter[self]]: constant[Return a list of directories where config files may be located. The following directories are returned, in search order:: ./ /etc/zvmsdk/ /etc/ ~/ ../etc/ ] variable[_cwd] assign[=] call[call[name[os].path.split, parameter[call[name[os].path.abspath, parameter[name[__file__]]]]]][constant[0]] variable[_pdir] assign[=] call[call[name[os].path.split, parameter[name[_cwd]]]][constant[0]] variable[_etcdir] assign[=] call[constant[].join, parameter[tuple[[<ast.Name object at 0x7da20e749ab0>, <ast.Constant object at 0x7da20e74ada0>, <ast.Constant object at 0x7da20e7494b0>]]]] variable[cfg_dirs] assign[=] list[[<ast.Call object at 0x7da20e74b3d0>, <ast.Call object at 0x7da20e74beb0>, <ast.Call object at 0x7da20e74a890>, <ast.Call object at 0x7da20e74b0a0>, <ast.Call object at 0x7da20c6c6770>]] return[<ast.ListComp object at 0x7da20c6c5ff0>]
keyword[def] identifier[_get_config_dirs] ( identifier[self] ): literal[string] identifier[_cwd] = identifier[os] . identifier[path] . identifier[split] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] ))[ literal[int] ] identifier[_pdir] = identifier[os] . identifier[path] . identifier[split] ( identifier[_cwd] )[ literal[int] ] identifier[_etcdir] = literal[string] . identifier[join] (( identifier[_pdir] , literal[string] , literal[string] )) identifier[cfg_dirs] =[ identifier[self] . identifier[_fixpath] ( identifier[_cwd] ), identifier[self] . identifier[_fixpath] ( literal[string] ), identifier[self] . identifier[_fixpath] ( literal[string] ), identifier[self] . identifier[_fixpath] ( literal[string] ), identifier[self] . identifier[_fixpath] ( identifier[_etcdir] ), ] keyword[return] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[cfg_dirs] keyword[if] identifier[x] ]
def _get_config_dirs(self):
    """Return a list of directories where config files may be located.

        The following directories are returned, in search order::

        ./
        /etc/zvmsdk/
        /etc/
        ~/
        ../etc/

        """
    _cwd = os.path.split(os.path.abspath(__file__))[0]
    _pdir = os.path.split(_cwd)[0]
    _etcdir = ''.join((_pdir, '/', 'etc/'))
    cfg_dirs = [self._fixpath(_cwd), self._fixpath('/etc/zvmsdk/'), self._fixpath('/etc/'), self._fixpath('~'), self._fixpath(_etcdir)]
    return [x for x in cfg_dirs if x]
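A sketch of a typical consumer of the returned search path; find_config_file is a hypothetical helper, not part of the original module:

import os

def find_config_file(dirs, name='zvmsdk.conf'):
    # Probe each candidate directory in order and return the first
    # config file that actually exists, or None if none is found.
    for d in dirs:
        candidate = os.path.join(d, name)
        if os.path.isfile(candidate):
            return candidate
    return None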
def shutdown(self):
        """
            Stop broker instance.

            Closes all connected sessions, stops listening on the network socket and frees resources.
        """
        try:
            self._sessions = dict()
            self._subscriptions = dict()
            self._retained_messages = dict()
            self.transitions.shutdown()
        except (MachineError, ValueError) as exc:
            # Backwards compat: MachineError is raised by transitions < 0.5.0.
            self.logger.debug("Invalid method call at this moment: %s" % exc)
            raise BrokerException("Broker instance can't be stopped: %s" % exc)

        # Fire broker_shutdown event to plugins
        yield from self.plugins_manager.fire_event(EVENT_BROKER_PRE_SHUTDOWN)

        # Stop broadcast loop
        if self._broadcast_task:
            self._broadcast_task.cancel()
        if self._broadcast_queue.qsize() > 0:
            self.logger.warning("%d messages not broadcasted" % self._broadcast_queue.qsize())

        for listener_name in self._servers:
            server = self._servers[listener_name]
            yield from server.close_instance()
        self.logger.debug("Broker closing")
        self.logger.info("Broker closed")
        yield from self.plugins_manager.fire_event(EVENT_BROKER_POST_SHUTDOWN)
        self.transitions.stopping_success()
def function[shutdown, parameter[self]]: constant[ Stop broker instance. Closes all connected sessions, stops listening on the network socket and frees resources. ] <ast.Try object at 0x7da18fe90760> <ast.YieldFrom object at 0x7da18fe90f70> if name[self]._broadcast_task begin[:] call[name[self]._broadcast_task.cancel, parameter[]] if compare[call[name[self]._broadcast_queue.qsize, parameter[]] greater[>] constant[0]] begin[:] call[name[self].logger.warning, parameter[binary_operation[constant[%d messages not broadcasted] <ast.Mod object at 0x7da2590d6920> call[name[self]._broadcast_queue.qsize, parameter[]]]]] for taget[name[listener_name]] in starred[name[self]._servers] begin[:] variable[server] assign[=] call[name[self]._servers][name[listener_name]] <ast.YieldFrom object at 0x7da18fe92a10> call[name[self].logger.debug, parameter[constant[Broker closing]]] call[name[self].logger.info, parameter[constant[Broker closed]]] <ast.YieldFrom object at 0x7da18fe92e90> call[name[self].transitions.stopping_success, parameter[]]
keyword[def] identifier[shutdown] ( identifier[self] ): literal[string] keyword[try] : identifier[self] . identifier[_sessions] = identifier[dict] () identifier[self] . identifier[_subscriptions] = identifier[dict] () identifier[self] . identifier[_retained_messages] = identifier[dict] () identifier[self] . identifier[transitions] . identifier[shutdown] () keyword[except] ( identifier[MachineError] , identifier[ValueError] ) keyword[as] identifier[exc] : identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[exc] ) keyword[raise] identifier[BrokerException] ( literal[string] % identifier[exc] ) keyword[yield] keyword[from] identifier[self] . identifier[plugins_manager] . identifier[fire_event] ( identifier[EVENT_BROKER_PRE_SHUTDOWN] ) keyword[if] identifier[self] . identifier[_broadcast_task] : identifier[self] . identifier[_broadcast_task] . identifier[cancel] () keyword[if] identifier[self] . identifier[_broadcast_queue] . identifier[qsize] ()> literal[int] : identifier[self] . identifier[logger] . identifier[warning] ( literal[string] % identifier[self] . identifier[_broadcast_queue] . identifier[qsize] ()) keyword[for] identifier[listener_name] keyword[in] identifier[self] . identifier[_servers] : identifier[server] = identifier[self] . identifier[_servers] [ identifier[listener_name] ] keyword[yield] keyword[from] identifier[server] . identifier[close_instance] () identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[logger] . identifier[info] ( literal[string] ) keyword[yield] keyword[from] identifier[self] . identifier[plugins_manager] . identifier[fire_event] ( identifier[EVENT_BROKER_POST_SHUTDOWN] ) identifier[self] . identifier[transitions] . identifier[stopping_success] ()
def shutdown(self):
    """
        Stop broker instance.

        Closes all connected sessions, stops listening on the network socket and frees resources.
    """
    try:
        self._sessions = dict()
        self._subscriptions = dict()
        self._retained_messages = dict()
        self.transitions.shutdown() # depends on [control=['try'], data=[]]
    except (MachineError, ValueError) as exc:
        # Backwards compat: MachineError is raised by transitions < 0.5.0.
        self.logger.debug('Invalid method call at this moment: %s' % exc)
        raise BrokerException("Broker instance can't be stopped: %s" % exc) # depends on [control=['except'], data=['exc']]
    # Fire broker_shutdown event to plugins
    yield from self.plugins_manager.fire_event(EVENT_BROKER_PRE_SHUTDOWN)
    # Stop broadcast loop
    if self._broadcast_task:
        self._broadcast_task.cancel() # depends on [control=['if'], data=[]]
    if self._broadcast_queue.qsize() > 0:
        self.logger.warning('%d messages not broadcasted' % self._broadcast_queue.qsize()) # depends on [control=['if'], data=[]]
    for listener_name in self._servers:
        server = self._servers[listener_name]
        yield from server.close_instance() # depends on [control=['for'], data=['listener_name']]
    self.logger.debug('Broker closing')
    self.logger.info('Broker closed')
    yield from self.plugins_manager.fire_event(EVENT_BROKER_POST_SHUTDOWN)
    self.transitions.stopping_success()
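Since shutdown is an old-style (yield from) coroutine, a caller drives it through the event loop; the Broker construction and start() call below are assumptions for illustration:

import asyncio

# Hedged driver sketch -- Broker(config) and broker.start() are assumed
# to exist alongside shutdown(); adjust to the real constructor.
loop = asyncio.get_event_loop()
broker = Broker(config, loop=loop)
loop.run_until_complete(broker.start())
try:
    loop.run_forever()  # serve until interrupted
except KeyboardInterrupt:
    pass
finally:
    loop.run_until_complete(broker.shutdown())
    loop.close()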