Column           Type    Min length  Max length
code             string  75          104k
code_sememe      string  47          309k
token_type       string  215         214k
code_dependency  string  75          155k
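Each sample below appears four times, once per column: the raw source (code), a linearized AST rendering (code_sememe), a lexical token stream with keyword/identifier/literal tags (token_type), and the source re-emitted with control-dependency comments (code_dependency). As a minimal sketch of working with such a dump — the file name and JSON-lines container are assumptions, not given by this dump:

    # Hypothetical loading sketch: the dump's real file name and container
    # format are not stated here, so a JSON-lines file is assumed.
    import pandas as pd

    df = pd.read_json("code_dataset.jsonl", lines=True)
    print(df.columns.tolist())
    # ['code', 'code_sememe', 'token_type', 'code_dependency']
    print(df.loc[0, "code"].splitlines()[0])  # first line of the first sample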
def fit_shifts(xy, uv):
    """ Performs a simple fit for the shift only between
        matched lists of positions 'xy' and 'uv'.

        Output: (same as for fit_arrays)

        =================================
        DEVELOPMENT NOTE:
            Checks need to be put in place to verify that
            enough objects are available for a fit.
        =================================
    """
    diff_pts = xy - uv
    Pcoeffs = np.array([1.0, 0.0, diff_pts[:, 0].mean(dtype=np.float64)])
    Qcoeffs = np.array([0.0, 1.0, diff_pts[:, 1].mean(dtype=np.float64)])
    fit = build_fit(Pcoeffs, Qcoeffs, 'shift')
    resids = diff_pts - fit['offset']
    fit['resids'] = resids
    fit['rms'] = resids.std(axis=0)
    fit['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
    fit['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
    return fit
def function[fit_shifts, parameter[xy, uv]]: constant[ Performs a simple fit for the shift only between matched lists of positions 'xy' and 'uv'. Output: (same as for fit_arrays) ================================= DEVELOPMENT NOTE: Checks need to be put in place to verify that enough objects are available for a fit. ================================= ] variable[diff_pts] assign[=] binary_operation[name[xy] - name[uv]] variable[Pcoeffs] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b1baeec0>, <ast.Constant object at 0x7da1b1bac0a0>, <ast.Call object at 0x7da1b1bac610>]]]] variable[Qcoeffs] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b1bacf70>, <ast.Constant object at 0x7da1b1baef80>, <ast.Call object at 0x7da1b1baca00>]]]] variable[fit] assign[=] call[name[build_fit], parameter[name[Pcoeffs], name[Qcoeffs], constant[shift]]] variable[resids] assign[=] binary_operation[name[diff_pts] - call[name[fit]][constant[offset]]] call[name[fit]][constant[resids]] assign[=] name[resids] call[name[fit]][constant[rms]] assign[=] call[name[resids].std, parameter[]] call[name[fit]][constant[rmse]] assign[=] call[name[float], parameter[call[name[np].sqrt, parameter[call[name[np].mean, parameter[binary_operation[constant[2] * binary_operation[name[resids] ** constant[2]]]]]]]]] call[name[fit]][constant[mae]] assign[=] call[name[float], parameter[call[name[np].mean, parameter[call[name[np].linalg.norm, parameter[name[resids]]]]]]] return[name[fit]]
keyword[def] identifier[fit_shifts] ( identifier[xy] , identifier[uv] ): literal[string] identifier[diff_pts] = identifier[xy] - identifier[uv] identifier[Pcoeffs] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , identifier[diff_pts] [:, literal[int] ]. identifier[mean] ( identifier[dtype] = identifier[np] . identifier[float64] )]) identifier[Qcoeffs] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , identifier[diff_pts] [:, literal[int] ]. identifier[mean] ( identifier[dtype] = identifier[np] . identifier[float64] )]) identifier[fit] = identifier[build_fit] ( identifier[Pcoeffs] , identifier[Qcoeffs] , literal[string] ) identifier[resids] = identifier[diff_pts] - identifier[fit] [ literal[string] ] identifier[fit] [ literal[string] ]= identifier[resids] identifier[fit] [ literal[string] ]= identifier[resids] . identifier[std] ( identifier[axis] = literal[int] ) identifier[fit] [ literal[string] ]= identifier[float] ( identifier[np] . identifier[sqrt] ( identifier[np] . identifier[mean] ( literal[int] * identifier[resids] ** literal[int] ))) identifier[fit] [ literal[string] ]= identifier[float] ( identifier[np] . identifier[mean] ( identifier[np] . identifier[linalg] . identifier[norm] ( identifier[resids] , identifier[axis] = literal[int] ))) keyword[return] identifier[fit]
def fit_shifts(xy, uv):
    """ Performs a simple fit for the shift only between
        matched lists of positions 'xy' and 'uv'.

        Output: (same as for fit_arrays)

        =================================
        DEVELOPMENT NOTE:
            Checks need to be put in place to verify that
            enough objects are available for a fit.
        =================================
    """
    diff_pts = xy - uv
    Pcoeffs = np.array([1.0, 0.0, diff_pts[:, 0].mean(dtype=np.float64)])
    Qcoeffs = np.array([0.0, 1.0, diff_pts[:, 1].mean(dtype=np.float64)])
    fit = build_fit(Pcoeffs, Qcoeffs, 'shift')
    resids = diff_pts - fit['offset']
    fit['resids'] = resids
    fit['rms'] = resids.std(axis=0)
    fit['rmse'] = float(np.sqrt(np.mean(2 * resids ** 2)))
    fit['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
    return fit
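A note on the error metrics in fit_shifts above: resids has shape (N, 2), so np.mean(2 * resids**2) averages over 2N entries and the factor of 2 restores the per-point sum dx^2 + dy^2. fit['rmse'] is therefore the root-mean-square of the 2-D residual distances, and fit['mae'] is the mean of those same distances via np.linalg.norm(resids, axis=1).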
def _started_channels(self):
    """Reimplemented to make a history request and load %guiref."""
    super(IPythonWidget, self)._started_channels()
    self._load_guiref_magic()
    self.kernel_manager.shell_channel.history(hist_access_type='tail', n=1000)
def function[_started_channels, parameter[self]]: constant[Reimplemented to make a history request and load %guiref.] call[call[name[super], parameter[name[IPythonWidget], name[self]]]._started_channels, parameter[]] call[name[self]._load_guiref_magic, parameter[]] call[name[self].kernel_manager.shell_channel.history, parameter[]]
keyword[def] identifier[_started_channels] ( identifier[self] ): literal[string] identifier[super] ( identifier[IPythonWidget] , identifier[self] ). identifier[_started_channels] () identifier[self] . identifier[_load_guiref_magic] () identifier[self] . identifier[kernel_manager] . identifier[shell_channel] . identifier[history] ( identifier[hist_access_type] = literal[string] , identifier[n] = literal[int] )
def _started_channels(self):
    """Reimplemented to make a history request and load %guiref."""
    super(IPythonWidget, self)._started_channels()
    self._load_guiref_magic()
    self.kernel_manager.shell_channel.history(hist_access_type='tail', n=1000)
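The code_sememe column is a linearized AST. As a rough illustration only — this is not the pipeline that produced the dataset, just a simplified linearizer in the same spirit using Python's stdlib ast module:

    # Illustrative only: a toy bracketed-AST linearizer, not the dataset's tool.
    import ast

    def linearize(node):
        """Render an AST node as a bracketed token stream."""
        if isinstance(node, ast.Name):
            return "name[%s]" % node.id
        if isinstance(node, ast.Constant):
            return "constant[%r]" % node.value
        children = []
        for _, value in ast.iter_fields(node):
            if isinstance(value, ast.AST):
                children.append(linearize(value))
            elif isinstance(value, list):
                children.extend(linearize(v) for v in value if isinstance(v, ast.AST))
        return "%s[%s]" % (type(node).__name__.lower(), ", ".join(children))

    tree = ast.parse("diff_pts = xy - uv")
    print(linearize(tree.body[0]))
    # -> assign[name[diff_pts], binop[name[xy], sub[], name[uv]]]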
def show_firmware_version_output_show_firmware_version_firmware_full_version(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_firmware_version = ET.Element("show_firmware_version")
    config = show_firmware_version
    output = ET.SubElement(show_firmware_version, "output")
    show_firmware_version = ET.SubElement(output, "show-firmware-version")
    firmware_full_version = ET.SubElement(show_firmware_version, "firmware-full-version")
    firmware_full_version.text = kwargs.pop('firmware_full_version')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def function[show_firmware_version_output_show_firmware_version_firmware_full_version, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[show_firmware_version] assign[=] call[name[ET].Element, parameter[constant[show_firmware_version]]] variable[config] assign[=] name[show_firmware_version] variable[output] assign[=] call[name[ET].SubElement, parameter[name[show_firmware_version], constant[output]]] variable[show_firmware_version] assign[=] call[name[ET].SubElement, parameter[name[output], constant[show-firmware-version]]] variable[firmware_full_version] assign[=] call[name[ET].SubElement, parameter[name[show_firmware_version], constant[firmware-full-version]]] name[firmware_full_version].text assign[=] call[name[kwargs].pop, parameter[constant[firmware_full_version]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[show_firmware_version_output_show_firmware_version_firmware_full_version] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[show_firmware_version] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[show_firmware_version] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[show_firmware_version] , literal[string] ) identifier[show_firmware_version] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[firmware_full_version] = identifier[ET] . identifier[SubElement] ( identifier[show_firmware_version] , literal[string] ) identifier[firmware_full_version] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def show_firmware_version_output_show_firmware_version_firmware_full_version(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element('config')
    show_firmware_version = ET.Element('show_firmware_version')
    config = show_firmware_version
    output = ET.SubElement(show_firmware_version, 'output')
    show_firmware_version = ET.SubElement(output, 'show-firmware-version')
    firmware_full_version = ET.SubElement(show_firmware_version, 'firmware-full-version')
    firmware_full_version.text = kwargs.pop('firmware_full_version')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
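The token_type column tags each lexical token. A rough approximation with the stdlib tokenize module — again, not the original tool, and it uses a coarser literal tag than the dataset's literal[string]/literal[int]:

    # Illustrative only: approximates the token_type tagging with the stdlib.
    import io
    import keyword
    import token
    import tokenize

    def token_types(source):
        out = []
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok.type == token.NAME:
                kind = "keyword" if keyword.iskeyword(tok.string) else "identifier"
                out.append("%s[%s]" % (kind, tok.string))
            elif tok.type in (token.NUMBER, token.STRING):
                out.append("literal[%s]" % token.tok_name[tok.type].lower())
            elif tok.type == token.OP:
                out.append(tok.string)
        return " ".join(out)

    print(token_types("def f(n): return n + 1"))
    # -> keyword[def] identifier[f] ( identifier[n] ) : keyword[return] identifier[n] + literal[number]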
def compute_hash_speed(num, quiet=False):  # type: (int, bool) -> float
    """ Hash time.
    """
    namelist = NameList(num)
    os_fd, tmpfile_name = tempfile.mkstemp(text=True)
    schema = NameList.SCHEMA
    header_row = ','.join([f.identifier for f in schema.fields])
    with open(tmpfile_name, 'wt') as f:
        f.write(header_row)
        f.write('\n')
        for person in namelist.names:
            print(','.join([str(field) for field in person]), file=f)
    with open(tmpfile_name, 'rt') as f:
        start = timer()
        generate_clk_from_csv(f, ('key1', 'key2'), schema, progress_bar=not quiet)
        end = timer()
    os.close(os_fd)
    os.remove(tmpfile_name)
    elapsed_time = end - start
    if not quiet:
        print("{:6d} hashes in {:.6f} seconds. {:.2f} KH/s".format(
            num, elapsed_time, num / (1000 * elapsed_time)))
    return num / elapsed_time
def function[compute_hash_speed, parameter[num, quiet]]: constant[ Hash time. ] variable[namelist] assign[=] call[name[NameList], parameter[name[num]]] <ast.Tuple object at 0x7da18ede6830> assign[=] call[name[tempfile].mkstemp, parameter[]] variable[schema] assign[=] name[NameList].SCHEMA variable[header_row] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da2041da980>]] with call[name[open], parameter[name[tmpfile_name], constant[wt]]] begin[:] call[name[f].write, parameter[name[header_row]]] call[name[f].write, parameter[constant[ ]]] for taget[name[person]] in starred[name[namelist].names] begin[:] call[name[print], parameter[call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b196ef80>]]]] with call[name[open], parameter[name[tmpfile_name], constant[rt]]] begin[:] variable[start] assign[=] call[name[timer], parameter[]] call[name[generate_clk_from_csv], parameter[name[f], tuple[[<ast.Constant object at 0x7da1b196d9f0>, <ast.Constant object at 0x7da1b196dc90>]], name[schema]]] variable[end] assign[=] call[name[timer], parameter[]] call[name[os].close, parameter[name[os_fd]]] call[name[os].remove, parameter[name[tmpfile_name]]] variable[elapsed_time] assign[=] binary_operation[name[end] - name[start]] if <ast.UnaryOp object at 0x7da18ede43a0> begin[:] call[name[print], parameter[call[constant[{:6d} hashes in {:.6f} seconds. {:.2f} KH/s].format, parameter[name[num], name[elapsed_time], binary_operation[name[num] / binary_operation[constant[1000] * name[elapsed_time]]]]]]] return[binary_operation[name[num] / name[elapsed_time]]]
keyword[def] identifier[compute_hash_speed] ( identifier[num] , identifier[quiet] = keyword[False] ): literal[string] identifier[namelist] = identifier[NameList] ( identifier[num] ) identifier[os_fd] , identifier[tmpfile_name] = identifier[tempfile] . identifier[mkstemp] ( identifier[text] = keyword[True] ) identifier[schema] = identifier[NameList] . identifier[SCHEMA] identifier[header_row] = literal[string] . identifier[join] ([ identifier[f] . identifier[identifier] keyword[for] identifier[f] keyword[in] identifier[schema] . identifier[fields] ]) keyword[with] identifier[open] ( identifier[tmpfile_name] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[header_row] ) identifier[f] . identifier[write] ( literal[string] ) keyword[for] identifier[person] keyword[in] identifier[namelist] . identifier[names] : identifier[print] ( literal[string] . identifier[join] ([ identifier[str] ( identifier[field] ) keyword[for] identifier[field] keyword[in] identifier[person] ]), identifier[file] = identifier[f] ) keyword[with] identifier[open] ( identifier[tmpfile_name] , literal[string] ) keyword[as] identifier[f] : identifier[start] = identifier[timer] () identifier[generate_clk_from_csv] ( identifier[f] ,( literal[string] , literal[string] ), identifier[schema] , identifier[progress_bar] = keyword[not] identifier[quiet] ) identifier[end] = identifier[timer] () identifier[os] . identifier[close] ( identifier[os_fd] ) identifier[os] . identifier[remove] ( identifier[tmpfile_name] ) identifier[elapsed_time] = identifier[end] - identifier[start] keyword[if] keyword[not] identifier[quiet] : identifier[print] ( literal[string] . identifier[format] ( identifier[num] , identifier[elapsed_time] , identifier[num] /( literal[int] * identifier[elapsed_time] ))) keyword[return] identifier[num] / identifier[elapsed_time]
def compute_hash_speed(num, quiet=False):  # type: (int, bool) -> float
    ' Hash time.\n '
    namelist = NameList(num)
    (os_fd, tmpfile_name) = tempfile.mkstemp(text=True)
    schema = NameList.SCHEMA
    header_row = ','.join([f.identifier for f in schema.fields])
    with open(tmpfile_name, 'wt') as f:
        f.write(header_row)
        f.write('\n')
        for person in namelist.names:
            print(','.join([str(field) for field in person]), file=f) # depends on [control=['for'], data=['person']]
        # depends on [control=['with'], data=['f']]
    with open(tmpfile_name, 'rt') as f:
        start = timer()
        generate_clk_from_csv(f, ('key1', 'key2'), schema, progress_bar=not quiet)
        end = timer() # depends on [control=['with'], data=['f']]
    os.close(os_fd)
    os.remove(tmpfile_name)
    elapsed_time = end - start
    if not quiet:
        print('{:6d} hashes in {:.6f} seconds. {:.2f} KH/s'.format(num, elapsed_time, num / (1000 * elapsed_time))) # depends on [control=['if'], data=[]]
    return num / elapsed_time
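The `# depends on [control=..., data=...]` comments in the code_dependency column mark, at the end of each suite, the control construct (if/for/with/try/except) the preceding statements were nested under. Judging from the samples in this dump, the data list appears to name the variables the condition or binding involves (e.g. data=['person'] for the loop variable above, data=['f'] for the with-binding), while bare truthiness tests such as `if not quiet:` tend to show data=[].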
def complete(self):
    """Mark a multipart object as complete."""
    if Part.count(self) != self.last_part_number + 1:
        raise MultipartMissingParts()
    with db.session.begin_nested():
        self.completed = True
        self.file.readable = True
        self.file.writable = False
    return self
def function[complete, parameter[self]]: constant[Mark a multipart object as complete.] if compare[call[name[Part].count, parameter[name[self]]] not_equal[!=] binary_operation[name[self].last_part_number + constant[1]]] begin[:] <ast.Raise object at 0x7da1b19a2f20> with call[name[db].session.begin_nested, parameter[]] begin[:] name[self].completed assign[=] constant[True] name[self].file.readable assign[=] constant[True] name[self].file.writable assign[=] constant[False] return[name[self]]
keyword[def] identifier[complete] ( identifier[self] ): literal[string] keyword[if] identifier[Part] . identifier[count] ( identifier[self] )!= identifier[self] . identifier[last_part_number] + literal[int] : keyword[raise] identifier[MultipartMissingParts] () keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] (): identifier[self] . identifier[completed] = keyword[True] identifier[self] . identifier[file] . identifier[readable] = keyword[True] identifier[self] . identifier[file] . identifier[writable] = keyword[False] keyword[return] identifier[self]
def complete(self):
    """Mark a multipart object as complete."""
    if Part.count(self) != self.last_part_number + 1:
        raise MultipartMissingParts() # depends on [control=['if'], data=[]]
    with db.session.begin_nested():
        self.completed = True
        self.file.readable = True
        self.file.writable = False # depends on [control=['with'], data=[]]
    return self
def skip_build(self):
    """Check if build should be skipped
    """
    skip_msg = self.config.get('skip', '[ci skip]')
    return (
        os.environ.get('CODEBUILD_BUILD_SUCCEEDING') == '0'
        or self.info['current_tag']
        or skip_msg in self.info['head']['message']
    )
def function[skip_build, parameter[self]]: constant[Check if build should be skipped ] variable[skip_msg] assign[=] call[name[self].config.get, parameter[constant[skip], constant[[ci skip]]]] return[<ast.BoolOp object at 0x7da204566740>]
keyword[def] identifier[skip_build] ( identifier[self] ): literal[string] identifier[skip_msg] = identifier[self] . identifier[config] . identifier[get] ( literal[string] , literal[string] ) keyword[return] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] )== literal[string] keyword[or] identifier[self] . identifier[info] [ literal[string] ] keyword[or] identifier[skip_msg] keyword[in] identifier[self] . identifier[info] [ literal[string] ][ literal[string] ] )
def skip_build(self):
    """Check if build should be skipped
    """
    skip_msg = self.config.get('skip', '[ci skip]')
    return os.environ.get('CODEBUILD_BUILD_SUCCEEDING') == '0' or self.info['current_tag'] or skip_msg in self.info['head']['message']
def _CronJobFromRow(self, row):
    """Creates a cronjob object from a database result row."""
    (job, create_time, enabled, forced_run_requested, last_run_status,
     last_run_time, current_run_id, state, leased_until, leased_by) = row
    job = rdf_cronjobs.CronJob.FromSerializedString(job)
    job.current_run_id = db_utils.IntToCronJobRunID(current_run_id)
    job.enabled = enabled
    job.forced_run_requested = forced_run_requested
    job.last_run_status = last_run_status
    job.last_run_time = mysql_utils.TimestampToRDFDatetime(last_run_time)
    if state:
        job.state = rdf_protodict.AttributedDict.FromSerializedString(state)
    job.created_at = mysql_utils.TimestampToRDFDatetime(create_time)
    job.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
    job.leased_by = leased_by
    return job
def function[_CronJobFromRow, parameter[self, row]]: constant[Creates a cronjob object from a database result row.] <ast.Tuple object at 0x7da1b1d90850> assign[=] name[row] variable[job] assign[=] call[name[rdf_cronjobs].CronJob.FromSerializedString, parameter[name[job]]] name[job].current_run_id assign[=] call[name[db_utils].IntToCronJobRunID, parameter[name[current_run_id]]] name[job].enabled assign[=] name[enabled] name[job].forced_run_requested assign[=] name[forced_run_requested] name[job].last_run_status assign[=] name[last_run_status] name[job].last_run_time assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[last_run_time]]] if name[state] begin[:] name[job].state assign[=] call[name[rdf_protodict].AttributedDict.FromSerializedString, parameter[name[state]]] name[job].created_at assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[create_time]]] name[job].leased_until assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[leased_until]]] name[job].leased_by assign[=] name[leased_by] return[name[job]]
keyword[def] identifier[_CronJobFromRow] ( identifier[self] , identifier[row] ): literal[string] ( identifier[job] , identifier[create_time] , identifier[enabled] , identifier[forced_run_requested] , identifier[last_run_status] , identifier[last_run_time] , identifier[current_run_id] , identifier[state] , identifier[leased_until] , identifier[leased_by] )= identifier[row] identifier[job] = identifier[rdf_cronjobs] . identifier[CronJob] . identifier[FromSerializedString] ( identifier[job] ) identifier[job] . identifier[current_run_id] = identifier[db_utils] . identifier[IntToCronJobRunID] ( identifier[current_run_id] ) identifier[job] . identifier[enabled] = identifier[enabled] identifier[job] . identifier[forced_run_requested] = identifier[forced_run_requested] identifier[job] . identifier[last_run_status] = identifier[last_run_status] identifier[job] . identifier[last_run_time] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] ( identifier[last_run_time] ) keyword[if] identifier[state] : identifier[job] . identifier[state] = identifier[rdf_protodict] . identifier[AttributedDict] . identifier[FromSerializedString] ( identifier[state] ) identifier[job] . identifier[created_at] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] ( identifier[create_time] ) identifier[job] . identifier[leased_until] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] ( identifier[leased_until] ) identifier[job] . identifier[leased_by] = identifier[leased_by] keyword[return] identifier[job]
def _CronJobFromRow(self, row):
    """Creates a cronjob object from a database result row."""
    (job, create_time, enabled, forced_run_requested, last_run_status,
     last_run_time, current_run_id, state, leased_until, leased_by) = row
    job = rdf_cronjobs.CronJob.FromSerializedString(job)
    job.current_run_id = db_utils.IntToCronJobRunID(current_run_id)
    job.enabled = enabled
    job.forced_run_requested = forced_run_requested
    job.last_run_status = last_run_status
    job.last_run_time = mysql_utils.TimestampToRDFDatetime(last_run_time)
    if state:
        job.state = rdf_protodict.AttributedDict.FromSerializedString(state) # depends on [control=['if'], data=[]]
    job.created_at = mysql_utils.TimestampToRDFDatetime(create_time)
    job.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
    job.leased_by = leased_by
    return job
def set_sorting(self, flag):
    """Enable result sorting after search is complete."""
    self.sorting['status'] = flag
    self.header().setSectionsClickable(flag == ON)
def function[set_sorting, parameter[self, flag]]: constant[Enable result sorting after search is complete.] call[name[self].sorting][constant[status]] assign[=] name[flag] call[call[name[self].header, parameter[]].setSectionsClickable, parameter[compare[name[flag] equal[==] name[ON]]]]
keyword[def] identifier[set_sorting] ( identifier[self] , identifier[flag] ): literal[string] identifier[self] . identifier[sorting] [ literal[string] ]= identifier[flag] identifier[self] . identifier[header] (). identifier[setSectionsClickable] ( identifier[flag] == identifier[ON] )
def set_sorting(self, flag):
    """Enable result sorting after search is complete."""
    self.sorting['status'] = flag
    self.header().setSectionsClickable(flag == ON)
def read_local_manifest(self):
    """ Read the file manifest, or create a new one if there isn't one already """
    manifest = file_or_default(self.get_full_file_path(self.manifest_file), {
        'format_version': 2,
        'root': '/',
        'have_revision': 'root',
        'files': {}}, json.loads)
    if 'format_version' not in manifest or manifest['format_version'] < 2:
        raise SystemExit('Please update the client manifest format')
    return manifest
def function[read_local_manifest, parameter[self]]: constant[ Read the file manifest, or create a new one if there isn't one already ] variable[manifest] assign[=] call[name[file_or_default], parameter[call[name[self].get_full_file_path, parameter[name[self].manifest_file]], dictionary[[<ast.Constant object at 0x7da18f811e40>, <ast.Constant object at 0x7da18f812530>, <ast.Constant object at 0x7da18f813fd0>, <ast.Constant object at 0x7da18f811a80>], [<ast.Constant object at 0x7da18f810220>, <ast.Constant object at 0x7da18f812ef0>, <ast.Constant object at 0x7da18f8103d0>, <ast.Dict object at 0x7da18f811180>]], name[json].loads]] if <ast.BoolOp object at 0x7da18f811630> begin[:] <ast.Raise object at 0x7da18f8104c0> return[name[manifest]]
keyword[def] identifier[read_local_manifest] ( identifier[self] ): literal[string] identifier[manifest] = identifier[file_or_default] ( identifier[self] . identifier[get_full_file_path] ( identifier[self] . identifier[manifest_file] ),{ literal[string] : literal[int] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :{}}, identifier[json] . identifier[loads] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[manifest] keyword[or] identifier[manifest] [ literal[string] ]< literal[int] : keyword[raise] identifier[SystemExit] ( literal[string] ) keyword[return] identifier[manifest]
def read_local_manifest(self):
    """ Read the file manifest, or create a new one if there isn't one already """
    manifest = file_or_default(self.get_full_file_path(self.manifest_file),
                               {'format_version': 2, 'root': '/', 'have_revision': 'root', 'files': {}},
                               json.loads)
    if 'format_version' not in manifest or manifest['format_version'] < 2:
        raise SystemExit('Please update the client manifest format') # depends on [control=['if'], data=[]]
    return manifest
def _to_temperature(self, temperature):
    """ Step to a given temperature.

    :param temperature: Get to this temperature.
    """
    self._to_value(self._temperature, temperature,
                   self.command_set.temperature_steps,
                   self._warmer, self._cooler)
def function[_to_temperature, parameter[self, temperature]]: constant[ Step to a given temperature. :param temperature: Get to this temperature. ] call[name[self]._to_value, parameter[name[self]._temperature, name[temperature], name[self].command_set.temperature_steps, name[self]._warmer, name[self]._cooler]]
keyword[def] identifier[_to_temperature] ( identifier[self] , identifier[temperature] ): literal[string] identifier[self] . identifier[_to_value] ( identifier[self] . identifier[_temperature] , identifier[temperature] , identifier[self] . identifier[command_set] . identifier[temperature_steps] , identifier[self] . identifier[_warmer] , identifier[self] . identifier[_cooler] )
def _to_temperature(self, temperature):
    """ Step to a given temperature.

    :param temperature: Get to this temperature.
    """
    self._to_value(self._temperature, temperature,
                   self.command_set.temperature_steps,
                   self._warmer, self._cooler)
def value_derived_from_wavefunction(self,
                                    state: np.ndarray,
                                    qubit_map: Dict[raw_types.Qid, int]
                                    ) -> Any:
    """The value of the display, derived from the full wavefunction.

    Args:
        state: The wavefunction.
        qubit_map: A dictionary from qubit to qubit index in the ordering
            used to define the wavefunction.
    """
def function[value_derived_from_wavefunction, parameter[self, state, qubit_map]]: constant[The value of the display, derived from the full wavefunction. Args: state: The wavefunction. qubit_map: A dictionary from qubit to qubit index in the ordering used to define the wavefunction. ]
keyword[def] identifier[value_derived_from_wavefunction] ( identifier[self] , identifier[state] : identifier[np] . identifier[ndarray] , identifier[qubit_map] : identifier[Dict] [ identifier[raw_types] . identifier[Qid] , identifier[int] ] )-> identifier[Any] : literal[string]
def value_derived_from_wavefunction(self,
                                    state: np.ndarray,
                                    qubit_map: Dict[raw_types.Qid, int]
                                    ) -> Any:
    """The value of the display, derived from the full wavefunction.

    Args:
        state: The wavefunction.
        qubit_map: A dictionary from qubit to qubit index in the ordering
            used to define the wavefunction.
    """
def main():
    """ program entry point """
    parser = create_parser()
    options = vars(parser.parse_args())
    HASH_STORE.IGNORE_CACHE_FILE = options[constants.LABEL_FORCE]
    moban_file = options[constants.LABEL_MOBANFILE]
    load_engine_factory_and_engines()  # Error: jinja2 if removed
    if moban_file is None:
        moban_file = mobanfile.find_default_moban_file()
    if moban_file:
        try:
            count = handle_moban_file(moban_file, options)
            moban_exit(options[constants.LABEL_EXIT_CODE], count)
        except (
            exceptions.DirectoryNotFound,
            exceptions.NoThirdPartyEngine,
            exceptions.MobanfileGrammarException,
        ) as e:
            reporter.report_error_message(str(e))
            moban_exit(options[constants.LABEL_EXIT_CODE], constants.ERROR)
    else:
        try:
            count = handle_command_line(options)
            moban_exit(options[constants.LABEL_EXIT_CODE], count)
        except exceptions.NoTemplate as e:
            reporter.report_error_message(str(e))
            moban_exit(options[constants.LABEL_EXIT_CODE], constants.ERROR)
def function[main, parameter[]]: constant[ program entry point ] variable[parser] assign[=] call[name[create_parser], parameter[]] variable[options] assign[=] call[name[vars], parameter[call[name[parser].parse_args, parameter[]]]] name[HASH_STORE].IGNORE_CACHE_FILE assign[=] call[name[options]][name[constants].LABEL_FORCE] variable[moban_file] assign[=] call[name[options]][name[constants].LABEL_MOBANFILE] call[name[load_engine_factory_and_engines], parameter[]] if compare[name[moban_file] is constant[None]] begin[:] variable[moban_file] assign[=] call[name[mobanfile].find_default_moban_file, parameter[]] if name[moban_file] begin[:] <ast.Try object at 0x7da1b0b50e20>
keyword[def] identifier[main] (): literal[string] identifier[parser] = identifier[create_parser] () identifier[options] = identifier[vars] ( identifier[parser] . identifier[parse_args] ()) identifier[HASH_STORE] . identifier[IGNORE_CACHE_FILE] = identifier[options] [ identifier[constants] . identifier[LABEL_FORCE] ] identifier[moban_file] = identifier[options] [ identifier[constants] . identifier[LABEL_MOBANFILE] ] identifier[load_engine_factory_and_engines] () keyword[if] identifier[moban_file] keyword[is] keyword[None] : identifier[moban_file] = identifier[mobanfile] . identifier[find_default_moban_file] () keyword[if] identifier[moban_file] : keyword[try] : identifier[count] = identifier[handle_moban_file] ( identifier[moban_file] , identifier[options] ) identifier[moban_exit] ( identifier[options] [ identifier[constants] . identifier[LABEL_EXIT_CODE] ], identifier[count] ) keyword[except] ( identifier[exceptions] . identifier[DirectoryNotFound] , identifier[exceptions] . identifier[NoThirdPartyEngine] , identifier[exceptions] . identifier[MobanfileGrammarException] , ) keyword[as] identifier[e] : identifier[reporter] . identifier[report_error_message] ( identifier[str] ( identifier[e] )) identifier[moban_exit] ( identifier[options] [ identifier[constants] . identifier[LABEL_EXIT_CODE] ], identifier[constants] . identifier[ERROR] ) keyword[else] : keyword[try] : identifier[count] = identifier[handle_command_line] ( identifier[options] ) identifier[moban_exit] ( identifier[options] [ identifier[constants] . identifier[LABEL_EXIT_CODE] ], identifier[count] ) keyword[except] identifier[exceptions] . identifier[NoTemplate] keyword[as] identifier[e] : identifier[reporter] . identifier[report_error_message] ( identifier[str] ( identifier[e] )) identifier[moban_exit] ( identifier[options] [ identifier[constants] . identifier[LABEL_EXIT_CODE] ], identifier[constants] . identifier[ERROR] )
def main():
    """ program entry point """
    parser = create_parser()
    options = vars(parser.parse_args())
    HASH_STORE.IGNORE_CACHE_FILE = options[constants.LABEL_FORCE]
    moban_file = options[constants.LABEL_MOBANFILE]
    load_engine_factory_and_engines()  # Error: jinja2 if removed
    if moban_file is None:
        moban_file = mobanfile.find_default_moban_file() # depends on [control=['if'], data=['moban_file']]
    if moban_file:
        try:
            count = handle_moban_file(moban_file, options)
            moban_exit(options[constants.LABEL_EXIT_CODE], count) # depends on [control=['try'], data=[]]
        except (exceptions.DirectoryNotFound, exceptions.NoThirdPartyEngine, exceptions.MobanfileGrammarException) as e:
            reporter.report_error_message(str(e))
            moban_exit(options[constants.LABEL_EXIT_CODE], constants.ERROR) # depends on [control=['except'], data=['e']]
        # depends on [control=['if'], data=[]]
    else:
        try:
            count = handle_command_line(options)
            moban_exit(options[constants.LABEL_EXIT_CODE], count) # depends on [control=['try'], data=[]]
        except exceptions.NoTemplate as e:
            reporter.report_error_message(str(e))
            moban_exit(options[constants.LABEL_EXIT_CODE], constants.ERROR) # depends on [control=['except'], data=['e']]
def write_long(self, n):
    """Write an integer as an unsigned 32-bit value."""
    if n < 0 or n >= 4294967296:
        raise FrameSyntaxError(
            'Octet {0!r} out of range 0..2**31-1'.format(n))
    self._flushbits()
    self.out.write(pack('>I', n))
def function[write_long, parameter[self, n]]: constant[Write an integer as an unsigned2 32-bit value.] if <ast.BoolOp object at 0x7da1b17bb970> begin[:] <ast.Raise object at 0x7da1b17bbca0> call[name[self]._flushbits, parameter[]] call[name[self].out.write, parameter[call[name[pack], parameter[constant[>I], name[n]]]]]
keyword[def] identifier[write_long] ( identifier[self] , identifier[n] ): literal[string] keyword[if] identifier[n] < literal[int] keyword[or] identifier[n] >= literal[int] : keyword[raise] identifier[FrameSyntaxError] ( literal[string] . identifier[format] ( identifier[n] )) identifier[self] . identifier[_flushbits] () identifier[self] . identifier[out] . identifier[write] ( identifier[pack] ( literal[string] , identifier[n] ))
def write_long(self, n):
    """Write an integer as an unsigned 32-bit value."""
    if n < 0 or n >= 4294967296:
        raise FrameSyntaxError('Octet {0!r} out of range 0..2**31-1'.format(n)) # depends on [control=['if'], data=[]]
    self._flushbits()
    self.out.write(pack('>I', n))
def do_query(self, query, timeout=DEFAULT_TIMEOUT, tz=pytz.timezone("US/Pacific")):
    """
    Query structure is as follows:

    query = {
        # We bind UUIDs found as the result of a Brick query to a variable name
        # that we can use later.
        # Each variable definition has the following:
        #  - name: how we will refer to this group of UUIDs
        #  - definition: a Brick query. The SELECT clause should return variables that end in '_uuid',
        #    which can be found as the object of a 'bf:uuid' relationship
        #  - units: what units we want to retrieve this stream as. Currently supports W/kW, Wh/kWh, F/C, Lux
        "Variables": [
            {"Name": "meter",
             "Definition": "SELECT ?meter_uuid WHERE { ?meter rdf:type/rdfs:subClassOf* brick:Electric_Meter . ?meter bf:uuid ?meter_uuid . };",
             "Units": "kW",
            },
            {"Name": "temp",
             "Definition": "SELECT ?temp_uuid WHERE { ?temp rdf:type/rdfs:subClassOf* brick:Temperature_Sensor . ?temp bf:uuid ?temp_uuid . };",
             "Units": "F",
            },
        ],

        # this is the composition of the data matrix we are returning. Below, all the uuids for the
        # "meter" variable will be placed before all of the uuids for the "temp" variable. We cannot
        # guarantee order of uuids within those groups, but the ordering of the groups will be
        # preserved. Explicit UUIDs can also be used here
        "Composition": ["meter", "temp"],

        # If we are retrieving statistical data, then we need to say which statistical elements we
        # want to download. The options are RAW, MEAN, MIN, MAX and COUNT. To query multiple, you
        # can OR them together (e.g. MEAN|MAX). This maps 1-1 to the "Composition" field
        "Selectors": [MEAN, MEAN],

        # Temporal parameters for the query. Retrieves data in the range [T0, T1]. By convention,
        # T0 < T1, but MDAL will take care of it if this is reversed.
        # WindowSize is the size of the resample window in nanoseconds.
        # If Aligned is true, then MDAL will snap all data to the beginning of the window (e.g. if
        # 5min window + Aligned=true, then all timestamps will be on 00:05:00, 00:10:00, 00:15:00, etc)
        "Time": {
            "T0": "2017-08-01 00:00:00",
            "T1": "2017-08-08 00:00:00",
            "WindowSize": '2h',
            "Aligned": True,
        },
    }
    """
    nonce = str(random.randint(0, 2**32))
    query['Nonce'] = nonce
    ev = threading.Event()
    response = {}

    def _handleresult(msg):
        got_response = False
        for po in msg.payload_objects:
            if po.type_dotted != (2, 0, 10, 4):
                continue
            data = msgpack.unpackb(po.content)
            if data['Nonce'] != query['Nonce']:
                continue
            if 'error' in data:
                response['error'] = data['error']
                response['df'] = None
                got_response = True
                continue
            uuids = [str(uuid.UUID(bytes=x)) for x in data['Rows']]
            data = data_capnp.StreamCollection.from_bytes_packed(data['Data'])
            if hasattr(data, 'times') and len(data.times):
                times = list(data.times)
                if len(times) == 0:
                    response['df'] = pd.DataFrame(columns=uuids)
                    got_response = True
                    break
                df = pd.DataFrame(index=pd.to_datetime(times, unit='ns', utc=False))
                for idx, s in enumerate(data.streams):
                    if len(s.values) == 0:
                        df[uuids[idx]] = None
                    else:
                        df[uuids[idx]] = s.values
                df.index = df.index.tz_localize(pytz.utc).tz_convert(tz)
                response['df'] = df
                got_response = True
            else:
                df = pd.DataFrame()
                for idx, s in enumerate(data.streams):
                    if hasattr(s, 'times'):
                        newdf = pd.DataFrame(list(s.values), index=list(s.times), columns=[uuids[idx]])
                        newdf.index = pd.to_datetime(newdf.index, unit='ns').tz_localize(pytz.utc).tz_convert(tz)
                        df = df.join(newdf, how='outer')
                    else:
                        raise Exception("Does this ever happen? Tell gabe!")
                response['df'] = df
                got_response = True
        df = response.get('df')
        if df is not None:
            response['df'] = df  # [df.index.duplicated(keep='first')]
        if got_response:
            ev.set()

    h = self.c.subscribe("{0}/s.mdal/_/i.mdal/signal/{1}".format(self.url, self.vk[:-1]), _handleresult)
    po = PayloadObject((2, 0, 10, 3), None, msgpack.packb(query))
    self.c.publish("{0}/s.mdal/_/i.mdal/slot/query".format(self.url), payload_objects=(po,))
    ev.wait(timeout)
    self.c.unsubscribe(h)
    if 'error' in response:
        raise Exception(response['error'])
    return response
def function[do_query, parameter[self, query, timeout, tz]]: constant[ Query structure is as follows: query = { # We bind UUIDs found as the result of a Brick query to a variable name # that we can use later. # Each variable definition has the following: # - name: how we will refer to this group of UUIDs # - definition: a Brick query. The SELECT clause should return variables that end in '_uuid', which can be found as the # object of a 'bf:uuid' relationship # - units: what units we want to retrieve this stream as. Currently supports W/kW, Wh/kWh, F/C, Lux "Variables": [ {"Name": "meter", "Definition": "SELECT ?meter_uuid WHERE { ?meter rdf:type/rdfs:subClassOf* brick:Electric_Meter . ?meter bf:uuid ?meter_uuid . };", "Units": "kW", }, {"Name": "temp", "Definition": "SELECT ?temp_uuid WHERE { ?temp rdf:type/rdfs:subClassOf* brick:Temperature_Sensor . ?temp bf:uuid ?temp_uuid . };", "Units": "F", }, ], # this is the composition of the data matrix we are returning. Below, all the uuids for the "meter" variable will be placed before # all of the uuids for the "temp" variable. We cannot guarantee order of uuids within those groups, but the ordering of the groups # will be preserved. Explicit UUIDs can also be used here "Composition": ["meter", "temp"], # If we are retrieving statistical data, then we need to say which statistical elements we want to download. # The options are RAW, MEAN, MIN, MAX and COUNT. To query multiple, you can OR them together (e.g. MEAN|MAX). # This maps 1-1 to the "Composition" field "Selectors": [MEAN, MEAN], # Temporal parameters for the query. Retrieves data in the range [T0, T1]. By convention, T0 < T1, # but MDAL will take care of it if this is reversed. # WindowSize is the size of the resample window in nanoseconds # if Aligned is true, then MDAL will snap all data to the beginning of the window (e.g. if 5min window + Aligned=true, # then all timestamps will be on 00:05:00, 00:10:00, 00:15:00, etc) "Time": { "T0": "2017-08-01 00:00:00", "T1": "2017-08-08 00:00:00", "WindowSize": '2h', "Aligned": True, }, } ] variable[nonce] assign[=] call[name[str], parameter[call[name[random].randint, parameter[constant[0], binary_operation[constant[2] ** constant[32]]]]]] call[name[query]][constant[Nonce]] assign[=] name[nonce] variable[ev] assign[=] call[name[threading].Event, parameter[]] variable[response] assign[=] dictionary[[], []] def function[_handleresult, parameter[msg]]: variable[got_response] assign[=] constant[False] for taget[name[po]] in starred[name[msg].payload_objects] begin[:] if compare[name[po].type_dotted not_equal[!=] tuple[[<ast.Constant object at 0x7da18f00f610>, <ast.Constant object at 0x7da18f00e290>, <ast.Constant object at 0x7da18f00e650>, <ast.Constant object at 0x7da18f00d3f0>]]] begin[:] continue variable[data] assign[=] call[name[msgpack].unpackb, parameter[name[po].content]] if compare[call[name[data]][constant[Nonce]] not_equal[!=] call[name[query]][constant[Nonce]]] begin[:] continue if compare[constant[error] in name[data]] begin[:] call[name[response]][constant[error]] assign[=] call[name[data]][constant[error]] call[name[response]][constant[df]] assign[=] constant[None] variable[got_response] assign[=] constant[True] continue variable[uuids] assign[=] <ast.ListComp object at 0x7da18f00e6b0> variable[data] assign[=] call[name[data_capnp].StreamCollection.from_bytes_packed, parameter[call[name[data]][constant[Data]]]] if <ast.BoolOp object at 0x7da18f00ea70> begin[:] variable[times] assign[=] call[name[list], parameter[name[data].times]] if compare[call[name[len], parameter[name[times]]] equal[==] constant[0]] begin[:] call[name[response]][constant[df]] assign[=] call[name[pd].DataFrame, parameter[]] variable[got_response] assign[=] constant[True] break variable[df] assign[=] call[name[pd].DataFrame, parameter[]] for taget[tuple[[<ast.Name object at 0x7da18f00cfd0>, <ast.Name object at 0x7da18f00d750>]]] in starred[call[name[enumerate], parameter[name[data].streams]]] begin[:] if compare[call[name[len], parameter[name[s].values]] equal[==] constant[0]] begin[:] call[name[df]][call[name[uuids]][name[idx]]] assign[=] constant[None] name[df].index assign[=] call[call[name[df].index.tz_localize, parameter[name[pytz].utc]].tz_convert, parameter[name[tz]]] call[name[response]][constant[df]] assign[=] name[df] variable[got_response] assign[=] constant[True] variable[df] assign[=] call[name[response].get, parameter[constant[df]]] if compare[name[df] is_not constant[None]] begin[:] call[name[response]][constant[df]] assign[=] name[df] if name[got_response] begin[:] call[name[ev].set, parameter[]] variable[h] assign[=] call[name[self].c.subscribe, parameter[call[constant[{0}/s.mdal/_/i.mdal/signal/{1}].format, parameter[name[self].url, call[name[self].vk][<ast.Slice object at 0x7da18f00da80>]]], name[_handleresult]]] variable[po] assign[=] call[name[PayloadObject], parameter[tuple[[<ast.Constant object at 0x7da2044c1d50>, <ast.Constant object at 0x7da2044c27d0>, <ast.Constant object at 0x7da2044c1ff0>, <ast.Constant object at 0x7da2044c2b60>]], constant[None], call[name[msgpack].packb, parameter[name[query]]]]] call[name[self].c.publish, parameter[call[constant[{0}/s.mdal/_/i.mdal/slot/query].format, parameter[name[self].url]]]] call[name[ev].wait, parameter[name[timeout]]] call[name[self].c.unsubscribe, parameter[name[h]]] if compare[constant[error] in name[response]] begin[:] <ast.Raise object at 0x7da2044c2890> return[name[response]]
keyword[def] identifier[do_query] ( identifier[self] , identifier[query] , identifier[timeout] = identifier[DEFAULT_TIMEOUT] , identifier[tz] = identifier[pytz] . identifier[timezone] ( literal[string] )): literal[string] identifier[nonce] = identifier[str] ( identifier[random] . identifier[randint] ( literal[int] , literal[int] ** literal[int] )) identifier[query] [ literal[string] ]= identifier[nonce] identifier[ev] = identifier[threading] . identifier[Event] () identifier[response] ={} keyword[def] identifier[_handleresult] ( identifier[msg] ): identifier[got_response] = keyword[False] keyword[for] identifier[po] keyword[in] identifier[msg] . identifier[payload_objects] : keyword[if] identifier[po] . identifier[type_dotted] !=( literal[int] , literal[int] , literal[int] , literal[int] ): keyword[continue] identifier[data] = identifier[msgpack] . identifier[unpackb] ( identifier[po] . identifier[content] ) keyword[if] identifier[data] [ literal[string] ]!= identifier[query] [ literal[string] ]: keyword[continue] keyword[if] literal[string] keyword[in] identifier[data] : identifier[response] [ literal[string] ]= identifier[data] [ literal[string] ] identifier[response] [ literal[string] ]= keyword[None] identifier[got_response] = keyword[True] keyword[continue] identifier[uuids] =[ identifier[str] ( identifier[uuid] . identifier[UUID] ( identifier[bytes] = identifier[x] )) keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ]] identifier[data] = identifier[data_capnp] . identifier[StreamCollection] . identifier[from_bytes_packed] ( identifier[data] [ literal[string] ]) keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ) keyword[and] identifier[len] ( identifier[data] . identifier[times] ): identifier[times] = identifier[list] ( identifier[data] . identifier[times] ) keyword[if] identifier[len] ( identifier[times] )== literal[int] : identifier[response] [ literal[string] ]= identifier[pd] . identifier[DataFrame] ( identifier[columns] = identifier[uuids] ) identifier[got_response] = keyword[True] keyword[break] identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[pd] . identifier[to_datetime] ( identifier[times] , identifier[unit] = literal[string] , identifier[utc] = keyword[False] )) keyword[for] identifier[idx] , identifier[s] keyword[in] identifier[enumerate] ( identifier[data] . identifier[streams] ): keyword[if] identifier[len] ( identifier[s] . identifier[values] )== literal[int] : identifier[df] [ identifier[uuids] [ identifier[idx] ]]= keyword[None] keyword[else] : identifier[df] [ identifier[uuids] [ identifier[idx] ]]= identifier[s] . identifier[values] identifier[df] . identifier[index] = identifier[df] . identifier[index] . identifier[tz_localize] ( identifier[pytz] . identifier[utc] ). identifier[tz_convert] ( identifier[tz] ) identifier[response] [ literal[string] ]= identifier[df] identifier[got_response] = keyword[True] keyword[else] : identifier[df] = identifier[pd] . identifier[DataFrame] () keyword[for] identifier[idx] , identifier[s] keyword[in] identifier[enumerate] ( identifier[data] . identifier[streams] ): keyword[if] identifier[hasattr] ( identifier[s] , literal[string] ): identifier[newdf] = identifier[pd] . identifier[DataFrame] ( identifier[list] ( identifier[s] . identifier[values] ), identifier[index] = identifier[list] ( identifier[s] . identifier[times] ), identifier[columns] =[ identifier[uuids] [ identifier[idx] ]]) identifier[newdf] . identifier[index] = identifier[pd] . identifier[to_datetime] ( identifier[newdf] . identifier[index] , identifier[unit] = literal[string] ). identifier[tz_localize] ( identifier[pytz] . identifier[utc] ). identifier[tz_convert] ( identifier[tz] ) identifier[df] = identifier[df] . identifier[join] ( identifier[newdf] , identifier[how] = literal[string] ) keyword[else] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[response] [ literal[string] ]= identifier[df] identifier[got_response] = keyword[True] identifier[df] = identifier[response] . identifier[get] ( literal[string] ) keyword[if] identifier[df] keyword[is] keyword[not] keyword[None] : identifier[response] [ literal[string] ]= identifier[df] keyword[if] identifier[got_response] : identifier[ev] . identifier[set] () identifier[h] = identifier[self] . identifier[c] . identifier[subscribe] ( literal[string] . identifier[format] ( identifier[self] . identifier[url] , identifier[self] . identifier[vk] [:- literal[int] ]), identifier[_handleresult] ) identifier[po] = identifier[PayloadObject] (( literal[int] , literal[int] , literal[int] , literal[int] ), keyword[None] , identifier[msgpack] . identifier[packb] ( identifier[query] )) identifier[self] . identifier[c] . identifier[publish] ( literal[string] . identifier[format] ( identifier[self] . identifier[url] ), identifier[payload_objects] =( identifier[po] ,)) identifier[ev] . identifier[wait] ( identifier[timeout] ) identifier[self] . identifier[c] . identifier[unsubscribe] ( identifier[h] ) keyword[if] literal[string] keyword[in] identifier[response] : keyword[raise] identifier[Exception] ( identifier[response] [ literal[string] ]) keyword[return] identifier[response]
def do_query(self, query, timeout=DEFAULT_TIMEOUT, tz=pytz.timezone('US/Pacific')):
    """
    Query structure is as follows:

    query = {
        # We bind UUIDs found as the result of a Brick query to a variable name
        # that we can use later.
        # Each variable definition has the following:
        #  - name: how we will refer to this group of UUIDs
        #  - definition: a Brick query. The SELECT clause should return variables that end in '_uuid',
        #    which can be found as the object of a 'bf:uuid' relationship
        #  - units: what units we want to retrieve this stream as. Currently supports W/kW, Wh/kWh, F/C, Lux
        "Variables": [
            {"Name": "meter",
             "Definition": "SELECT ?meter_uuid WHERE { ?meter rdf:type/rdfs:subClassOf* brick:Electric_Meter . ?meter bf:uuid ?meter_uuid . };",
             "Units": "kW",
            },
            {"Name": "temp",
             "Definition": "SELECT ?temp_uuid WHERE { ?temp rdf:type/rdfs:subClassOf* brick:Temperature_Sensor . ?temp bf:uuid ?temp_uuid . };",
             "Units": "F",
            },
        ],

        # this is the composition of the data matrix we are returning. Below, all the uuids for the
        # "meter" variable will be placed before all of the uuids for the "temp" variable. We cannot
        # guarantee order of uuids within those groups, but the ordering of the groups will be
        # preserved. Explicit UUIDs can also be used here
        "Composition": ["meter", "temp"],

        # If we are retrieving statistical data, then we need to say which statistical elements we
        # want to download. The options are RAW, MEAN, MIN, MAX and COUNT. To query multiple, you
        # can OR them together (e.g. MEAN|MAX). This maps 1-1 to the "Composition" field
        "Selectors": [MEAN, MEAN],

        # Temporal parameters for the query. Retrieves data in the range [T0, T1]. By convention,
        # T0 < T1, but MDAL will take care of it if this is reversed.
        # WindowSize is the size of the resample window in nanoseconds.
        # If Aligned is true, then MDAL will snap all data to the beginning of the window (e.g. if
        # 5min window + Aligned=true, then all timestamps will be on 00:05:00, 00:10:00, 00:15:00, etc)
        "Time": {
            "T0": "2017-08-01 00:00:00",
            "T1": "2017-08-08 00:00:00",
            "WindowSize": '2h',
            "Aligned": True,
        },
    }
    """
    nonce = str(random.randint(0, 2 ** 32))
    query['Nonce'] = nonce
    ev = threading.Event()
    response = {}

    def _handleresult(msg):
        got_response = False
        for po in msg.payload_objects:
            if po.type_dotted != (2, 0, 10, 4):
                continue # depends on [control=['if'], data=[]]
            data = msgpack.unpackb(po.content)
            if data['Nonce'] != query['Nonce']:
                continue # depends on [control=['if'], data=[]]
            if 'error' in data:
                response['error'] = data['error']
                response['df'] = None
                got_response = True
                continue # depends on [control=['if'], data=['data']]
            uuids = [str(uuid.UUID(bytes=x)) for x in data['Rows']]
            data = data_capnp.StreamCollection.from_bytes_packed(data['Data'])
            if hasattr(data, 'times') and len(data.times):
                times = list(data.times)
                if len(times) == 0:
                    response['df'] = pd.DataFrame(columns=uuids)
                    got_response = True
                    break # depends on [control=['if'], data=[]]
                df = pd.DataFrame(index=pd.to_datetime(times, unit='ns', utc=False))
                for (idx, s) in enumerate(data.streams):
                    if len(s.values) == 0:
                        df[uuids[idx]] = None # depends on [control=['if'], data=[]]
                    else:
                        df[uuids[idx]] = s.values # depends on [control=['for'], data=[]]
                df.index = df.index.tz_localize(pytz.utc).tz_convert(tz)
                response['df'] = df
                got_response = True # depends on [control=['if'], data=[]]
            else:
                df = pd.DataFrame()
                for (idx, s) in enumerate(data.streams):
                    if hasattr(s, 'times'):
                        newdf = pd.DataFrame(list(s.values), index=list(s.times), columns=[uuids[idx]])
                        newdf.index = pd.to_datetime(newdf.index, unit='ns').tz_localize(pytz.utc).tz_convert(tz)
                        df = df.join(newdf, how='outer') # depends on [control=['if'], data=[]]
                    else:
                        raise Exception('Does this ever happen? Tell gabe!') # depends on [control=['for'], data=[]]
                response['df'] = df
                got_response = True # depends on [control=['for'], data=['po']]
        df = response.get('df')
        if df is not None:
            response['df'] = df #[df.index.duplicated(keep='first')] # depends on [control=['if'], data=['df']]
        if got_response:
            ev.set() # depends on [control=['if'], data=[]]

    h = self.c.subscribe('{0}/s.mdal/_/i.mdal/signal/{1}'.format(self.url, self.vk[:-1]), _handleresult)
    po = PayloadObject((2, 0, 10, 3), None, msgpack.packb(query))
    self.c.publish('{0}/s.mdal/_/i.mdal/slot/query'.format(self.url), payload_objects=(po,))
    ev.wait(timeout)
    self.c.unsubscribe(h)
    if 'error' in response:
        raise Exception(response['error']) # depends on [control=['if'], data=['response']]
    return response
def check_files(self, paths=None):
    """Run all checks on the paths."""
    if paths is None:
        paths = self.paths
    report = self.options.report
    runner = self.runner
    report.start()
    try:
        for path in paths:
            if os.path.isdir(path):
                self.input_dir(path)
            elif not self.excluded(path):
                runner(path)
    except KeyboardInterrupt:
        print('... stopped')
    report.stop()
    return report
def function[check_files, parameter[self, paths]]: constant[Run all checks on the paths.] if compare[name[paths] is constant[None]] begin[:] variable[paths] assign[=] name[self].paths variable[report] assign[=] name[self].options.report variable[runner] assign[=] name[self].runner call[name[report].start, parameter[]] <ast.Try object at 0x7da1b0760790> call[name[report].stop, parameter[]] return[name[report]]
keyword[def] identifier[check_files] ( identifier[self] , identifier[paths] = keyword[None] ): literal[string] keyword[if] identifier[paths] keyword[is] keyword[None] : identifier[paths] = identifier[self] . identifier[paths] identifier[report] = identifier[self] . identifier[options] . identifier[report] identifier[runner] = identifier[self] . identifier[runner] identifier[report] . identifier[start] () keyword[try] : keyword[for] identifier[path] keyword[in] identifier[paths] : keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ): identifier[self] . identifier[input_dir] ( identifier[path] ) keyword[elif] keyword[not] identifier[self] . identifier[excluded] ( identifier[path] ): identifier[runner] ( identifier[path] ) keyword[except] identifier[KeyboardInterrupt] : identifier[print] ( literal[string] ) identifier[report] . identifier[stop] () keyword[return] identifier[report]
def check_files(self, paths=None):
    """Run all checks on the paths."""
    if paths is None:
        paths = self.paths # depends on [control=['if'], data=['paths']]
    report = self.options.report
    runner = self.runner
    report.start()
    try:
        for path in paths:
            if os.path.isdir(path):
                self.input_dir(path) # depends on [control=['if'], data=[]]
            elif not self.excluded(path):
                runner(path) # depends on [control=['if'], data=[]]
            # depends on [control=['for'], data=['path']]
        # depends on [control=['try'], data=[]]
    except KeyboardInterrupt:
        print('... stopped') # depends on [control=['except'], data=[]]
    report.stop()
    return report
def FromFile(self, filePath: str) -> bool:
    """
    Load image from a file.
    filePath: str.
    Return bool, True if succeed otherwise False.
    """
    self.Release()
    self._bitmap = _DllClient.instance().dll.BitmapFromFile(ctypes.c_wchar_p(filePath))
    self._getsize()
    return self._bitmap > 0
def function[FromFile, parameter[self, filePath]]: constant[ Load image from a file. filePath: str. Return bool, True if succeed otherwise False. ] call[name[self].Release, parameter[]] name[self]._bitmap assign[=] call[call[name[_DllClient].instance, parameter[]].dll.BitmapFromFile, parameter[call[name[ctypes].c_wchar_p, parameter[name[filePath]]]]] call[name[self]._getsize, parameter[]] return[compare[name[self]._bitmap greater[>] constant[0]]]
keyword[def] identifier[FromFile] ( identifier[self] , identifier[filePath] : identifier[str] )-> identifier[bool] : literal[string] identifier[self] . identifier[Release] () identifier[self] . identifier[_bitmap] = identifier[_DllClient] . identifier[instance] (). identifier[dll] . identifier[BitmapFromFile] ( identifier[ctypes] . identifier[c_wchar_p] ( identifier[filePath] )) identifier[self] . identifier[_getsize] () keyword[return] identifier[self] . identifier[_bitmap] > literal[int]
def FromFile(self, filePath: str) -> bool:
    """
    Load image from a file.
    filePath: str.
    Return bool, True if succeed otherwise False.
    """
    self.Release()
    self._bitmap = _DllClient.instance().dll.BitmapFromFile(ctypes.c_wchar_p(filePath))
    self._getsize()
    return self._bitmap > 0
def update_pull_request(self, git_pull_request_to_update, repository_id, pull_request_id, project=None):
    """UpdatePullRequest.
    [Preview API] Update a pull request
    :param :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` git_pull_request_to_update: The pull request content that should be updated.
    :param str repository_id: The repository ID of the pull request's target branch.
    :param int pull_request_id: ID of the pull request to update.
    :param str project: Project ID or project name
    :rtype: :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    if pull_request_id is not None:
        route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
    content = self._serialize.body(git_pull_request_to_update, 'GitPullRequest')
    response = self._send(http_method='PATCH',
                          location_id='9946fd70-0d40-406e-b686-b4744cbbcc37',
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('GitPullRequest', response)
def function[update_pull_request, parameter[self, git_pull_request_to_update, repository_id, pull_request_id, project]]: constant[UpdatePullRequest. [Preview API] Update a pull request :param :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` git_pull_request_to_update: The pull request content that should be updated. :param str repository_id: The repository ID of the pull request's target branch. :param int pull_request_id: ID of the pull request to update. :param str project: Project ID or project name :rtype: :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` ] variable[route_values] assign[=] dictionary[[], []] if compare[name[project] is_not constant[None]] begin[:] call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]] if compare[name[repository_id] is_not constant[None]] begin[:] call[name[route_values]][constant[repositoryId]] assign[=] call[name[self]._serialize.url, parameter[constant[repository_id], name[repository_id], constant[str]]] if compare[name[pull_request_id] is_not constant[None]] begin[:] call[name[route_values]][constant[pullRequestId]] assign[=] call[name[self]._serialize.url, parameter[constant[pull_request_id], name[pull_request_id], constant[int]]] variable[content] assign[=] call[name[self]._serialize.body, parameter[name[git_pull_request_to_update], constant[GitPullRequest]]] variable[response] assign[=] call[name[self]._send, parameter[]] return[call[name[self]._deserialize, parameter[constant[GitPullRequest], name[response]]]]
keyword[def] identifier[update_pull_request] ( identifier[self] , identifier[git_pull_request_to_update] , identifier[repository_id] , identifier[pull_request_id] , identifier[project] = keyword[None] ): literal[string] identifier[route_values] ={} keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] ) keyword[if] identifier[repository_id] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[repository_id] , literal[string] ) keyword[if] identifier[pull_request_id] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[pull_request_id] , literal[string] ) identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[git_pull_request_to_update] , literal[string] ) identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] , identifier[location_id] = literal[string] , identifier[version] = literal[string] , identifier[route_values] = identifier[route_values] , identifier[content] = identifier[content] ) keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] )
def update_pull_request(self, git_pull_request_to_update, repository_id, pull_request_id, project=None): """UpdatePullRequest. [Preview API] Update a pull request :param :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` git_pull_request_to_update: The pull request content that should be updated. :param str repository_id: The repository ID of the pull request's target branch. :param int pull_request_id: ID of the pull request to update. :param str project: Project ID or project name :rtype: :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']] if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') # depends on [control=['if'], data=['repository_id']] if pull_request_id is not None: route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int') # depends on [control=['if'], data=['pull_request_id']] content = self._serialize.body(git_pull_request_to_update, 'GitPullRequest') response = self._send(http_method='PATCH', location_id='9946fd70-0d40-406e-b686-b4744cbbcc37', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('GitPullRequest', response)
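A self-contained sketch of the route-building pattern used above: optional identifiers are added only when present, then substituted into a URL template. The template string below is illustrative, not taken from the SDK (the real path is resolved from location_id).

def build_route(project=None, repository_id=None, pull_request_id=None):
    route_values = {}
    if project is not None:
        route_values['project'] = project
    if repository_id is not None:
        route_values['repositoryId'] = repository_id
    if pull_request_id is not None:
        route_values['pullRequestId'] = str(pull_request_id)
    # Illustrative template only; shows how route_values are consumed.
    template = '{project}/_apis/git/repositories/{repositoryId}/pullrequests/{pullRequestId}'
    return template.format(**route_values)

print(build_route('my-project', 'my-repo', 42))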
def send_extended(self, address, timestamp, value): """Queue an extended datapoint (ie. a string), return True/False for success. Arguments: address -- uint64_t representing a unique metric. timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch. value -- string value being stored. """ if self.marquise_ctx is None: raise ValueError("Attempted to write to a closed Marquise handle.") self.__debug("Supplied address: %s" % address) if value is None: raise TypeError("Can't store None as a value.") value = str(value) if timestamp is None: timestamp = self.current_timestamp() # Use cast() here to make up the C datatypes for dispatch. # FFI will take care of converting them to the right endianness. I think. c_address = FFI.cast("uint64_t", address) c_timestamp = FFI.cast("uint64_t", timestamp) # c_value needs to be a byte array with a length in bytes c_value = cstring(value) c_length = FFI.cast("size_t", len_cstring(value)) self.__debug("Sending extended value '%s' with length of %d" % (value, c_length)) success = MARQUISE_SEND_EXTENDED(self.marquise_ctx, c_address, c_timestamp, c_value, c_length) if success != 0: self.__debug("send_extended returned %d, raising exception" % success) raise RuntimeError("send_extended was unsuccessful, errno is %d" % FFI.errno) self.__debug("send_extended returned %d" % success) return True
def function[send_extended, parameter[self, address, timestamp, value]]: constant[Queue an extended datapoint (ie. a string), return True/False for success. Arguments: address -- uint64_t representing a unique metric. timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch. value -- string value being stored. ] if compare[name[self].marquise_ctx is constant[None]] begin[:] <ast.Raise object at 0x7da1b14c1fc0> call[name[self].__debug, parameter[binary_operation[constant[Supplied address: %s] <ast.Mod object at 0x7da2590d6920> name[address]]]] if compare[name[value] is constant[None]] begin[:] <ast.Raise object at 0x7da1b14c1ba0> variable[value] assign[=] call[name[str], parameter[name[value]]] if compare[name[timestamp] is constant[None]] begin[:] variable[timestamp] assign[=] call[name[self].current_timestamp, parameter[]] variable[c_address] assign[=] call[name[FFI].cast, parameter[constant[uint64_t], name[address]]] variable[c_timestamp] assign[=] call[name[FFI].cast, parameter[constant[uint64_t], name[timestamp]]] variable[c_value] assign[=] call[name[cstring], parameter[name[value]]] variable[c_length] assign[=] call[name[FFI].cast, parameter[constant[size_t], call[name[len_cstring], parameter[name[value]]]]] call[name[self].__debug, parameter[binary_operation[constant[Sending extended value '%s' with length of %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b13659c0>, <ast.Name object at 0x7da1b1367700>]]]]] variable[success] assign[=] call[name[MARQUISE_SEND_EXTENDED], parameter[name[self].marquise_ctx, name[c_address], name[c_timestamp], name[c_value], name[c_length]]] if compare[name[success] not_equal[!=] constant[0]] begin[:] call[name[self].__debug, parameter[binary_operation[constant[send_extended returned %d, raising exception] <ast.Mod object at 0x7da2590d6920> name[success]]]] <ast.Raise object at 0x7da1b1366200> call[name[self].__debug, parameter[binary_operation[constant[send_extended returned %d] <ast.Mod object at 0x7da2590d6920> name[success]]]] return[constant[True]]
keyword[def] identifier[send_extended] ( identifier[self] , identifier[address] , identifier[timestamp] , identifier[value] ): literal[string] keyword[if] identifier[self] . identifier[marquise_ctx] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[__debug] ( literal[string] % identifier[address] ) keyword[if] identifier[value] keyword[is] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[value] = identifier[str] ( identifier[value] ) keyword[if] identifier[timestamp] keyword[is] keyword[None] : identifier[timestamp] = identifier[self] . identifier[current_timestamp] () identifier[c_address] = identifier[FFI] . identifier[cast] ( literal[string] , identifier[address] ) identifier[c_timestamp] = identifier[FFI] . identifier[cast] ( literal[string] , identifier[timestamp] ) identifier[c_value] = identifier[cstring] ( identifier[value] ) identifier[c_length] = identifier[FFI] . identifier[cast] ( literal[string] , identifier[len_cstring] ( identifier[value] )) identifier[self] . identifier[__debug] ( literal[string] %( identifier[value] , identifier[c_length] )) identifier[success] = identifier[MARQUISE_SEND_EXTENDED] ( identifier[self] . identifier[marquise_ctx] , identifier[c_address] , identifier[c_timestamp] , identifier[c_value] , identifier[c_length] ) keyword[if] identifier[success] != literal[int] : identifier[self] . identifier[__debug] ( literal[string] % identifier[success] ) keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[FFI] . identifier[errno] ) identifier[self] . identifier[__debug] ( literal[string] % identifier[success] ) keyword[return] keyword[True]
def send_extended(self, address, timestamp, value): """Queue an extended datapoint (ie. a string), return True/False for success. Arguments: address -- uint64_t representing a unique metric. timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch. value -- string value being stored. """ if self.marquise_ctx is None: raise ValueError('Attempted to write to a closed Marquise handle.') # depends on [control=['if'], data=[]] self.__debug('Supplied address: %s' % address) if value is None: raise TypeError("Can't store None as a value.") # depends on [control=['if'], data=[]] value = str(value) if timestamp is None: timestamp = self.current_timestamp() # depends on [control=['if'], data=['timestamp']] # Use cast() here to make up the C datatypes for dispatch. # FFI will take care of converting them to the right endianness. I think. c_address = FFI.cast('uint64_t', address) c_timestamp = FFI.cast('uint64_t', timestamp) # c_value needs to be a byte array with a length in bytes c_value = cstring(value) c_length = FFI.cast('size_t', len_cstring(value)) self.__debug("Sending extended value '%s' with length of %d" % (value, c_length)) success = MARQUISE_SEND_EXTENDED(self.marquise_ctx, c_address, c_timestamp, c_value, c_length) if success != 0: self.__debug('send_extended returned %d, raising exception' % success) raise RuntimeError('send_extended was unsuccessful, errno is %d' % FFI.errno) # depends on [control=['if'], data=['success']] self.__debug('send_extended returned %d' % success) return True
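The flow of send_extended without the C layer, runnable stand-alone; the list `queue` is a hypothetical stand-in for the Marquise context and the MARQUISE_SEND_EXTENDED dispatch:

import time

def send_extended(queue, address, timestamp, value):
    if queue is None:
        raise ValueError("Attempted to write to a closed handle.")
    if value is None:
        raise TypeError("Can't store None as a value.")
    value = str(value)
    if timestamp is None:
        timestamp = int(time.time() * 1e9)   # nanoseconds since epoch
    # Bytes plus implicit length, mirroring the (c_value, c_length) pair.
    queue.append((address, timestamp, value.encode()))
    return True

queue = []
send_extended(queue, 0xABCD, None, "hello")
print(queue[0][0], len(queue[0][2]))   # 43981 5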
def balance(self, account): """ Return the balance, a tuple with the eth and ocn balance. :param account: Account instance to return the balance of :return: Balance tuple of (eth, ocn) """ return Balance(self._keeper.get_ether_balance(account.address), self._keeper.token.get_token_balance(account.address))
def function[balance, parameter[self, account]]: constant[ Return the balance, a tuple with the eth and ocn balance. :param account: Account instance to return the balance of :return: Balance tuple of (eth, ocn) ] return[call[name[Balance], parameter[call[name[self]._keeper.get_ether_balance, parameter[name[account].address]], call[name[self]._keeper.token.get_token_balance, parameter[name[account].address]]]]]
keyword[def] identifier[balance] ( identifier[self] , identifier[account] ): literal[string] keyword[return] identifier[Balance] ( identifier[self] . identifier[_keeper] . identifier[get_ether_balance] ( identifier[account] . identifier[address] ), identifier[self] . identifier[_keeper] . identifier[token] . identifier[get_token_balance] ( identifier[account] . identifier[address] ))
def balance(self, account): """ Return the balance, a tuple with the eth and ocn balance. :param account: Account instance to return the balance of :return: Balance tuple of (eth, ocn) """ return Balance(self._keeper.get_ether_balance(account.address), self._keeper.token.get_token_balance(account.address))
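Balance is presumably a two-field namedtuple-style pair; a minimal stand-in shows how callers unpack the (eth, ocn) result (field names assumed, not taken from the record):

from collections import namedtuple

Balance = namedtuple('Balance', ['eth', 'ocn'])   # assumed field names
bal = Balance(eth=1500000000000000000, ocn=25)
eth, ocn = bal
print(eth, ocn)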
def _notify_delete(self, index_or_slice): """Notify about a deletion at an index_or_slice. :return: a function that notifies about an add at the same place. """ if isinstance(index_or_slice, int): length = len(self) if -length <= index_or_slice < length: self._notify_remove_at(index_or_slice) return lambda: self._notify_add_at(index_or_slice) elif isinstance(index_or_slice, slice): slice_ = slice(*index_or_slice.indices(len(self))) self._notify_remove(slice_) return lambda: self._notify_add(index_or_slice)
def function[_notify_delete, parameter[self, index_or_slice]]: constant[Notify about a deletion at an index_or_slice. :return: a function that notifies about an add at the same place. ] if call[name[isinstance], parameter[name[index_or_slice], name[int]]] begin[:] variable[length] assign[=] call[name[len], parameter[name[self]]] if compare[<ast.UnaryOp object at 0x7da18dc98e50> less_or_equal[<=] name[index_or_slice]] begin[:] call[name[self]._notify_remove_at, parameter[name[index_or_slice]]] return[<ast.Lambda object at 0x7da18dc9b460>]
keyword[def] identifier[_notify_delete] ( identifier[self] , identifier[index_or_slice] ): literal[string] keyword[if] identifier[isinstance] ( identifier[index_or_slice] , identifier[int] ): identifier[length] = identifier[len] ( identifier[self] ) keyword[if] - identifier[length] <= identifier[index_or_slice] < identifier[length] : identifier[self] . identifier[_notify_remove_at] ( identifier[index_or_slice] ) keyword[return] keyword[lambda] : identifier[self] . identifier[_notify_add_at] ( identifier[index_or_slice] ) keyword[elif] identifier[isinstance] ( identifier[index_or_slice] , identifier[slice] ): identifier[slice_] = identifier[slice] (* identifier[index_or_slice] . identifier[indices] ( identifier[len] ( identifier[self] ))) identifier[self] . identifier[_notify_remove] ( identifier[slice_] ) keyword[return] keyword[lambda] : identifier[self] . identifier[_notify_add] ( identifier[index_or_slice] )
def _notify_delete(self, index_or_slice): """Notify about a deletion at an index_or_slice. :return: a function that notifies about an add at the same place. """ if isinstance(index_or_slice, int): length = len(self) if -length <= index_or_slice < length: self._notify_remove_at(index_or_slice) return lambda : self._notify_add_at(index_or_slice) # depends on [control=['if'], data=['index_or_slice']] # depends on [control=['if'], data=[]] elif isinstance(index_or_slice, slice): slice_ = slice(*index_or_slice.indices(len(self))) self._notify_remove(slice_) return lambda : self._notify_add(index_or_slice) # depends on [control=['if'], data=[]]
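The slice branch above normalizes open-ended and negative slices against the current length before notifying; slice.indices performs exactly that normalization:

items = list(range(10))
s = slice(-3, None)                        # the last three items
normalized = slice(*s.indices(len(items)))
print(normalized)                          # slice(7, 10, 1)
print(items[normalized])                   # [7, 8, 9]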
def get_network_project(network_id, **kwargs):
    """
        get the project that a network is in
    """
    net_proj = db.DBSession.query(Project).join(Network, and_(Project.id==Network.project_id, Network.id==network_id)).first()

    if net_proj is None:
        raise HydraError("Network %s not found" % network_id)

    return net_proj
def function[get_network_project, parameter[network_id]]: constant[ get the project that a network is in ] variable[net_proj] assign[=] call[call[call[name[db].DBSession.query, parameter[name[Project]]].join, parameter[name[Network], call[name[and_], parameter[compare[name[Project].id equal[==] name[Network].id], compare[name[Network].id equal[==] name[network_id]]]]]].first, parameter[]] if compare[name[net_proj] is constant[None]] begin[:] <ast.Raise object at 0x7da18f810340> return[name[net_proj]]
keyword[def] identifier[get_network_project] ( identifier[network_id] ,** identifier[kwargs] ): literal[string] identifier[net_proj] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Project] ). identifier[join] ( identifier[Network] , identifier[and_] ( identifier[Project] . identifier[id] == identifier[Network] . identifier[id] , identifier[Network] . identifier[id] == identifier[network_id] )). identifier[first] () keyword[if] identifier[net_proj] keyword[is] keyword[None] : keyword[raise] identifier[HydraError] ( literal[string] % identifier[network_id] ) keyword[return] identifier[net_proj]
def get_network_project(network_id, **kwargs):
    """
    get the project that a network is in
    """
    net_proj = db.DBSession.query(Project).join(Network, and_(Project.id == Network.project_id, Network.id == network_id)).first()
    if net_proj is None:
        raise HydraError('Network %s not found' % network_id) # depends on [control=['if'], data=[]]
    return net_proj
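A runnable sketch of the join with minimal in-memory models; the Network.project_id foreign key is an assumption about the Hydra schema (joining on the two primary keys, as the record originally did, would only match when the ids happen to coincide):

from sqlalchemy import Column, ForeignKey, Integer, and_, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Project(Base):
    __tablename__ = 'project'
    id = Column(Integer, primary_key=True)

class Network(Base):
    __tablename__ = 'network'
    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey('project.id'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Project(id=1), Network(id=7, project_id=1)])
    session.commit()
    proj = session.query(Project).join(
        Network, and_(Project.id == Network.project_id, Network.id == 7)).first()
    print(proj.id)   # 1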
def get_source_class(self, _class):
    """
    Return the Java source code of a whole class

    :param _class: `ClassDefItem` object, to get the source from
    :return:
    """
    if _class.get_name() not in self.classes:
        return ""

    return self.classes[_class.get_name()]
def function[get_source_class, parameter[self, _class]]: constant[ Return the Java source code of a whole class :param _class: `ClassDefItem` object, to get the source from :return: ] if <ast.UnaryOp object at 0x7da20c7ca7a0> begin[:] return[constant[]] return[call[name[self].classes][call[name[_class].get_name, parameter[]]]]
keyword[def] identifier[get_source_class] ( identifier[self] , identifier[_class] ): literal[string] keyword[if] keyword[not] identifier[_class] . identifier[get_name] () keyword[in] identifier[self] . identifier[classes] : keyword[return] literal[string] keyword[return] identifier[self] . identifier[classes] [ identifier[_class] . identifier[get_name] ()]
def get_source_class(self, _class): """ Return the Java source code of a whole class :param _class: `ClassDefItem` object, to get the source from :return: """ if not _class.get_name() in self.classes: return '' # depends on [control=['if'], data=[]] return self.classes[_class.get_name()]
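The guard-then-lookup above is equivalent to a defaulted dictionary access:

classes = {'Lcom/example/Foo;': 'public class Foo { }'}   # toy class map
print(classes.get('Lcom/example/Foo;', ''))
print(classes.get('Lcom/example/Missing;', ''))           # ''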
def _set_overlay_service_policy_brief_state(self, v, load=False): """ Setter method for overlay_service_policy_brief_state, mapped from YANG variable /overlay_service_policy_brief_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_overlay_service_policy_brief_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_overlay_service_policy_brief_state() directly. YANG Description: Overlay Service Policy Brief """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=overlay_service_policy_brief_state.overlay_service_policy_brief_state, is_container='container', presence=False, yang_name="overlay-service-policy-brief-state", rest_name="overlay-service-policy-brief-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """overlay_service_policy_brief_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=overlay_service_policy_brief_state.overlay_service_policy_brief_state, is_container='container', presence=False, yang_name="overlay-service-policy-brief-state", rest_name="overlay-service-policy-brief-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True)""", }) self.__overlay_service_policy_brief_state = t if hasattr(self, '_set'): self._set()
def function[_set_overlay_service_policy_brief_state, parameter[self, v, load]]: constant[ Setter method for overlay_service_policy_brief_state, mapped from YANG variable /overlay_service_policy_brief_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_overlay_service_policy_brief_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_overlay_service_policy_brief_state() directly. YANG Description: Overlay Service Policy Brief ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da2045663b0> name[self].__overlay_service_policy_brief_state assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_overlay_service_policy_brief_state] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[overlay_service_policy_brief_state] . identifier[overlay_service_policy_brief_state] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__overlay_service_policy_brief_state] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_overlay_service_policy_brief_state(self, v, load=False): """ Setter method for overlay_service_policy_brief_state, mapped from YANG variable /overlay_service_policy_brief_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_overlay_service_policy_brief_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_overlay_service_policy_brief_state() directly. YANG Description: Overlay Service Policy Brief """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=overlay_service_policy_brief_state.overlay_service_policy_brief_state, is_container='container', presence=False, yang_name='overlay-service-policy-brief-state', rest_name='overlay-service-policy-brief-state', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'overlay_service_policy_brief_state must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=overlay_service_policy_brief_state.overlay_service_policy_brief_state, is_container=\'container\', presence=False, yang_name="overlay-service-policy-brief-state", rest_name="overlay-service-policy-brief-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'ssm-overlay-service-policy-brief\', u\'cli-suppress-show-path\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-ssm-operational\', defining_module=\'brocade-ssm-operational\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__overlay_service_policy_brief_state = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def initialize_layers(self, layers=None):
    """Sets up the Lasagne layers

    :param layers: The dictionary of layers, or a
        :class:`lasagne.Layers` instance, describing the underlying network
    :return: the output layer of the underlying lasagne network.
    :seealso: :ref:`layer-def`
    """
    if layers is not None:
        self.layers = layers
    self.layers_ = Layers()

    # If a Layer, or a list of Layers was passed in
    if isinstance(self.layers[0], Layer):
        for out_layer in self.layers:
            for i, layer in enumerate(get_all_layers(out_layer)):
                if layer not in self.layers_.values():
                    name = layer.name or self._layer_name(layer.__class__, i)
                    self.layers_[name] = layer
                    if self._get_params_for(name) != {}:
                        raise ValueError(
                            "You can't use keyword params when passing a Lasagne "
                            "instance object as the 'layers' parameter of "
                            "'NeuralNet'."
                            )
        self._output_layers = self.layers
        return self.layers

    # 'self.layers' are a list of '(Layer class, kwargs)', so
    # we'll have to actually instantiate the layers given the
    # arguments:
    layer = None
    for i, layer_def in enumerate(self.layers):
        if isinstance(layer_def[1], dict):
            # Newer format: (Layer, {'layer': 'kwargs'})
            layer_factory, layer_kw = layer_def
            layer_kw = layer_kw.copy()
        else:
            # The legacy format: ('name', Layer)
            layer_name, layer_factory = layer_def
            layer_kw = {'name': layer_name}

        if isinstance(layer_factory, str):
            layer_factory = locate(layer_factory)
            assert layer_factory is not None

        if 'name' not in layer_kw:
            layer_kw['name'] = self._layer_name(layer_factory, i)

        more_params = self._get_params_for(layer_kw['name'])
        layer_kw.update(more_params)

        if layer_kw['name'] in self.layers_:
            raise ValueError(
                "Two layers with name {}.".format(layer_kw['name']))

        # Any layers that aren't subclasses of InputLayer are
        # assumed to require an 'incoming' parameter.  By default,
        # we'll use the previous layer as input:
        try:
            is_input_layer = issubclass(layer_factory, InputLayer)
        except TypeError:
            is_input_layer = False
        if not is_input_layer:
            if 'incoming' in layer_kw:
                layer_kw['incoming'] = self.layers_[
                    layer_kw['incoming']]
            elif 'incomings' in layer_kw:
                layer_kw['incomings'] = [
                    self.layers_[name] for name in layer_kw['incomings']]
            else:
                layer_kw['incoming'] = layer

        # Deal with additional string parameters that may
        # reference other layers; currently only 'mask_input'.
        for param in self.layer_reference_params:
            if param in layer_kw:
                val = layer_kw[param]
                if isinstance(val, basestring):
                    layer_kw[param] = self.layers_[val]

        for attr in ('W', 'b'):
            if isinstance(layer_kw.get(attr), str):
                name = layer_kw[attr]
                layer_kw[attr] = getattr(self.layers_[name], attr, None)

        try:
            layer_wrapper = layer_kw.pop('layer_wrapper', None)
            layer = layer_factory(**layer_kw)
        except TypeError as e:
            msg = ("Failed to instantiate {} with args {}.\n"
                   "Maybe parameter names have changed?".format(
                       layer_factory, layer_kw))
            chain_exception(TypeError(msg), e)
        self.layers_[layer_kw['name']] = layer
        if layer_wrapper is not None:
            layer = layer_wrapper(layer)
            self.layers_["LW_%s" % layer_kw['name']] = layer

    self._output_layers = [layer]
    return [layer]
def function[initialize_layers, parameter[self, layers]]: constant[Sets up the Lasagne layers :param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network :return: the output layer of the underlying lasagne network. :seealso: :ref:`layer-def` ] if compare[name[layers] is_not constant[None]] begin[:] name[self].layers assign[=] name[layers] name[self].layers_ assign[=] call[name[Layers], parameter[]] if call[name[isinstance], parameter[call[name[self].layers][constant[0]], name[Layer]]] begin[:] for taget[name[out_layer]] in starred[name[self].layers] begin[:] for taget[tuple[[<ast.Name object at 0x7da20c6e78b0>, <ast.Name object at 0x7da20c6e5fc0>]]] in starred[call[name[enumerate], parameter[call[name[get_all_layers], parameter[name[out_layer]]]]]] begin[:] if compare[name[layer] <ast.NotIn object at 0x7da2590d7190> call[name[self].layers_.values, parameter[]]] begin[:] variable[name] assign[=] <ast.BoolOp object at 0x7da20c6e6440> call[name[self].layers_][name[name]] assign[=] name[layer] if compare[call[name[self]._get_params_for, parameter[name[name]]] not_equal[!=] dictionary[[], []]] begin[:] <ast.Raise object at 0x7da20c6e67a0> name[self]._output_layers assign[=] name[self].layers return[name[self].layers] variable[layer] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da20c6e52a0>, <ast.Name object at 0x7da20c6e46a0>]]] in starred[call[name[enumerate], parameter[name[self].layers]]] begin[:] if call[name[isinstance], parameter[call[name[layer_def]][constant[1]], name[dict]]] begin[:] <ast.Tuple object at 0x7da20c6e5000> assign[=] name[layer_def] variable[layer_kw] assign[=] call[name[layer_kw].copy, parameter[]] if call[name[isinstance], parameter[name[layer_factory], name[str]]] begin[:] variable[layer_factory] assign[=] call[name[locate], parameter[name[layer_factory]]] assert[compare[name[layer_factory] is_not constant[None]]] if compare[constant[name] <ast.NotIn object at 0x7da2590d7190> name[layer_kw]] begin[:] call[name[layer_kw]][constant[name]] assign[=] call[name[self]._layer_name, parameter[name[layer_factory], name[i]]] variable[more_params] assign[=] call[name[self]._get_params_for, parameter[call[name[layer_kw]][constant[name]]]] call[name[layer_kw].update, parameter[name[more_params]]] if compare[call[name[layer_kw]][constant[name]] in name[self].layers_] begin[:] <ast.Raise object at 0x7da20c6e66b0> <ast.Try object at 0x7da18c4cc460> if <ast.UnaryOp object at 0x7da18c4cd870> begin[:] if compare[constant[incoming] in name[layer_kw]] begin[:] call[name[layer_kw]][constant[incoming]] assign[=] call[name[self].layers_][call[name[layer_kw]][constant[incoming]]] for taget[name[param]] in starred[name[self].layer_reference_params] begin[:] if compare[name[param] in name[layer_kw]] begin[:] variable[val] assign[=] call[name[layer_kw]][name[param]] if call[name[isinstance], parameter[name[val], name[basestring]]] begin[:] call[name[layer_kw]][name[param]] assign[=] call[name[self].layers_][name[val]] for taget[name[attr]] in starred[tuple[[<ast.Constant object at 0x7da18f58fb50>, <ast.Constant object at 0x7da18f58f160>]]] begin[:] if call[name[isinstance], parameter[call[name[layer_kw].get, parameter[name[attr]]], name[str]]] begin[:] variable[name] assign[=] call[name[layer_kw]][name[attr]] call[name[layer_kw]][name[attr]] assign[=] call[name[getattr], parameter[call[name[self].layers_][name[name]], name[attr], constant[None]]] <ast.Try object at 0x7da18f58d720> 
call[name[self].layers_][call[name[layer_kw]][constant[name]]] assign[=] name[layer] if compare[name[layer_wrapper] is_not constant[None]] begin[:] variable[layer] assign[=] call[name[layer_wrapper], parameter[name[layer]]] call[name[self].layers_][binary_operation[constant[LW_%s] <ast.Mod object at 0x7da2590d6920> call[name[layer_kw]][constant[name]]]] assign[=] name[layer] name[self]._output_layers assign[=] list[[<ast.Name object at 0x7da18f58e680>]] return[list[[<ast.Name object at 0x7da18f58e7a0>]]]
keyword[def] identifier[initialize_layers] ( identifier[self] , identifier[layers] = keyword[None] ): literal[string] keyword[if] identifier[layers] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[layers] = identifier[layers] identifier[self] . identifier[layers_] = identifier[Layers] () keyword[if] identifier[isinstance] ( identifier[self] . identifier[layers] [ literal[int] ], identifier[Layer] ): keyword[for] identifier[out_layer] keyword[in] identifier[self] . identifier[layers] : keyword[for] identifier[i] , identifier[layer] keyword[in] identifier[enumerate] ( identifier[get_all_layers] ( identifier[out_layer] )): keyword[if] identifier[layer] keyword[not] keyword[in] identifier[self] . identifier[layers_] . identifier[values] (): identifier[name] = identifier[layer] . identifier[name] keyword[or] identifier[self] . identifier[_layer_name] ( identifier[layer] . identifier[__class__] , identifier[i] ) identifier[self] . identifier[layers_] [ identifier[name] ]= identifier[layer] keyword[if] identifier[self] . identifier[_get_params_for] ( identifier[name] )!={}: keyword[raise] identifier[ValueError] ( literal[string] literal[string] literal[string] ) identifier[self] . identifier[_output_layers] = identifier[self] . identifier[layers] keyword[return] identifier[self] . identifier[layers] identifier[layer] = keyword[None] keyword[for] identifier[i] , identifier[layer_def] keyword[in] identifier[enumerate] ( identifier[self] . identifier[layers] ): keyword[if] identifier[isinstance] ( identifier[layer_def] [ literal[int] ], identifier[dict] ): identifier[layer_factory] , identifier[layer_kw] = identifier[layer_def] identifier[layer_kw] = identifier[layer_kw] . identifier[copy] () keyword[else] : identifier[layer_name] , identifier[layer_factory] = identifier[layer_def] identifier[layer_kw] ={ literal[string] : identifier[layer_name] } keyword[if] identifier[isinstance] ( identifier[layer_factory] , identifier[str] ): identifier[layer_factory] = identifier[locate] ( identifier[layer_factory] ) keyword[assert] identifier[layer_factory] keyword[is] keyword[not] keyword[None] keyword[if] literal[string] keyword[not] keyword[in] identifier[layer_kw] : identifier[layer_kw] [ literal[string] ]= identifier[self] . identifier[_layer_name] ( identifier[layer_factory] , identifier[i] ) identifier[more_params] = identifier[self] . identifier[_get_params_for] ( identifier[layer_kw] [ literal[string] ]) identifier[layer_kw] . identifier[update] ( identifier[more_params] ) keyword[if] identifier[layer_kw] [ literal[string] ] keyword[in] identifier[self] . identifier[layers_] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[layer_kw] [ literal[string] ])) keyword[try] : identifier[is_input_layer] = identifier[issubclass] ( identifier[layer_factory] , identifier[InputLayer] ) keyword[except] identifier[TypeError] : identifier[is_input_layer] = keyword[False] keyword[if] keyword[not] identifier[is_input_layer] : keyword[if] literal[string] keyword[in] identifier[layer_kw] : identifier[layer_kw] [ literal[string] ]= identifier[self] . identifier[layers_] [ identifier[layer_kw] [ literal[string] ]] keyword[elif] literal[string] keyword[in] identifier[layer_kw] : identifier[layer_kw] [ literal[string] ]=[ identifier[self] . 
identifier[layers_] [ identifier[name] ] keyword[for] identifier[name] keyword[in] identifier[layer_kw] [ literal[string] ]] keyword[else] : identifier[layer_kw] [ literal[string] ]= identifier[layer] keyword[for] identifier[param] keyword[in] identifier[self] . identifier[layer_reference_params] : keyword[if] identifier[param] keyword[in] identifier[layer_kw] : identifier[val] = identifier[layer_kw] [ identifier[param] ] keyword[if] identifier[isinstance] ( identifier[val] , identifier[basestring] ): identifier[layer_kw] [ identifier[param] ]= identifier[self] . identifier[layers_] [ identifier[val] ] keyword[for] identifier[attr] keyword[in] ( literal[string] , literal[string] ): keyword[if] identifier[isinstance] ( identifier[layer_kw] . identifier[get] ( identifier[attr] ), identifier[str] ): identifier[name] = identifier[layer_kw] [ identifier[attr] ] identifier[layer_kw] [ identifier[attr] ]= identifier[getattr] ( identifier[self] . identifier[layers_] [ identifier[name] ], identifier[attr] , keyword[None] ) keyword[try] : identifier[layer_wrapper] = identifier[layer_kw] . identifier[pop] ( literal[string] , keyword[None] ) identifier[layer] = identifier[layer_factory] (** identifier[layer_kw] ) keyword[except] identifier[TypeError] keyword[as] identifier[e] : identifier[msg] =( literal[string] literal[string] . identifier[format] ( identifier[layer_factory] , identifier[layer_kw] )) identifier[chain_exception] ( identifier[TypeError] ( identifier[msg] ), identifier[e] ) identifier[self] . identifier[layers_] [ identifier[layer_kw] [ literal[string] ]]= identifier[layer] keyword[if] identifier[layer_wrapper] keyword[is] keyword[not] keyword[None] : identifier[layer] = identifier[layer_wrapper] ( identifier[layer] ) identifier[self] . identifier[layers_] [ literal[string] % identifier[layer_kw] [ literal[string] ]]= identifier[layer] identifier[self] . identifier[_output_layers] =[ identifier[layer] ] keyword[return] [ identifier[layer] ]
def initialize_layers(self, layers=None): """Sets up the Lasagne layers :param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network :return: the output layer of the underlying lasagne network. :seealso: :ref:`layer-def` """ if layers is not None: self.layers = layers # depends on [control=['if'], data=['layers']] self.layers_ = Layers() #If a Layer, or a list of Layers was passed in if isinstance(self.layers[0], Layer): for out_layer in self.layers: for (i, layer) in enumerate(get_all_layers(out_layer)): if layer not in self.layers_.values(): name = layer.name or self._layer_name(layer.__class__, i) self.layers_[name] = layer if self._get_params_for(name) != {}: raise ValueError("You can't use keyword params when passing a Lasagne instance object as the 'layers' parameter of 'NeuralNet'.") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['layer']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['out_layer']] self._output_layers = self.layers return self.layers # depends on [control=['if'], data=[]] # 'self.layers' are a list of '(Layer class, kwargs)', so # we'll have to actually instantiate the layers given the # arguments: layer = None for (i, layer_def) in enumerate(self.layers): if isinstance(layer_def[1], dict): # Newer format: (Layer, {'layer': 'kwargs'}) (layer_factory, layer_kw) = layer_def layer_kw = layer_kw.copy() # depends on [control=['if'], data=[]] else: # The legacy format: ('name', Layer) (layer_name, layer_factory) = layer_def layer_kw = {'name': layer_name} if isinstance(layer_factory, str): layer_factory = locate(layer_factory) assert layer_factory is not None # depends on [control=['if'], data=[]] if 'name' not in layer_kw: layer_kw['name'] = self._layer_name(layer_factory, i) # depends on [control=['if'], data=['layer_kw']] more_params = self._get_params_for(layer_kw['name']) layer_kw.update(more_params) if layer_kw['name'] in self.layers_: raise ValueError('Two layers with name {}.'.format(layer_kw['name'])) # depends on [control=['if'], data=[]] # Any layers that aren't subclasses of InputLayer are # assumed to require an 'incoming' paramter. By default, # we'll use the previous layer as input: try: is_input_layer = issubclass(layer_factory, InputLayer) # depends on [control=['try'], data=[]] except TypeError: is_input_layer = False # depends on [control=['except'], data=[]] if not is_input_layer: if 'incoming' in layer_kw: layer_kw['incoming'] = self.layers_[layer_kw['incoming']] # depends on [control=['if'], data=['layer_kw']] elif 'incomings' in layer_kw: layer_kw['incomings'] = [self.layers_[name] for name in layer_kw['incomings']] # depends on [control=['if'], data=['layer_kw']] else: layer_kw['incoming'] = layer # depends on [control=['if'], data=[]] # Deal with additional string parameters that may # reference other layers; currently only 'mask_input'. 
for param in self.layer_reference_params: if param in layer_kw: val = layer_kw[param] if isinstance(val, basestring): layer_kw[param] = self.layers_[val] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['param', 'layer_kw']] # depends on [control=['for'], data=['param']] for attr in ('W', 'b'): if isinstance(layer_kw.get(attr), str): name = layer_kw[attr] layer_kw[attr] = getattr(self.layers_[name], attr, None) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']] try: layer_wrapper = layer_kw.pop('layer_wrapper', None) layer = layer_factory(**layer_kw) # depends on [control=['try'], data=[]] except TypeError as e: msg = 'Failed to instantiate {} with args {}.\nMaybe parameter names have changed?'.format(layer_factory, layer_kw) chain_exception(TypeError(msg), e) # depends on [control=['except'], data=['e']] self.layers_[layer_kw['name']] = layer if layer_wrapper is not None: layer = layer_wrapper(layer) self.layers_['LW_%s' % layer_kw['name']] = layer # depends on [control=['if'], data=['layer_wrapper']] # depends on [control=['for'], data=[]] self._output_layers = [layer] return [layer]
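The loop above accepts two layer-spec shapes; a stand-alone sketch with stub classes in place of real lasagne layers shows how each is normalized to a (factory, kwargs) pair:

class InputLayer:
    pass

class DenseLayer:
    pass

layers = [(InputLayer, {'shape': (None, 64)}),   # newer: (Layer, {'kwargs'})
          ('output', DenseLayer)]                # legacy: ('name', Layer)

for i, layer_def in enumerate(layers):
    if isinstance(layer_def[1], dict):
        factory, kw = layer_def
        kw = kw.copy()
    else:
        name, factory = layer_def
        kw = {'name': name}
    kw.setdefault('name', '%s%d' % (factory.__name__.lower(), i))
    print(factory.__name__, kw)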
def add_to_typedef(typedef_curr, obo_line):
    """Add new fields to the current typedef."""
    if obo_line[:4] == "id: ":
        assert not typedef_curr.item_id
        item_id = obo_line[4:]
        typedef_curr.item_id = item_id
    elif obo_line[:6] == "name: ":
        assert not typedef_curr.name
        typedef_curr.name = obo_line[6:]
    elif obo_line[:11] == "namespace: ":
        assert not typedef_curr.namespace
        typedef_curr.namespace = obo_line[11:]
    elif obo_line[:17] == "transitive_over: ":
        field_value = obo_line[17:].split('!')[0].rstrip()
        typedef_curr.transitive_over.append(field_value)
    elif obo_line[:12] == "inverse_of: ":
        assert not typedef_curr.inverse_of
        field_value = obo_line[12:].split('!')[0].rstrip()
        typedef_curr.inverse_of = field_value
def function[add_to_typedef, parameter[typedef_curr, obo_line]]: constant[Add new fields to the current typedef.] if compare[call[name[obo_line]][<ast.Slice object at 0x7da18bc73190>] equal[==] constant[id: ]] begin[:] assert[<ast.UnaryOp object at 0x7da18bc70460>] variable[item_id] assign[=] call[name[obo_line]][<ast.Slice object at 0x7da18bc73490>] name[typedef_curr].item_id assign[=] name[item_id]
keyword[def] identifier[add_to_typedef] ( identifier[typedef_curr] , identifier[obo_line] ): literal[string] keyword[if] identifier[obo_line] [: literal[int] ]== literal[string] : keyword[assert] keyword[not] identifier[typedef_curr] . identifier[item_id] identifier[item_id] = identifier[obo_line] [ literal[int] :] identifier[typedef_curr] . identifier[item_id] = identifier[item_id] keyword[elif] identifier[obo_line] [: literal[int] ]== literal[string] : keyword[assert] keyword[not] identifier[typedef_curr] . identifier[name] identifier[typedef_curr] . identifier[name] = identifier[obo_line] [ literal[int] :] keyword[elif] identifier[obo_line] [: literal[int] ]== literal[string] : keyword[assert] keyword[not] identifier[typedef_curr] . identifier[namespace] identifier[typedef_curr] . identifier[namespace] = identifier[obo_line] [ literal[int] :] keyword[elif] identifier[obo_line] [ literal[int] :]== literal[string] : identifier[field_value] = identifier[obo_line] [ literal[int] :]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[rstrip] () identifier[typedef_curr] . identifier[transitive_over] . identifier[append] ( identifier[field_value] ) keyword[elif] identifier[obo_line] [ literal[int] :]== literal[string] : keyword[assert] keyword[not] identifier[typedef_curr] . identifier[inverse_of] identifier[field_value] = identifier[obo_line] [ literal[int] :]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[rstrip] () identifier[typedef_curr] . identifier[inverse_of] = identifier[field_value]
def add_to_typedef(typedef_curr, obo_line): """Add new fields to the current typedef.""" if obo_line[:4] == 'id: ': assert not typedef_curr.item_id item_id = obo_line[4:] typedef_curr.item_id = item_id # depends on [control=['if'], data=[]] elif obo_line[:6] == 'name: ': assert not typedef_curr.name typedef_curr.name = obo_line[6:] # depends on [control=['if'], data=[]] elif obo_line[:11] == 'namespace: ': assert not typedef_curr.namespace typedef_curr.namespace = obo_line[11:] # depends on [control=['if'], data=[]] elif obo_line[:17] == 'transitive_over: ': field_value = obo_line[17:].split('!')[0].rstrip() typedef_curr.transitive_over.append(field_value) # depends on [control=['if'], data=[]] elif obo_line[:12] == 'inverse_of: ': assert not typedef_curr.inverse_of field_value = obo_line[12:].split('!')[0].rstrip() typedef_curr.inverse_of = field_value # depends on [control=['if'], data=[]]
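A quick runnable check of the prefix parsing, with a minimal stand-in for the goatools typedef record (assumes add_to_typedef, as fixed above, is in scope):

class TypeDef:
    def __init__(self):
        self.item_id = None
        self.name = None
        self.namespace = None
        self.transitive_over = []
        self.inverse_of = None

td = TypeDef()
for line in ('id: part_of',
             'name: part of',
             'transitive_over: occurs_in ! occurs in'):
    add_to_typedef(td, line)
print(td.item_id, td.name, td.transitive_over)   # part_of part of ['occurs_in']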
def periodogram_auto(self, oversampling=5, nyquist_factor=3, return_periods=True): """Compute the periodogram on an automatically-determined grid This function uses heuristic arguments to choose a suitable frequency grid for the data. Note that depending on the data window function, the model may be sensitive to periodicity at higher frequencies than this function returns! The final number of frequencies will be Nf = oversampling * nyquist_factor * len(t) / 2 Parameters ---------- oversampling : float the number of samples per approximate peak width nyquist_factor : float the highest frequency, in units of the nyquist frequency for points spread uniformly through the data range. Returns ------- period : ndarray the grid of periods power : ndarray the power at each frequency """ N = len(self.t) T = np.max(self.t) - np.min(self.t) df = 1. / T / oversampling f0 = df Nf = int(0.5 * oversampling * nyquist_factor * N) freq = f0 + df * np.arange(Nf) return 1. / freq, self._score_frequency_grid(f0, df, Nf)
def function[periodogram_auto, parameter[self, oversampling, nyquist_factor, return_periods]]: constant[Compute the periodogram on an automatically-determined grid This function uses heuristic arguments to choose a suitable frequency grid for the data. Note that depending on the data window function, the model may be sensitive to periodicity at higher frequencies than this function returns! The final number of frequencies will be Nf = oversampling * nyquist_factor * len(t) / 2 Parameters ---------- oversampling : float the number of samples per approximate peak width nyquist_factor : float the highest frequency, in units of the nyquist frequency for points spread uniformly through the data range. Returns ------- period : ndarray the grid of periods power : ndarray the power at each frequency ] variable[N] assign[=] call[name[len], parameter[name[self].t]] variable[T] assign[=] binary_operation[call[name[np].max, parameter[name[self].t]] - call[name[np].min, parameter[name[self].t]]] variable[df] assign[=] binary_operation[binary_operation[constant[1.0] / name[T]] / name[oversampling]] variable[f0] assign[=] name[df] variable[Nf] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[constant[0.5] * name[oversampling]] * name[nyquist_factor]] * name[N]]]] variable[freq] assign[=] binary_operation[name[f0] + binary_operation[name[df] * call[name[np].arange, parameter[name[Nf]]]]] return[tuple[[<ast.BinOp object at 0x7da1b06517e0>, <ast.Call object at 0x7da1b0653280>]]]
keyword[def] identifier[periodogram_auto] ( identifier[self] , identifier[oversampling] = literal[int] , identifier[nyquist_factor] = literal[int] , identifier[return_periods] = keyword[True] ): literal[string] identifier[N] = identifier[len] ( identifier[self] . identifier[t] ) identifier[T] = identifier[np] . identifier[max] ( identifier[self] . identifier[t] )- identifier[np] . identifier[min] ( identifier[self] . identifier[t] ) identifier[df] = literal[int] / identifier[T] / identifier[oversampling] identifier[f0] = identifier[df] identifier[Nf] = identifier[int] ( literal[int] * identifier[oversampling] * identifier[nyquist_factor] * identifier[N] ) identifier[freq] = identifier[f0] + identifier[df] * identifier[np] . identifier[arange] ( identifier[Nf] ) keyword[return] literal[int] / identifier[freq] , identifier[self] . identifier[_score_frequency_grid] ( identifier[f0] , identifier[df] , identifier[Nf] )
def periodogram_auto(self, oversampling=5, nyquist_factor=3, return_periods=True): """Compute the periodogram on an automatically-determined grid This function uses heuristic arguments to choose a suitable frequency grid for the data. Note that depending on the data window function, the model may be sensitive to periodicity at higher frequencies than this function returns! The final number of frequencies will be Nf = oversampling * nyquist_factor * len(t) / 2 Parameters ---------- oversampling : float the number of samples per approximate peak width nyquist_factor : float the highest frequency, in units of the nyquist frequency for points spread uniformly through the data range. Returns ------- period : ndarray the grid of periods power : ndarray the power at each frequency """ N = len(self.t) T = np.max(self.t) - np.min(self.t) df = 1.0 / T / oversampling f0 = df Nf = int(0.5 * oversampling * nyquist_factor * N) freq = f0 + df * np.arange(Nf) return (1.0 / freq, self._score_frequency_grid(f0, df, Nf))
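The grid construction evaluated stand-alone for a toy time series (numpy assumed); note the longest probed period is oversampling times the data baseline:

import numpy as np

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 100, 50))
oversampling, nyquist_factor = 5, 3
T = t.max() - t.min()
df = 1.0 / T / oversampling            # frequency step set by the baseline
Nf = int(0.5 * oversampling * nyquist_factor * len(t))
freq = df + df * np.arange(Nf)
print(Nf, 1.0 / freq[0], 1.0 / freq[-1])   # 375 frequencies; 1/freq[0] == 5*T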
def parent(self): """ Return the parent scope. :return: FoldScope or None """ if TextBlockHelper.get_fold_lvl(self._trigger) > 0 and \ self._trigger.blockNumber(): block = self._trigger.previous() ref_lvl = self.trigger_level - 1 while (block.blockNumber() and (not TextBlockHelper.is_fold_trigger(block) or TextBlockHelper.get_fold_lvl(block) > ref_lvl)): block = block.previous() try: return FoldScope(block) except ValueError: return None return None
def function[parent, parameter[self]]: constant[ Return the parent scope. :return: FoldScope or None ] if <ast.BoolOp object at 0x7da18f721420> begin[:] variable[block] assign[=] call[name[self]._trigger.previous, parameter[]] variable[ref_lvl] assign[=] binary_operation[name[self].trigger_level - constant[1]] while <ast.BoolOp object at 0x7da20cabd990> begin[:] variable[block] assign[=] call[name[block].previous, parameter[]] <ast.Try object at 0x7da2041da0e0> return[constant[None]]
keyword[def] identifier[parent] ( identifier[self] ): literal[string] keyword[if] identifier[TextBlockHelper] . identifier[get_fold_lvl] ( identifier[self] . identifier[_trigger] )> literal[int] keyword[and] identifier[self] . identifier[_trigger] . identifier[blockNumber] (): identifier[block] = identifier[self] . identifier[_trigger] . identifier[previous] () identifier[ref_lvl] = identifier[self] . identifier[trigger_level] - literal[int] keyword[while] ( identifier[block] . identifier[blockNumber] () keyword[and] ( keyword[not] identifier[TextBlockHelper] . identifier[is_fold_trigger] ( identifier[block] ) keyword[or] identifier[TextBlockHelper] . identifier[get_fold_lvl] ( identifier[block] )> identifier[ref_lvl] )): identifier[block] = identifier[block] . identifier[previous] () keyword[try] : keyword[return] identifier[FoldScope] ( identifier[block] ) keyword[except] identifier[ValueError] : keyword[return] keyword[None] keyword[return] keyword[None]
def parent(self): """ Return the parent scope. :return: FoldScope or None """ if TextBlockHelper.get_fold_lvl(self._trigger) > 0 and self._trigger.blockNumber(): block = self._trigger.previous() ref_lvl = self.trigger_level - 1 while block.blockNumber() and (not TextBlockHelper.is_fold_trigger(block) or TextBlockHelper.get_fold_lvl(block) > ref_lvl): block = block.previous() # depends on [control=['while'], data=[]] try: return FoldScope(block) # depends on [control=['try'], data=[]] except ValueError: return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return None
def p_expression_lessthan(self, p): 'expression : expression LT expression' p[0] = LessThan(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
def function[p_expression_lessthan, parameter[self, p]]: constant[expression : expression LT expression] call[name[p]][constant[0]] assign[=] call[name[LessThan], parameter[call[name[p]][constant[1]], call[name[p]][constant[3]]]] call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]]
keyword[def] identifier[p_expression_lessthan] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[LessThan] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] ))
def p_expression_lessthan(self, p): """expression : expression LT expression""" p[0] = LessThan(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
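PLY discovers grammar productions through rule-function docstrings, which is why the production string is the first statement of the rule above. A stand-alone illustration of that mechanism ('<' tuple construction here is illustrative, not the project's LessThan node):

def p_expression_lessthan(p):
    'expression : expression LT expression'
    p[0] = ('<', p[1], p[3])

print(p_expression_lessthan.__doc__)   # expression : expression LT expression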
def raise_not_enough_arguments(self, string): """ Raises an errors.ArgumentError if not enough arguments were supplied. Takes care of formatting for detailed error messages. Arguments: string (str): The string of the phrase for which there weren't enough arguments. Raises: errors.ArgumentError with a detailed error message. """ requested = errors.number(self.counter + 1) number = len(self.positional) verb = "was" if number == 1 else "were" what = "Requested {} formatting argument for "\ "'{}' but only {} {} supplied!" what = what.format(requested, string, number, verb) raise errors.ArgumentError(what)
def function[raise_not_enough_arguments, parameter[self, string]]: constant[ Raises an errors.ArgumentError if not enough arguments were supplied. Takes care of formatting for detailed error messages. Arguments: string (str): The string of the phrase for which there weren't enough arguments. Raises: errors.ArgumentError with a detailed error message. ] variable[requested] assign[=] call[name[errors].number, parameter[binary_operation[name[self].counter + constant[1]]]] variable[number] assign[=] call[name[len], parameter[name[self].positional]] variable[verb] assign[=] <ast.IfExp object at 0x7da20c796830> variable[what] assign[=] constant[Requested {} formatting argument for '{}' but only {} {} supplied!] variable[what] assign[=] call[name[what].format, parameter[name[requested], name[string], name[number], name[verb]]] <ast.Raise object at 0x7da20c796860>
keyword[def] identifier[raise_not_enough_arguments] ( identifier[self] , identifier[string] ): literal[string] identifier[requested] = identifier[errors] . identifier[number] ( identifier[self] . identifier[counter] + literal[int] ) identifier[number] = identifier[len] ( identifier[self] . identifier[positional] ) identifier[verb] = literal[string] keyword[if] identifier[number] == literal[int] keyword[else] literal[string] identifier[what] = literal[string] literal[string] identifier[what] = identifier[what] . identifier[format] ( identifier[requested] , identifier[string] , identifier[number] , identifier[verb] ) keyword[raise] identifier[errors] . identifier[ArgumentError] ( identifier[what] )
def raise_not_enough_arguments(self, string): """ Raises an errors.ArgumentError if not enough arguments were supplied. Takes care of formatting for detailed error messages. Arguments: string (str): The string of the phrase for which there weren't enough arguments. Raises: errors.ArgumentError with a detailed error message. """ requested = errors.number(self.counter + 1) number = len(self.positional) verb = 'was' if number == 1 else 'were' what = "Requested {} formatting argument for '{}' but only {} {} supplied!" what = what.format(requested, string, number, verb) raise errors.ArgumentError(what)
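What the assembled message looks like when one argument was supplied but a second is requested; errors.number presumably renders ordinals, so it is hard-coded here:

requested, number = 'second', 1        # errors.number(counter + 1) -> 'second' (assumed)
verb = 'was' if number == 1 else 'were'
what = ("Requested {} formatting argument for "
        "'{}' but only {} {} supplied!").format(requested, '<bold>hi</bold>', number, verb)
print(what)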
def configValue(self): """ Creates a QPen made of the children's config values. """ if not self.data: return None else: pen = QtGui.QPen() pen.setCosmetic(True) pen.setColor(self.colorCti.configValue) style = self.styleCti.configValue if style is not None: pen.setStyle(style) pen.setWidthF(self.widthCti.configValue) return pen
def function[configValue, parameter[self]]: constant[ Creates a QPen made of the children's config values. ] if <ast.UnaryOp object at 0x7da1b04d1b10> begin[:] return[constant[None]]
keyword[def] identifier[configValue] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[data] : keyword[return] keyword[None] keyword[else] : identifier[pen] = identifier[QtGui] . identifier[QPen] () identifier[pen] . identifier[setCosmetic] ( keyword[True] ) identifier[pen] . identifier[setColor] ( identifier[self] . identifier[colorCti] . identifier[configValue] ) identifier[style] = identifier[self] . identifier[styleCti] . identifier[configValue] keyword[if] identifier[style] keyword[is] keyword[not] keyword[None] : identifier[pen] . identifier[setStyle] ( identifier[style] ) identifier[pen] . identifier[setWidthF] ( identifier[self] . identifier[widthCti] . identifier[configValue] ) keyword[return] identifier[pen]
def configValue(self): """ Creates a QPen made of the children's config values. """ if not self.data: return None # depends on [control=['if'], data=[]] else: pen = QtGui.QPen() pen.setCosmetic(True) pen.setColor(self.colorCti.configValue) style = self.styleCti.configValue if style is not None: pen.setStyle(style) # depends on [control=['if'], data=['style']] pen.setWidthF(self.widthCti.configValue) return pen
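A hedged usage sketch of the pen the getter assembles (requires a Qt binding; PyQt5 shown, and QPen works without a running QApplication):

from PyQt5 import QtCore, QtGui

pen = QtGui.QPen()
pen.setCosmetic(True)                  # constant width under view transforms
pen.setColor(QtGui.QColor('red'))
pen.setStyle(QtCore.Qt.DashLine)
pen.setWidthF(1.5)
print(pen.widthF(), pen.isCosmetic())  # 1.5 True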
def _get_base_component(self): """Returns Component protobuf message""" comp = topology_pb2.Component() comp.name = self.name comp.spec = topology_pb2.ComponentObjectSpec.Value("PYTHON_CLASS_NAME") comp.class_name = self.python_class_path comp.config.CopyFrom(self._get_comp_config()) return comp
def function[_get_base_component, parameter[self]]: constant[Returns Component protobuf message] variable[comp] assign[=] call[name[topology_pb2].Component, parameter[]] name[comp].name assign[=] name[self].name name[comp].spec assign[=] call[name[topology_pb2].ComponentObjectSpec.Value, parameter[constant[PYTHON_CLASS_NAME]]] name[comp].class_name assign[=] name[self].python_class_path call[name[comp].config.CopyFrom, parameter[call[name[self]._get_comp_config, parameter[]]]] return[name[comp]]
keyword[def] identifier[_get_base_component] ( identifier[self] ): literal[string] identifier[comp] = identifier[topology_pb2] . identifier[Component] () identifier[comp] . identifier[name] = identifier[self] . identifier[name] identifier[comp] . identifier[spec] = identifier[topology_pb2] . identifier[ComponentObjectSpec] . identifier[Value] ( literal[string] ) identifier[comp] . identifier[class_name] = identifier[self] . identifier[python_class_path] identifier[comp] . identifier[config] . identifier[CopyFrom] ( identifier[self] . identifier[_get_comp_config] ()) keyword[return] identifier[comp]
def _get_base_component(self): """Returns Component protobuf message""" comp = topology_pb2.Component() comp.name = self.name comp.spec = topology_pb2.ComponentObjectSpec.Value('PYTHON_CLASS_NAME') comp.class_name = self.python_class_path comp.config.CopyFrom(self._get_comp_config()) return comp
def cdsparse(self, record): """ Finds core genes, and records gene names and sequences in dictionaries :param record: SeqIO record """ try: # Find genes that are present in all strains of interest - the number of times the gene is found is # equal to the number of strains. Earlier parsing ensures that the same gene is not present in a strain # more than once if self.genes[self.genenames[record.id]] == len(self.runmetadata.samples): # Add the gene names and sequences to the appropriate dictionaries try: self.genesequence[self.genenames[record.id]].add(str(record.seq)) # Initialise the dictionary as required, then populate as above except KeyError: self.genesequence[self.genenames[record.id]] = set() self.genesequence[self.genenames[record.id]].add(str(record.seq)) try: self.coresequence[str(record.seq)].add(record.id) except KeyError: self.coresequence[str(record.seq)] = set() self.coresequence[str(record.seq)].add(record.id) except KeyError: pass
def function[cdsparse, parameter[self, record]]: constant[ Finds core genes, and records gene names and sequences in dictionaries :param record: SeqIO record ] <ast.Try object at 0x7da1b1e094b0>
keyword[def] identifier[cdsparse] ( identifier[self] , identifier[record] ): literal[string] keyword[try] : keyword[if] identifier[self] . identifier[genes] [ identifier[self] . identifier[genenames] [ identifier[record] . identifier[id] ]]== identifier[len] ( identifier[self] . identifier[runmetadata] . identifier[samples] ): keyword[try] : identifier[self] . identifier[genesequence] [ identifier[self] . identifier[genenames] [ identifier[record] . identifier[id] ]]. identifier[add] ( identifier[str] ( identifier[record] . identifier[seq] )) keyword[except] identifier[KeyError] : identifier[self] . identifier[genesequence] [ identifier[self] . identifier[genenames] [ identifier[record] . identifier[id] ]]= identifier[set] () identifier[self] . identifier[genesequence] [ identifier[self] . identifier[genenames] [ identifier[record] . identifier[id] ]]. identifier[add] ( identifier[str] ( identifier[record] . identifier[seq] )) keyword[try] : identifier[self] . identifier[coresequence] [ identifier[str] ( identifier[record] . identifier[seq] )]. identifier[add] ( identifier[record] . identifier[id] ) keyword[except] identifier[KeyError] : identifier[self] . identifier[coresequence] [ identifier[str] ( identifier[record] . identifier[seq] )]= identifier[set] () identifier[self] . identifier[coresequence] [ identifier[str] ( identifier[record] . identifier[seq] )]. identifier[add] ( identifier[record] . identifier[id] ) keyword[except] identifier[KeyError] : keyword[pass]
def cdsparse(self, record): """ Finds core genes, and records gene names and sequences in dictionaries :param record: SeqIO record """ try: # Find genes that are present in all strains of interest - the number of times the gene is found is # equal to the number of strains. Earlier parsing ensures that the same gene is not present in a strain # more than once if self.genes[self.genenames[record.id]] == len(self.runmetadata.samples): # Add the gene names and sequences to the appropriate dictionaries try: self.genesequence[self.genenames[record.id]].add(str(record.seq)) # depends on [control=['try'], data=[]] # Initialise the dictionary as required, then populate as above except KeyError: self.genesequence[self.genenames[record.id]] = set() self.genesequence[self.genenames[record.id]].add(str(record.seq)) # depends on [control=['except'], data=[]] try: self.coresequence[str(record.seq)].add(record.id) # depends on [control=['try'], data=[]] except KeyError: self.coresequence[str(record.seq)] = set() self.coresequence[str(record.seq)].add(record.id) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]]
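The try/except KeyError blocks in cdsparse above implement a dict-of-sets: catch the missing key, create an empty set, then add. A minimal standalone sketch (the gene name and sequences are made-up values) showing that dict.setdefault collapses the same pattern into one call:

genesequence = {}
# setdefault returns the existing set, or inserts and returns a new empty one,
# mirroring the try/except KeyError initialisation above.
genesequence.setdefault("gyrB", set()).add("ATGACCGAA")
genesequence.setdefault("gyrB", set()).add("ATGACCGAG")
print(genesequence)  # {'gyrB': {'ATGACCGAA', 'ATGACCGAG'}}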
def get_by_provider_display_name(self, provider_display_name): """ Gets a SAN Manager by provider display name. Args: provider_display_name: Name of the Provider Display Name Returns: dict: SAN Manager. """ san_managers = self._client.get_all() result = [x for x in san_managers if x['providerDisplayName'] == provider_display_name] return result[0] if result else None
def function[get_by_provider_display_name, parameter[self, provider_display_name]]: constant[ Gets a SAN Manager by provider display name. Args: provider_display_name: Name of the Provider Display Name Returns: dict: SAN Manager. ] variable[san_managers] assign[=] call[name[self]._client.get_all, parameter[]] variable[result] assign[=] <ast.ListComp object at 0x7da20c76d000> return[<ast.IfExp object at 0x7da204961930>]
keyword[def] identifier[get_by_provider_display_name] ( identifier[self] , identifier[provider_display_name] ): literal[string] identifier[san_managers] = identifier[self] . identifier[_client] . identifier[get_all] () identifier[result] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[san_managers] keyword[if] identifier[x] [ literal[string] ]== identifier[provider_display_name] ] keyword[return] identifier[result] [ literal[int] ] keyword[if] identifier[result] keyword[else] keyword[None]
def get_by_provider_display_name(self, provider_display_name): """ Gets a SAN Manager by provider display name. Args: provider_display_name: Name of the Provider Display Name Returns: dict: SAN Manager. """ san_managers = self._client.get_all() result = [x for x in san_managers if x['providerDisplayName'] == provider_display_name] return result[0] if result else None
async def executemany(self, command: str, args, *, timeout: float=None): """Execute an SQL *command* for each sequence of arguments in *args*. Example: .. code-block:: pycon >>> await con.executemany(''' ... INSERT INTO mytab (a) VALUES ($1, $2, $3); ... ''', [(1, 2, 3), (4, 5, 6)]) :param command: Command to execute. :param args: An iterable containing sequences of arguments. :param float timeout: Optional timeout value in seconds. :return None: This method discards the results of the operations. .. note:: When inserting a large number of rows, use :meth:`Connection.copy_records_to_table()` instead, it is much more efficient for this purpose. .. versionadded:: 0.7.0 .. versionchanged:: 0.11.0 `timeout` became a keyword-only parameter. """ self._check_open() return await self._executemany(command, args, timeout)
<ast.AsyncFunctionDef object at 0x7da1b1957040>
keyword[async] keyword[def] identifier[executemany] ( identifier[self] , identifier[command] : identifier[str] , identifier[args] ,*, identifier[timeout] : identifier[float] = keyword[None] ): literal[string] identifier[self] . identifier[_check_open] () keyword[return] keyword[await] identifier[self] . identifier[_executemany] ( identifier[command] , identifier[args] , identifier[timeout] )
async def executemany(self, command: str, args, *, timeout: float=None): """Execute an SQL *command* for each sequence of arguments in *args*. Example: .. code-block:: pycon >>> await con.executemany(''' ... INSERT INTO mytab (a) VALUES ($1, $2, $3); ... ''', [(1, 2, 3), (4, 5, 6)]) :param command: Command to execute. :param args: An iterable containing sequences of arguments. :param float timeout: Optional timeout value in seconds. :return None: This method discards the results of the operations. .. note:: When inserting a large number of rows, use :meth:`Connection.copy_records_to_table()` instead, it is much more efficient for this purpose. .. versionadded:: 0.7.0 .. versionchanged:: 0.11.0 `timeout` became a keyword-only parameter. """ self._check_open() return await self._executemany(command, args, timeout)
def pctile(self,pct,res=1000): """Returns the desired percentile of the distribution. Will only work if properly normalized. Designed to mimic the `ppf` method of the `scipy.stats` random variate objects. Works by gridding the CDF at a given resolution and matching the nearest point. NB, this is of course not as precise as an analytic ppf. Parameters ---------- pct : float Percentile between 0 and 1. res : int, optional The resolution at which to grid the CDF to find the percentile. Returns ------- percentile : float """ grid = np.linspace(self.minval,self.maxval,res) return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
def function[pctile, parameter[self, pct, res]]: constant[Returns the desired percentile of the distribution. Will only work if properly normalized. Designed to mimic the `ppf` method of the `scipy.stats` random variate objects. Works by gridding the CDF at a given resolution and matching the nearest point. NB, this is of course not as precise as an analytic ppf. Parameters ---------- pct : float Percentile between 0 and 1. res : int, optional The resolution at which to grid the CDF to find the percentile. Returns ------- percentile : float ] variable[grid] assign[=] call[name[np].linspace, parameter[name[self].minval, name[self].maxval, name[res]]] return[call[name[grid]][call[name[np].argmin, parameter[call[name[np].absolute, parameter[binary_operation[name[pct] - call[name[self].cdf, parameter[name[grid]]]]]]]]]]
keyword[def] identifier[pctile] ( identifier[self] , identifier[pct] , identifier[res] = literal[int] ): literal[string] identifier[grid] = identifier[np] . identifier[linspace] ( identifier[self] . identifier[minval] , identifier[self] . identifier[maxval] , identifier[res] ) keyword[return] identifier[grid] [ identifier[np] . identifier[argmin] ( identifier[np] . identifier[absolute] ( identifier[pct] - identifier[self] . identifier[cdf] ( identifier[grid] )))]
def pctile(self, pct, res=1000): """Returns the desired percentile of the distribution. Will only work if properly normalized. Designed to mimic the `ppf` method of the `scipy.stats` random variate objects. Works by gridding the CDF at a given resolution and matching the nearest point. NB, this is of course not as precise as an analytic ppf. Parameters ---------- pct : float Percentile between 0 and 1. res : int, optional The resolution at which to grid the CDF to find the percentile. Returns ------- percentile : float """ grid = np.linspace(self.minval, self.maxval, res) return grid[np.argmin(np.absolute(pct - self.cdf(grid)))]
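A standalone sketch of the grid-matching ppf idea used by pctile above; stats.norm.cdf stands in for the object's cdf and the bounds are made up, so this is illustrative rather than the class's actual API:

import numpy as np
from scipy import stats

def pctile_by_grid(cdf, minval, maxval, pct, res=1000):
    # Grid the CDF and return the grid point whose CDF value is closest to pct.
    grid = np.linspace(minval, maxval, res)
    return grid[np.argmin(np.abs(pct - cdf(grid)))]

print(pctile_by_grid(stats.norm.cdf, -5.0, 5.0, 0.5))  # approximately 0.0, the median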
def _finaliseRequest(self, request, status, content, mimetype='text/plain'): """ Finalises the request. @param request: The HTTP Request. @type request: C{http.Request} @param status: The HTTP status code. @type status: C{int} @param content: The content of the response. @type content: C{str} @param mimetype: The MIME type of the request. @type mimetype: C{str} """ request.setResponseCode(status) request.setHeader("Content-Type", mimetype) request.setHeader("Content-Length", str(len(content))) request.setHeader("Server", gateway.SERVER_NAME) request.write(content) request.finish()
def function[_finaliseRequest, parameter[self, request, status, content, mimetype]]: constant[ Finalises the request. @param request: The HTTP Request. @type request: C{http.Request} @param status: The HTTP status code. @type status: C{int} @param content: The content of the response. @type content: C{str} @param mimetype: The MIME type of the request. @type mimetype: C{str} ] call[name[request].setResponseCode, parameter[name[status]]] call[name[request].setHeader, parameter[constant[Content-Type], name[mimetype]]] call[name[request].setHeader, parameter[constant[Content-Length], call[name[str], parameter[call[name[len], parameter[name[content]]]]]]] call[name[request].setHeader, parameter[constant[Server], name[gateway].SERVER_NAME]] call[name[request].write, parameter[name[content]]] call[name[request].finish, parameter[]]
keyword[def] identifier[_finaliseRequest] ( identifier[self] , identifier[request] , identifier[status] , identifier[content] , identifier[mimetype] = literal[string] ): literal[string] identifier[request] . identifier[setResponseCode] ( identifier[status] ) identifier[request] . identifier[setHeader] ( literal[string] , identifier[mimetype] ) identifier[request] . identifier[setHeader] ( literal[string] , identifier[str] ( identifier[len] ( identifier[content] ))) identifier[request] . identifier[setHeader] ( literal[string] , identifier[gateway] . identifier[SERVER_NAME] ) identifier[request] . identifier[write] ( identifier[content] ) identifier[request] . identifier[finish] ()
def _finaliseRequest(self, request, status, content, mimetype='text/plain'): """ Finalises the request. @param request: The HTTP Request. @type request: C{http.Request} @param status: The HTTP status code. @type status: C{int} @param content: The content of the response. @type content: C{str} @param mimetype: The MIME type of the request. @type mimetype: C{str} """ request.setResponseCode(status) request.setHeader('Content-Type', mimetype) request.setHeader('Content-Length', str(len(content))) request.setHeader('Server', gateway.SERVER_NAME) request.write(content) request.finish()
def add_error(self, group, term, sub_term, value):
        """For records that are not defined as terms, add it to the errors list."""
        self._errors[(group, term, sub_term)] = value
def function[add_error, parameter[self, group, term, sub_term, value]]: constant[For records that are not defined as terms, add it to the errors list.] call[name[self]._errors][tuple[[<ast.Name object at 0x7da20c7943d0>, <ast.Name object at 0x7da20c795de0>, <ast.Name object at 0x7da20c796350>]]] assign[=] name[value]
keyword[def] identifier[add_error] ( identifier[self] , identifier[group] , identifier[term] , identifier[sub_term] , identifier[value] ): literal[string] identifier[self] . identifier[_errors] [( identifier[group] , identifier[term] , identifier[sub_term] )]= identifier[value]
def add_error(self, group, term, sub_term, value):
    """For records that are not defined as terms, add it to the errors list."""
    self._errors[group, term, sub_term] = value
def get_gif_frames(img): """ Extracts the frames from an animated gif. :param img: A PIL Image object :return: An array of PIL image objects, each corresponding to a frame in the animation. """ gif_frames = [] n = 0 while img: if img.mode != "RGB": image = img.convert(mode="RGB") else: image = img gif_frames.append(image) n += 1 try: img.seek(n) except EOFError: break return gif_frames
def function[get_gif_frames, parameter[img]]: constant[ Extracts the frames from an animated gif. :param img: A PIL Image object :return: An array of PIL image objects, each corresponding to a frame in the animation. ] variable[gif_frames] assign[=] list[[]] variable[n] assign[=] constant[0] while name[img] begin[:] if compare[name[img].mode not_equal[!=] constant[RGB]] begin[:] variable[image] assign[=] call[name[img].convert, parameter[]] call[name[gif_frames].append, parameter[name[image]]] <ast.AugAssign object at 0x7da1b1395f60> <ast.Try object at 0x7da1b1395240> return[name[gif_frames]]
keyword[def] identifier[get_gif_frames] ( identifier[img] ): literal[string] identifier[gif_frames] =[] identifier[n] = literal[int] keyword[while] identifier[img] : keyword[if] identifier[img] . identifier[mode] != literal[string] : identifier[image] = identifier[img] . identifier[convert] ( identifier[mode] = literal[string] ) keyword[else] : identifier[image] = identifier[img] identifier[gif_frames] . identifier[append] ( identifier[image] ) identifier[n] += literal[int] keyword[try] : identifier[img] . identifier[seek] ( identifier[n] ) keyword[except] identifier[EOFError] : keyword[break] keyword[return] identifier[gif_frames]
def get_gif_frames(img): """ Extracts the frames from an animated gif. :param img: A PIL Image object :return: An array of PIL image objects, each corresponding to a frame in the animation. """ gif_frames = [] n = 0 while img: if img.mode != 'RGB': image = img.convert(mode='RGB') # depends on [control=['if'], data=[]] else: image = img gif_frames.append(image) n += 1 try: img.seek(n) # depends on [control=['try'], data=[]] except EOFError: break # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] return gif_frames
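The seek-until-EOFError loop above is the standard way to walk GIF frames in Pillow. A self-contained variant that only counts frames (the file path is a placeholder):

from PIL import Image

def gif_frame_count(path):
    # Seek successive frames until Pillow raises EOFError, as in the loop above.
    img = Image.open(path)
    n = 0
    while True:
        try:
            img.seek(n)
        except EOFError:
            return n
        n += 1

# gif_frame_count("animation.gif")  # placeholder filename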
def namedb_create(path, genesis_block): """ Create a sqlite3 db at the given path. Create all the tables and indexes we need. """ global BLOCKSTACK_DB_SCRIPT if os.path.exists( path ): raise Exception("Database '%s' already exists" % path) lines = [l + ";" for l in BLOCKSTACK_DB_SCRIPT.split(";")] con = sqlite3.connect( path, isolation_level=None, timeout=2**30 ) for line in lines: db_query_execute(con, line, ()) con.row_factory = namedb_row_factory # create genesis block namedb_create_token_genesis(con, genesis_block['rows'], genesis_block['history']) return con
def function[namedb_create, parameter[path, genesis_block]]: constant[ Create a sqlite3 db at the given path. Create all the tables and indexes we need. ] <ast.Global object at 0x7da1b17235b0> if call[name[os].path.exists, parameter[name[path]]] begin[:] <ast.Raise object at 0x7da1b1722200> variable[lines] assign[=] <ast.ListComp object at 0x7da20c6e59c0> variable[con] assign[=] call[name[sqlite3].connect, parameter[name[path]]] for taget[name[line]] in starred[name[lines]] begin[:] call[name[db_query_execute], parameter[name[con], name[line], tuple[[]]]] name[con].row_factory assign[=] name[namedb_row_factory] call[name[namedb_create_token_genesis], parameter[name[con], call[name[genesis_block]][constant[rows]], call[name[genesis_block]][constant[history]]]] return[name[con]]
keyword[def] identifier[namedb_create] ( identifier[path] , identifier[genesis_block] ): literal[string] keyword[global] identifier[BLOCKSTACK_DB_SCRIPT] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[raise] identifier[Exception] ( literal[string] % identifier[path] ) identifier[lines] =[ identifier[l] + literal[string] keyword[for] identifier[l] keyword[in] identifier[BLOCKSTACK_DB_SCRIPT] . identifier[split] ( literal[string] )] identifier[con] = identifier[sqlite3] . identifier[connect] ( identifier[path] , identifier[isolation_level] = keyword[None] , identifier[timeout] = literal[int] ** literal[int] ) keyword[for] identifier[line] keyword[in] identifier[lines] : identifier[db_query_execute] ( identifier[con] , identifier[line] ,()) identifier[con] . identifier[row_factory] = identifier[namedb_row_factory] identifier[namedb_create_token_genesis] ( identifier[con] , identifier[genesis_block] [ literal[string] ], identifier[genesis_block] [ literal[string] ]) keyword[return] identifier[con]
def namedb_create(path, genesis_block): """ Create a sqlite3 db at the given path. Create all the tables and indexes we need. """ global BLOCKSTACK_DB_SCRIPT if os.path.exists(path): raise Exception("Database '%s' already exists" % path) # depends on [control=['if'], data=[]] lines = [l + ';' for l in BLOCKSTACK_DB_SCRIPT.split(';')] con = sqlite3.connect(path, isolation_level=None, timeout=2 ** 30) for line in lines: db_query_execute(con, line, ()) # depends on [control=['for'], data=['line']] con.row_factory = namedb_row_factory # create genesis block namedb_create_token_genesis(con, genesis_block['rows'], genesis_block['history']) return con
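Splitting the schema script on ';' as above breaks if any statement contains a literal semicolon; sqlite3's executescript runs a multi-statement script directly. A sketch on an in-memory database with a made-up two-table schema (not the Blockstack schema):

import sqlite3

con = sqlite3.connect(":memory:")
# executescript handles statement splitting itself, including semicolons
# inside string literals.
con.executescript("""
CREATE TABLE names (id INTEGER PRIMARY KEY, name TEXT);
CREATE INDEX idx_name ON names (name);
""")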
def cuboid(target, throat_diameter='throat.diameter', throat_length='throat.length'): r""" Calculate surface area for a cuboid throat Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_diameter : string Dictionary key to the throat diameter array. Default is 'throat.diameter'. throat_length : string Dictionary key to the throat length array. Default is 'throat.length'. """ D = target[throat_diameter] L = target[throat_length] value = 4*D*L return value
def function[cuboid, parameter[target, throat_diameter, throat_length]]: constant[ Calculate surface area for a cuboid throat Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_diameter : string Dictionary key to the throat diameter array. Default is 'throat.diameter'. throat_length : string Dictionary key to the throat length array. Default is 'throat.length'. ] variable[D] assign[=] call[name[target]][name[throat_diameter]] variable[L] assign[=] call[name[target]][name[throat_length]] variable[value] assign[=] binary_operation[binary_operation[constant[4] * name[D]] * name[L]] return[name[value]]
keyword[def] identifier[cuboid] ( identifier[target] , identifier[throat_diameter] = literal[string] , identifier[throat_length] = literal[string] ): literal[string] identifier[D] = identifier[target] [ identifier[throat_diameter] ] identifier[L] = identifier[target] [ identifier[throat_length] ] identifier[value] = literal[int] * identifier[D] * identifier[L] keyword[return] identifier[value]
def cuboid(target, throat_diameter='throat.diameter', throat_length='throat.length'): """ Calculate surface area for a cuboid throat Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_diameter : string Dictionary key to the throat diameter array. Default is 'throat.diameter'. throat_length : string Dictionary key to the throat length array. Default is 'throat.length'. """ D = target[throat_diameter] L = target[throat_length] value = 4 * D * L return value
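The value above is the lateral surface area of a square duct: four rectangular faces, each D wide and L long, so perimeter 4*D times length L. A quick check with made-up throat dimensions:

D = 1e-5   # throat "diameter" (side of the square cross-section), metres; assumed value
L = 4e-5   # throat length, metres; assumed value
print(4 * D * L)  # 1.6e-09, the combined area of the four side faces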
def move_group_in_parent(self, group = None, index = None):
        """Move group to another position in group's parent.

        index must be a valid index of group.parent.children

        """

        if group is None or index is None:
            raise KPError("group and index must be set")
        elif type(group) is not v1Group or type(index) is not int:
            raise KPError("group must be a v1Group-instance and index "
                          "must be an integer.")
        elif group not in self.groups:
            raise KPError("Given group doesn't exist")
        elif index < 0 or index >= len(group.parent.children):
            raise KPError("index must be a valid index of group.parent.children")
        else:
            group_at_index = group.parent.children[index]
            pos_in_parent = group.parent.children.index(group)
            pos_in_groups = self.groups.index(group)
            pos_in_groups2 = self.groups.index(group_at_index)

            group.parent.children[index] = group
            group.parent.children[pos_in_parent] = group_at_index
            self.groups[pos_in_groups2] = group
            self.groups[pos_in_groups] = group_at_index
            if group.children:
                self._move_group_helper(group)
            if group_at_index.children:
                self._move_group_helper(group_at_index)
            group.last_mod = datetime.now().replace(microsecond=0)
            return True
def function[move_group_in_parent, parameter[self, group, index]]: constant[Move group to another position in group's parent. index must be a valid index of group.parent.groups ] if <ast.BoolOp object at 0x7da1b2519570> begin[:] <ast.Raise object at 0x7da1b25196c0>
keyword[def] identifier[move_group_in_parent] ( identifier[self] , identifier[group] = keyword[None] , identifier[index] = keyword[None] ): literal[string] keyword[if] identifier[group] keyword[is] keyword[None] keyword[or] identifier[index] keyword[is] keyword[None] : keyword[raise] identifier[KPError] ( literal[string] ) keyword[elif] identifier[type] ( identifier[group] ) keyword[is] keyword[not] identifier[v1Group] keyword[or] identifier[type] ( identifier[index] ) keyword[is] keyword[not] identifier[int] : keyword[raise] identifier[KPError] ( literal[string] literal[string] ) keyword[elif] identifier[group] keyword[not] keyword[in] identifier[self] . identifier[groups] : keyword[raise] identifier[KPError] ( literal[string] ) keyword[elif] identifier[index] < literal[int] keyword[or] identifier[index] >= identifier[len] ( identifier[group] . identifier[parent] . identifier[children] ): keyword[raise] identifier[KPError] ( literal[string] ) keyword[else] : identifier[group_at_index] = identifier[group] . identifier[parent] . identifier[children] [ identifier[index] ] identifier[pos_in_parent] = identifier[group] . identifier[parent] . identifier[children] . identifier[index] ( identifier[group] ) identifier[pos_in_groups] = identifier[self] . identifier[groups] . identifier[index] ( identifier[group] ) identifier[pos_in_groups2] = identifier[self] . identifier[groups] . identifier[index] ( identifier[group_at_index] ) identifier[group] . identifier[parent] . identifier[children] [ identifier[index] ]= identifier[group] identifier[group] . identifier[parent] . identifier[children] [ identifier[pos_in_parent] ]= identifier[group_at_index] identifier[self] . identifier[groups] [ identifier[pos_in_groups2] ]= identifier[group] identifier[self] . identifier[groups] [ identifier[pos_in_groups] ]= identifier[group_at_index] keyword[if] identifier[group] . identifier[children] : identifier[self] . identifier[_move_group_helper] ( identifier[group] ) keyword[if] identifier[group_at_index] . identifier[children] : identifier[self] . identifier[_move_group_helper] ( identifier[group_at_index] ) identifier[group] . identifier[last_mod] = identifier[datetime] . identifier[now] (). identifier[replace] ( identifier[microsecond] = literal[int] ) keyword[return] keyword[True]
def move_group_in_parent(self, group=None, index=None):
    """Move group to another position in group's parent.
    index must be a valid index of group.parent.children
    """
    if group is None or index is None:
        raise KPError('group and index must be set') # depends on [control=['if'], data=[]]
    elif type(group) is not v1Group or type(index) is not int:
        raise KPError('group must be a v1Group-instance and index must be an integer.') # depends on [control=['if'], data=[]]
    elif group not in self.groups:
        raise KPError("Given group doesn't exist") # depends on [control=['if'], data=[]]
    elif index < 0 or index >= len(group.parent.children):
        raise KPError('index must be a valid index of group.parent.children') # depends on [control=['if'], data=[]]
    else:
        group_at_index = group.parent.children[index]
        pos_in_parent = group.parent.children.index(group)
        pos_in_groups = self.groups.index(group)
        pos_in_groups2 = self.groups.index(group_at_index)
        group.parent.children[index] = group
        group.parent.children[pos_in_parent] = group_at_index
        self.groups[pos_in_groups2] = group
        self.groups[pos_in_groups] = group_at_index
        if group.children:
            self._move_group_helper(group) # depends on [control=['if'], data=[]]
        if group_at_index.children:
            self._move_group_helper(group_at_index) # depends on [control=['if'], data=[]]
        group.last_mod = datetime.now().replace(microsecond=0)
        return True
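Stripped of its validation, the body above is a two-position swap kept in sync across two lists. The core swap, sketched on a plain list with hypothetical names:

def swap_to_index(items, item, index):
    # Exchange `item` with whatever currently sits at `index`.
    j = items.index(item)
    items[index], items[j] = items[j], items[index]

xs = ["a", "b", "c"]
swap_to_index(xs, "c", 0)
print(xs)  # ['c', 'b', 'a']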
def _init_application(self, application=None):
        """Initialize application object for torext app; if an existing
        application is passed, just use it instead of making a new one"""
        if application:
            self.application = application
        else:
            self.application = self.make_application()
def function[_init_application, parameter[self, application]]: constant[Initialize application object for torext app; if an existing application is passed, just use it instead of making a new one] if name[application] begin[:] name[self].application assign[=] name[application]
keyword[def] identifier[_init_application] ( identifier[self] , identifier[application] = keyword[None] ): literal[string] keyword[if] identifier[application] : identifier[self] . identifier[application] = identifier[application] keyword[else] : identifier[self] . identifier[application] = identifier[self] . identifier[make_application] ()
def _init_application(self, application=None):
    """Initialize application object for torext app; if an existing
    application is passed, just use it instead of making a new one"""
    if application:
        self.application = application # depends on [control=['if'], data=[]]
    else:
        self.application = self.make_application()
def _write_content_types_stream(phys_writer, parts): """ Write ``[Content_Types].xml`` part to the physical package with an appropriate content type lookup target for each part in *parts*. """ content_types_blob = serialize_part_xml( _ContentTypesItem.xml_for(parts) ) phys_writer.write(CONTENT_TYPES_URI, content_types_blob)
def function[_write_content_types_stream, parameter[phys_writer, parts]]: constant[ Write ``[Content_Types].xml`` part to the physical package with an appropriate content type lookup target for each part in *parts*. ] variable[content_types_blob] assign[=] call[name[serialize_part_xml], parameter[call[name[_ContentTypesItem].xml_for, parameter[name[parts]]]]] call[name[phys_writer].write, parameter[name[CONTENT_TYPES_URI], name[content_types_blob]]]
keyword[def] identifier[_write_content_types_stream] ( identifier[phys_writer] , identifier[parts] ): literal[string] identifier[content_types_blob] = identifier[serialize_part_xml] ( identifier[_ContentTypesItem] . identifier[xml_for] ( identifier[parts] ) ) identifier[phys_writer] . identifier[write] ( identifier[CONTENT_TYPES_URI] , identifier[content_types_blob] )
def _write_content_types_stream(phys_writer, parts): """ Write ``[Content_Types].xml`` part to the physical package with an appropriate content type lookup target for each part in *parts*. """ content_types_blob = serialize_part_xml(_ContentTypesItem.xml_for(parts)) phys_writer.write(CONTENT_TYPES_URI, content_types_blob)
def stmt_lambdef_handle(self, original, loc, tokens): """Process multi-line lambdef statements.""" if len(tokens) == 2: params, stmts = tokens elif len(tokens) == 3: params, stmts, last = tokens if "tests" in tokens: stmts = stmts.asList() + ["return " + last] else: stmts = stmts.asList() + [last] else: raise CoconutInternalException("invalid statement lambda tokens", tokens) name = self.stmt_lambda_name() body = openindent + self.stmt_lambda_proc("\n".join(stmts)) + closeindent if isinstance(params, str): self.stmt_lambdas.append( "def " + name + params + ":\n" + body, ) else: params.insert(0, name) # construct match tokens self.stmt_lambdas.append( "".join(self.name_match_funcdef_handle(original, loc, params)) + body, ) return name
def function[stmt_lambdef_handle, parameter[self, original, loc, tokens]]: constant[Process multi-line lambdef statements.] if compare[call[name[len], parameter[name[tokens]]] equal[==] constant[2]] begin[:] <ast.Tuple object at 0x7da20c6c7ac0> assign[=] name[tokens] variable[name] assign[=] call[name[self].stmt_lambda_name, parameter[]] variable[body] assign[=] binary_operation[binary_operation[name[openindent] + call[name[self].stmt_lambda_proc, parameter[call[constant[ ].join, parameter[name[stmts]]]]]] + name[closeindent]] if call[name[isinstance], parameter[name[params], name[str]]] begin[:] call[name[self].stmt_lambdas.append, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[def ] + name[name]] + name[params]] + constant[: ]] + name[body]]]] return[name[name]]
keyword[def] identifier[stmt_lambdef_handle] ( identifier[self] , identifier[original] , identifier[loc] , identifier[tokens] ): literal[string] keyword[if] identifier[len] ( identifier[tokens] )== literal[int] : identifier[params] , identifier[stmts] = identifier[tokens] keyword[elif] identifier[len] ( identifier[tokens] )== literal[int] : identifier[params] , identifier[stmts] , identifier[last] = identifier[tokens] keyword[if] literal[string] keyword[in] identifier[tokens] : identifier[stmts] = identifier[stmts] . identifier[asList] ()+[ literal[string] + identifier[last] ] keyword[else] : identifier[stmts] = identifier[stmts] . identifier[asList] ()+[ identifier[last] ] keyword[else] : keyword[raise] identifier[CoconutInternalException] ( literal[string] , identifier[tokens] ) identifier[name] = identifier[self] . identifier[stmt_lambda_name] () identifier[body] = identifier[openindent] + identifier[self] . identifier[stmt_lambda_proc] ( literal[string] . identifier[join] ( identifier[stmts] ))+ identifier[closeindent] keyword[if] identifier[isinstance] ( identifier[params] , identifier[str] ): identifier[self] . identifier[stmt_lambdas] . identifier[append] ( literal[string] + identifier[name] + identifier[params] + literal[string] + identifier[body] , ) keyword[else] : identifier[params] . identifier[insert] ( literal[int] , identifier[name] ) identifier[self] . identifier[stmt_lambdas] . identifier[append] ( literal[string] . identifier[join] ( identifier[self] . identifier[name_match_funcdef_handle] ( identifier[original] , identifier[loc] , identifier[params] )) + identifier[body] , ) keyword[return] identifier[name]
def stmt_lambdef_handle(self, original, loc, tokens): """Process multi-line lambdef statements.""" if len(tokens) == 2: (params, stmts) = tokens # depends on [control=['if'], data=[]] elif len(tokens) == 3: (params, stmts, last) = tokens if 'tests' in tokens: stmts = stmts.asList() + ['return ' + last] # depends on [control=['if'], data=[]] else: stmts = stmts.asList() + [last] # depends on [control=['if'], data=[]] else: raise CoconutInternalException('invalid statement lambda tokens', tokens) name = self.stmt_lambda_name() body = openindent + self.stmt_lambda_proc('\n'.join(stmts)) + closeindent if isinstance(params, str): self.stmt_lambdas.append('def ' + name + params + ':\n' + body) # depends on [control=['if'], data=[]] else: params.insert(0, name) # construct match tokens self.stmt_lambdas.append(''.join(self.name_match_funcdef_handle(original, loc, params)) + body) return name
def add_filter_rule(
            self, name, condition, filters, actions, active=1, way='in'):
        """
        :param: name filter name
        :param: condition allof or anyof
        :param: filters dict of filters
        :param: actions dict of actions
        :param: way string describing if filter is for 'in' or 'out' messages
        :returns: list of user's zobjects.FilterRule
        """
        filters['condition'] = condition
        new_rule = {
            'name': name,
            'active': active,
            'filterTests': filters,
            'filterActions': actions
        }
        new_rules = [zobjects.FilterRule.from_dict(new_rule)]

        prev_rules = self.get_filter_rules(way=way)
        # if there are already some rules
        if prev_rules:
            for rule in prev_rules:
                # don't add rule if it already exists
                if rule.name == new_rules[0].name:
                    raise ZimSOAPException(
                        'filter %s already exists' % rule.name)
            new_rules = new_rules + prev_rules

        content = {
            'filterRules': {
                'filterRule': [r._full_data for r in new_rules]
            }
        }
        if way == 'in':
            self.request('ModifyFilterRules', content)
        elif way == 'out':
            self.request('ModifyOutgoingFilterRules', content)
        return new_rules
def function[add_filter_rule, parameter[self, name, condition, filters, actions, active, way]]: constant[ :param: name filter name :param: condition allof or anyof :param: filters dict of filters :param: actions dict of actions :param: way string describing if filter is for 'in' or 'out' messages :returns: list of user's zobjects.FilterRule ] call[name[filters]][constant[condition]] assign[=] name[condition] variable[new_rule] assign[=] dictionary[[<ast.Constant object at 0x7da18dc06e30>, <ast.Constant object at 0x7da18dc07df0>, <ast.Constant object at 0x7da18dc07e80>, <ast.Constant object at 0x7da18dc045e0>], [<ast.Name object at 0x7da18dc06fb0>, <ast.Name object at 0x7da18dc06d10>, <ast.Name object at 0x7da18dc041f0>, <ast.Name object at 0x7da18dc050f0>]] variable[new_rules] assign[=] list[[<ast.Call object at 0x7da18dc06530>]] variable[prev_rules] assign[=] call[name[self].get_filter_rules, parameter[]] if name[prev_rules] begin[:] for taget[name[rule]] in starred[name[prev_rules]] begin[:] if compare[name[rule].name equal[==] call[name[new_rules]][constant[0]].name] begin[:] <ast.Raise object at 0x7da18dc050c0> variable[new_rules] assign[=] binary_operation[name[new_rules] + name[prev_rules]] variable[content] assign[=] dictionary[[<ast.Constant object at 0x7da18dc078b0>], [<ast.Dict object at 0x7da18dc05240>]] if compare[name[way] equal[==] constant[in]] begin[:] call[name[self].request, parameter[constant[ModifyFilterRules], name[content]]] return[name[new_rules]]
keyword[def] identifier[add_filter_rule] ( identifier[self] , identifier[name] , identifier[condition] , identifier[filters] , identifier[actions] , identifier[active] = literal[int] , identifier[way] = literal[string] ): literal[string] identifier[filters] [ literal[string] ]= identifier[condition] identifier[new_rule] ={ literal[string] : identifier[name] , literal[string] : identifier[active] , literal[string] : identifier[filters] , literal[string] : identifier[actions] } identifier[new_rules] =[ identifier[zobjects] . identifier[FilterRule] . identifier[from_dict] ( identifier[new_rule] )] identifier[prev_rules] = identifier[self] . identifier[get_filter_rules] ( identifier[way] = identifier[way] ) keyword[if] identifier[prev_rules] : keyword[for] identifier[rule] keyword[in] identifier[prev_rules] : keyword[if] identifier[rule] . identifier[name] == identifier[new_rules] [ literal[int] ]. identifier[name] : keyword[raise] identifier[ZimSOAPException] ( literal[string] % identifier[rule] . identifier[name] ) identifier[new_rules] = identifier[new_rules] + identifier[prev_rules] identifier[content] ={ literal[string] :{ literal[string] :[ identifier[r] . identifier[_full_data] keyword[for] identifier[r] keyword[in] identifier[new_rules] ] } } keyword[if] identifier[way] == literal[string] : identifier[self] . identifier[request] ( literal[string] , identifier[content] ) keyword[elif] identifier[way] == literal[string] : identifier[self] . identifier[request] ( literal[string] , identifier[content] ) keyword[return] identifier[new_rules]
def add_filter_rule(self, name, condition, filters, actions, active=1, way='in'):
    """
        :param: name filter name
        :param: condition allof or anyof
        :param: filters dict of filters
        :param: actions dict of actions
        :param: way string describing if filter is for 'in' or 'out' messages
        :returns: list of user's zobjects.FilterRule
        """
    filters['condition'] = condition
    new_rule = {'name': name, 'active': active, 'filterTests': filters, 'filterActions': actions}
    new_rules = [zobjects.FilterRule.from_dict(new_rule)]
    prev_rules = self.get_filter_rules(way=way)
    # if there are already some rules
    if prev_rules:
        for rule in prev_rules:
            # don't add rule if it already exists
            if rule.name == new_rules[0].name:
                raise ZimSOAPException('filter %s already exists' % rule.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule']]
        new_rules = new_rules + prev_rules # depends on [control=['if'], data=[]]
    content = {'filterRules': {'filterRule': [r._full_data for r in new_rules]}}
    if way == 'in':
        self.request('ModifyFilterRules', content) # depends on [control=['if'], data=[]]
    elif way == 'out':
        self.request('ModifyOutgoingFilterRules', content) # depends on [control=['if'], data=[]]
    return new_rules
def replace(self, **kw):
    """Return datetime with new specified fields given as arguments.

    For example, dt.replace(days=4) would return a new datetime_tz object with
    exactly the same attributes as dt but with the days attribute equal to 4.

    Any attribute can be replaced, but tzinfo can not be set to None.

    Args:
      Any datetime_tz attribute.

    Returns:
      A datetime_tz object with the attributes replaced.

    Raises:
      TypeError: If the given replacement is invalid.
    """
    if "tzinfo" in kw:
      if kw["tzinfo"] is None:
        raise TypeError("Can not remove the timezone use asdatetime()")
      else:
        tzinfo = kw["tzinfo"]
        del kw["tzinfo"]
    else:
      tzinfo = None

    is_dst = None
    if "is_dst" in kw:
      is_dst = kw["is_dst"]
      del kw["is_dst"]
    else:
      # Use our own DST setting.
      is_dst = self.is_dst

    replaced = self.asdatetime().replace(**kw)

    return type(self)(
        replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)
def function[replace, parameter[self]]: constant[Return datetime with new specified fields given as arguments. For example, dt.replace(days=4) would return a new datetime_tz object with exactly the same attributes as dt but with the days attribute equal to 4. Any attribute can be replaced, but tzinfo can not be set to None. Args: Any datetime_tz attribute. Returns: A datetime_tz object with the attributes replaced. Raises: TypeError: If the given replacement is invalid. ] if compare[constant[tzinfo] in name[kw]] begin[:] if compare[call[name[kw]][constant[tzinfo]] is constant[None]] begin[:] <ast.Raise object at 0x7da1b0ebc1c0> variable[is_dst] assign[=] constant[None] if compare[constant[is_dst] in name[kw]] begin[:] variable[is_dst] assign[=] call[name[kw]][constant[is_dst]] <ast.Delete object at 0x7da1b0ebe830> variable[replaced] assign[=] call[call[name[self].asdatetime, parameter[]].replace, parameter[]] return[call[call[name[type], parameter[name[self]]], parameter[name[replaced]]]]
keyword[def] identifier[replace] ( identifier[self] ,** identifier[kw] ): literal[string] keyword[if] literal[string] keyword[in] identifier[kw] : keyword[if] identifier[kw] [ literal[string] ] keyword[is] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] ) keyword[else] : identifier[tzinfo] = identifier[kw] [ literal[string] ] keyword[del] identifier[kw] [ literal[string] ] keyword[else] : identifier[tzinfo] = keyword[None] identifier[is_dst] = keyword[None] keyword[if] literal[string] keyword[in] identifier[kw] : identifier[is_dst] = identifier[kw] [ literal[string] ] keyword[del] identifier[kw] [ literal[string] ] keyword[else] : identifier[is_dst] = identifier[self] . identifier[is_dst] identifier[replaced] = identifier[self] . identifier[asdatetime] (). identifier[replace] (** identifier[kw] ) keyword[return] identifier[type] ( identifier[self] )( identifier[replaced] , identifier[tzinfo] = identifier[tzinfo] keyword[or] identifier[self] . identifier[tzinfo] . identifier[zone] , identifier[is_dst] = identifier[is_dst] )
def replace(self, **kw): """Return datetime with new specified fields given as arguments. For example, dt.replace(days=4) would return a new datetime_tz object with exactly the same attributes as dt but with the days attribute equal to 4. Any attribute can be replaced, but tzinfo can not be set to None. Args: Any datetime_tz attribute. Returns: A datetime_tz object with the attributes replaced. Raises: TypeError: If the given replacement is invalid. """ if 'tzinfo' in kw: if kw['tzinfo'] is None: raise TypeError('Can not remove the timezone use asdatetime()') # depends on [control=['if'], data=[]] else: tzinfo = kw['tzinfo'] del kw['tzinfo'] # depends on [control=['if'], data=['kw']] else: tzinfo = None is_dst = None if 'is_dst' in kw: is_dst = kw['is_dst'] del kw['is_dst'] # depends on [control=['if'], data=['kw']] else: # Use our own DST setting. is_dst = self.is_dst replaced = self.asdatetime().replace(**kw) return type(self)(replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)
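The "if key in kw ... del kw[key]" dance above can be written with a sentinel-based dict.pop, which, like the original, still distinguishes an absent key from an explicit None. A hedged standalone sketch, not the library's own code:

_MISSING = object()

def pop_tzinfo(kw):
    # Pop with a sentinel default: absent key -> None, explicit None -> error,
    # matching the branching in replace() above.
    tzinfo = kw.pop("tzinfo", _MISSING)
    if tzinfo is _MISSING:
        return None
    if tzinfo is None:
        raise TypeError("Can not remove the timezone use asdatetime()")
    return tzinfo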
def wrap(self, wrapper): """ Returns the first function passed as an argument to the second, allowing you to adjust arguments, run code before and after, and conditionally execute the original function. """ def wrapped(*args, **kwargs): if kwargs: kwargs["object"] = self.obj else: args = list(args) args.insert(0, self.obj) return wrapper(*args, **kwargs) return self._wrap(wrapped)
def function[wrap, parameter[self, wrapper]]: constant[ Returns the first function passed as an argument to the second, allowing you to adjust arguments, run code before and after, and conditionally execute the original function. ] def function[wrapped, parameter[]]: if name[kwargs] begin[:] call[name[kwargs]][constant[object]] assign[=] name[self].obj return[call[name[wrapper], parameter[<ast.Starred object at 0x7da2041d9f60>]]] return[call[name[self]._wrap, parameter[name[wrapped]]]]
keyword[def] identifier[wrap] ( identifier[self] , identifier[wrapper] ): literal[string] keyword[def] identifier[wrapped] (* identifier[args] ,** identifier[kwargs] ): keyword[if] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[obj] keyword[else] : identifier[args] = identifier[list] ( identifier[args] ) identifier[args] . identifier[insert] ( literal[int] , identifier[self] . identifier[obj] ) keyword[return] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[self] . identifier[_wrap] ( identifier[wrapped] )
def wrap(self, wrapper): """ Returns the first function passed as an argument to the second, allowing you to adjust arguments, run code before and after, and conditionally execute the original function. """ def wrapped(*args, **kwargs): if kwargs: kwargs['object'] = self.obj # depends on [control=['if'], data=[]] else: args = list(args) args.insert(0, self.obj) return wrapper(*args, **kwargs) return self._wrap(wrapped)
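Outside the underscore-style chaining class, the same wrapping pattern reads as a free function; wrap_value and the lambda below are illustrative only:

def wrap_value(value, wrapper):
    # The wrapper receives the wrapped value as its first argument,
    # plus whatever the caller passes at call time.
    def wrapped(*args, **kwargs):
        return wrapper(value, *args, **kwargs)
    return wrapped

shout = wrap_value("hello", lambda s, punct: s.upper() + punct)
print(shout("!"))  # HELLO!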
def _load_defaults(self, default='settings.py'): ''' Load the default settings ''' if default[-3:] == '.py': default = default[:-3] self.my_settings = {} try: settings = importlib.import_module(default) self.my_settings = self._convert_to_dict(settings) except ImportError: log.warning("No default settings found")
def function[_load_defaults, parameter[self, default]]: constant[ Load the default settings ] if compare[call[name[default]][<ast.Slice object at 0x7da1b19ee890>] equal[==] constant[.py]] begin[:] variable[default] assign[=] call[name[default]][<ast.Slice object at 0x7da1b19eded0>] name[self].my_settings assign[=] dictionary[[], []] <ast.Try object at 0x7da1b19eeb60>
keyword[def] identifier[_load_defaults] ( identifier[self] , identifier[default] = literal[string] ): literal[string] keyword[if] identifier[default] [- literal[int] :]== literal[string] : identifier[default] = identifier[default] [:- literal[int] ] identifier[self] . identifier[my_settings] ={} keyword[try] : identifier[settings] = identifier[importlib] . identifier[import_module] ( identifier[default] ) identifier[self] . identifier[my_settings] = identifier[self] . identifier[_convert_to_dict] ( identifier[settings] ) keyword[except] identifier[ImportError] : identifier[log] . identifier[warning] ( literal[string] )
def _load_defaults(self, default='settings.py'): """ Load the default settings """ if default[-3:] == '.py': default = default[:-3] # depends on [control=['if'], data=[]] self.my_settings = {} try: settings = importlib.import_module(default) self.my_settings = self._convert_to_dict(settings) # depends on [control=['try'], data=[]] except ImportError: log.warning('No default settings found') # depends on [control=['except'], data=[]]
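A standalone sketch of the same module-to-dict settings pattern; the module name is a placeholder, and keeping only upper-case names is an assumption about what _convert_to_dict does:

import importlib
import logging

log = logging.getLogger(__name__)

def load_settings(module_name="settings"):
    try:
        mod = importlib.import_module(module_name)
    except ImportError:
        log.warning("No default settings found")
        return {}
    # Assumed convention: settings are the module's upper-case attributes.
    return {k: getattr(mod, k) for k in dir(mod) if k.isupper()}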
def _build_predict(self, Xnew, full_cov=False): """ Compute the mean and variance of the latent function at some new points. Note that this is very similar to the SGPR prediction, for which there are notes in the SGPR notebook. :param Xnew: Point to predict at. """ pX = DiagonalGaussian(self.X_mean, self.X_var) num_inducing = len(self.feature) psi1 = expectation(pX, (self.kern, self.feature)) psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0) Kuu = features.Kuu(self.feature, self.kern, jitter=settings.numerics.jitter_level) Kus = features.Kuf(self.feature, self.kern, Xnew) sigma2 = self.likelihood.variance sigma = tf.sqrt(sigma2) L = tf.cholesky(Kuu) A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma tmp = tf.matrix_triangular_solve(L, psi2, lower=True) AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2 B = AAT + tf.eye(num_inducing, dtype=settings.float_type) LB = tf.cholesky(B) c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True) tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True) mean = tf.matmul(tmp2, c, transpose_a=True) if full_cov: var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \ - tf.matmul(tmp1, tmp1, transpose_a=True) shape = tf.stack([1, 1, tf.shape(self.Y)[1]]) var = tf.tile(tf.expand_dims(var, 2), shape) else: var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \ - tf.reduce_sum(tf.square(tmp1), 0) shape = tf.stack([1, tf.shape(self.Y)[1]]) var = tf.tile(tf.expand_dims(var, 1), shape) return mean + self.mean_function(Xnew), var
def function[_build_predict, parameter[self, Xnew, full_cov]]: constant[ Compute the mean and variance of the latent function at some new points. Note that this is very similar to the SGPR prediction, for which there are notes in the SGPR notebook. :param Xnew: Point to predict at. ] variable[pX] assign[=] call[name[DiagonalGaussian], parameter[name[self].X_mean, name[self].X_var]] variable[num_inducing] assign[=] call[name[len], parameter[name[self].feature]] variable[psi1] assign[=] call[name[expectation], parameter[name[pX], tuple[[<ast.Attribute object at 0x7da1b1ced120>, <ast.Attribute object at 0x7da1b1cecf40>]]]] variable[psi2] assign[=] call[name[tf].reduce_sum, parameter[call[name[expectation], parameter[name[pX], tuple[[<ast.Attribute object at 0x7da1b1cefe50>, <ast.Attribute object at 0x7da1b1cef0a0>]], tuple[[<ast.Attribute object at 0x7da1b1cee410>, <ast.Attribute object at 0x7da1b1ceef80>]]]]]] variable[Kuu] assign[=] call[name[features].Kuu, parameter[name[self].feature, name[self].kern]] variable[Kus] assign[=] call[name[features].Kuf, parameter[name[self].feature, name[self].kern, name[Xnew]]] variable[sigma2] assign[=] name[self].likelihood.variance variable[sigma] assign[=] call[name[tf].sqrt, parameter[name[sigma2]]] variable[L] assign[=] call[name[tf].cholesky, parameter[name[Kuu]]] variable[A] assign[=] binary_operation[call[name[tf].matrix_triangular_solve, parameter[name[L], call[name[tf].transpose, parameter[name[psi1]]]]] / name[sigma]] variable[tmp] assign[=] call[name[tf].matrix_triangular_solve, parameter[name[L], name[psi2]]] variable[AAT] assign[=] binary_operation[call[name[tf].matrix_triangular_solve, parameter[name[L], call[name[tf].transpose, parameter[name[tmp]]]]] / name[sigma2]] variable[B] assign[=] binary_operation[name[AAT] + call[name[tf].eye, parameter[name[num_inducing]]]] variable[LB] assign[=] call[name[tf].cholesky, parameter[name[B]]] variable[c] assign[=] binary_operation[call[name[tf].matrix_triangular_solve, parameter[name[LB], call[name[tf].matmul, parameter[name[A], name[self].Y]]]] / name[sigma]] variable[tmp1] assign[=] call[name[tf].matrix_triangular_solve, parameter[name[L], name[Kus]]] variable[tmp2] assign[=] call[name[tf].matrix_triangular_solve, parameter[name[LB], name[tmp1]]] variable[mean] assign[=] call[name[tf].matmul, parameter[name[tmp2], name[c]]] if name[full_cov] begin[:] variable[var] assign[=] binary_operation[binary_operation[call[name[self].kern.K, parameter[name[Xnew]]] + call[name[tf].matmul, parameter[name[tmp2], name[tmp2]]]] - call[name[tf].matmul, parameter[name[tmp1], name[tmp1]]]] variable[shape] assign[=] call[name[tf].stack, parameter[list[[<ast.Constant object at 0x7da1b1b027a0>, <ast.Constant object at 0x7da1b1b006d0>, <ast.Subscript object at 0x7da1b1b01f00>]]]] variable[var] assign[=] call[name[tf].tile, parameter[call[name[tf].expand_dims, parameter[name[var], constant[2]]], name[shape]]] return[tuple[[<ast.BinOp object at 0x7da20cabf2b0>, <ast.Name object at 0x7da20cabe560>]]]
keyword[def] identifier[_build_predict] ( identifier[self] , identifier[Xnew] , identifier[full_cov] = keyword[False] ): literal[string] identifier[pX] = identifier[DiagonalGaussian] ( identifier[self] . identifier[X_mean] , identifier[self] . identifier[X_var] ) identifier[num_inducing] = identifier[len] ( identifier[self] . identifier[feature] ) identifier[psi1] = identifier[expectation] ( identifier[pX] ,( identifier[self] . identifier[kern] , identifier[self] . identifier[feature] )) identifier[psi2] = identifier[tf] . identifier[reduce_sum] ( identifier[expectation] ( identifier[pX] ,( identifier[self] . identifier[kern] , identifier[self] . identifier[feature] ),( identifier[self] . identifier[kern] , identifier[self] . identifier[feature] )), identifier[axis] = literal[int] ) identifier[Kuu] = identifier[features] . identifier[Kuu] ( identifier[self] . identifier[feature] , identifier[self] . identifier[kern] , identifier[jitter] = identifier[settings] . identifier[numerics] . identifier[jitter_level] ) identifier[Kus] = identifier[features] . identifier[Kuf] ( identifier[self] . identifier[feature] , identifier[self] . identifier[kern] , identifier[Xnew] ) identifier[sigma2] = identifier[self] . identifier[likelihood] . identifier[variance] identifier[sigma] = identifier[tf] . identifier[sqrt] ( identifier[sigma2] ) identifier[L] = identifier[tf] . identifier[cholesky] ( identifier[Kuu] ) identifier[A] = identifier[tf] . identifier[matrix_triangular_solve] ( identifier[L] , identifier[tf] . identifier[transpose] ( identifier[psi1] ), identifier[lower] = keyword[True] )/ identifier[sigma] identifier[tmp] = identifier[tf] . identifier[matrix_triangular_solve] ( identifier[L] , identifier[psi2] , identifier[lower] = keyword[True] ) identifier[AAT] = identifier[tf] . identifier[matrix_triangular_solve] ( identifier[L] , identifier[tf] . identifier[transpose] ( identifier[tmp] ), identifier[lower] = keyword[True] )/ identifier[sigma2] identifier[B] = identifier[AAT] + identifier[tf] . identifier[eye] ( identifier[num_inducing] , identifier[dtype] = identifier[settings] . identifier[float_type] ) identifier[LB] = identifier[tf] . identifier[cholesky] ( identifier[B] ) identifier[c] = identifier[tf] . identifier[matrix_triangular_solve] ( identifier[LB] , identifier[tf] . identifier[matmul] ( identifier[A] , identifier[self] . identifier[Y] ), identifier[lower] = keyword[True] )/ identifier[sigma] identifier[tmp1] = identifier[tf] . identifier[matrix_triangular_solve] ( identifier[L] , identifier[Kus] , identifier[lower] = keyword[True] ) identifier[tmp2] = identifier[tf] . identifier[matrix_triangular_solve] ( identifier[LB] , identifier[tmp1] , identifier[lower] = keyword[True] ) identifier[mean] = identifier[tf] . identifier[matmul] ( identifier[tmp2] , identifier[c] , identifier[transpose_a] = keyword[True] ) keyword[if] identifier[full_cov] : identifier[var] = identifier[self] . identifier[kern] . identifier[K] ( identifier[Xnew] )+ identifier[tf] . identifier[matmul] ( identifier[tmp2] , identifier[tmp2] , identifier[transpose_a] = keyword[True] )- identifier[tf] . identifier[matmul] ( identifier[tmp1] , identifier[tmp1] , identifier[transpose_a] = keyword[True] ) identifier[shape] = identifier[tf] . identifier[stack] ([ literal[int] , literal[int] , identifier[tf] . identifier[shape] ( identifier[self] . identifier[Y] )[ literal[int] ]]) identifier[var] = identifier[tf] . identifier[tile] ( identifier[tf] . 
identifier[expand_dims] ( identifier[var] , literal[int] ), identifier[shape] ) keyword[else] : identifier[var] = identifier[self] . identifier[kern] . identifier[Kdiag] ( identifier[Xnew] )+ identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[tmp2] ), literal[int] )- identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[tmp1] ), literal[int] ) identifier[shape] = identifier[tf] . identifier[stack] ([ literal[int] , identifier[tf] . identifier[shape] ( identifier[self] . identifier[Y] )[ literal[int] ]]) identifier[var] = identifier[tf] . identifier[tile] ( identifier[tf] . identifier[expand_dims] ( identifier[var] , literal[int] ), identifier[shape] ) keyword[return] identifier[mean] + identifier[self] . identifier[mean_function] ( identifier[Xnew] ), identifier[var]
def _build_predict(self, Xnew, full_cov=False): """ Compute the mean and variance of the latent function at some new points. Note that this is very similar to the SGPR prediction, for which there are notes in the SGPR notebook. :param Xnew: Point to predict at. """ pX = DiagonalGaussian(self.X_mean, self.X_var) num_inducing = len(self.feature) psi1 = expectation(pX, (self.kern, self.feature)) psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0) Kuu = features.Kuu(self.feature, self.kern, jitter=settings.numerics.jitter_level) Kus = features.Kuf(self.feature, self.kern, Xnew) sigma2 = self.likelihood.variance sigma = tf.sqrt(sigma2) L = tf.cholesky(Kuu) A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma tmp = tf.matrix_triangular_solve(L, psi2, lower=True) AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2 B = AAT + tf.eye(num_inducing, dtype=settings.float_type) LB = tf.cholesky(B) c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True) tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True) mean = tf.matmul(tmp2, c, transpose_a=True) if full_cov: var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) - tf.matmul(tmp1, tmp1, transpose_a=True) shape = tf.stack([1, 1, tf.shape(self.Y)[1]]) var = tf.tile(tf.expand_dims(var, 2), shape) # depends on [control=['if'], data=[]] else: var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) - tf.reduce_sum(tf.square(tmp1), 0) shape = tf.stack([1, tf.shape(self.Y)[1]]) var = tf.tile(tf.expand_dims(var, 1), shape) return (mean + self.mean_function(Xnew), var)
def plot_sfs(s, yscale='log', bins=None, n=None, clip_endpoints=True, label=None, plot_kwargs=None, ax=None):
    """Plot a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """

    import matplotlib.pyplot as plt
    import scipy.stats

    # check inputs
    s = asarray_ndim(s, 1)

    # setup axes
    if ax is None:
        fig, ax = plt.subplots()

    # setup data
    if bins is None:
        if clip_endpoints:
            x = np.arange(1, s.shape[0]-1)
            y = s[1:-1]
        else:
            x = np.arange(s.shape[0])
            y = s
    else:
        if clip_endpoints:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(1, s.shape[0]-1),
                values=s[1:-1],
                bins=bins,
                statistic='sum')
        else:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(s.shape[0]),
                values=s,
                bins=bins,
                statistic='sum')
        # use bin midpoints for plotting
        x = (b[:-1] + b[1:]) / 2

    if n:
        # convert allele counts to allele frequencies
        x = x / n
        ax.set_xlabel('derived allele frequency')
    else:
        ax.set_xlabel('derived allele count')

    # do plotting
    if plot_kwargs is None:
        plot_kwargs = dict()
    ax.plot(x, y, label=label, **plot_kwargs)

    # tidy
    ax.set_yscale(yscale)
    ax.set_ylabel('site frequency')
    ax.autoscale(axis='x', tight=True)

    return ax
def function[plot_sfs, parameter[s, yscale, bins, n, clip_endpoints, label, plot_kwargs, ax]]: constant[Plot a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. ] import module[matplotlib.pyplot] as alias[plt] import module[scipy] variable[s] assign[=] call[name[asarray_ndim], parameter[name[s], constant[1]]] if compare[name[ax] is constant[None]] begin[:] <ast.Tuple object at 0x7da18f58fa90> assign[=] call[name[plt].subplots, parameter[]] if compare[name[bins] is constant[None]] begin[:] if name[clip_endpoints] begin[:] variable[x] assign[=] call[name[np].arange, parameter[constant[1], binary_operation[call[name[s].shape][constant[0]] - constant[1]]]] variable[y] assign[=] call[name[s]][<ast.Slice object at 0x7da18f58e980>] if name[n] begin[:] variable[x] assign[=] binary_operation[name[x] / name[n]] call[name[ax].set_xlabel, parameter[constant[derived allele frequency]]] if compare[name[plot_kwargs] is constant[None]] begin[:] variable[plot_kwargs] assign[=] call[name[dict], parameter[]] call[name[ax].plot, parameter[name[x], name[y]]] call[name[ax].set_yscale, parameter[name[yscale]]] call[name[ax].set_ylabel, parameter[constant[site frequency]]] call[name[ax].autoscale, parameter[]] return[name[ax]]
keyword[def] identifier[plot_sfs] ( identifier[s] , identifier[yscale] = literal[string] , identifier[bins] = keyword[None] , identifier[n] = keyword[None] , identifier[clip_endpoints] = keyword[True] , identifier[label] = keyword[None] , identifier[plot_kwargs] = keyword[None] , identifier[ax] = keyword[None] ): literal[string] keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt] keyword[import] identifier[scipy] identifier[s] = identifier[asarray_ndim] ( identifier[s] , literal[int] ) keyword[if] identifier[ax] keyword[is] keyword[None] : identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] () keyword[if] identifier[bins] keyword[is] keyword[None] : keyword[if] identifier[clip_endpoints] : identifier[x] = identifier[np] . identifier[arange] ( literal[int] , identifier[s] . identifier[shape] [ literal[int] ]- literal[int] ) identifier[y] = identifier[s] [ literal[int] :- literal[int] ] keyword[else] : identifier[x] = identifier[np] . identifier[arange] ( identifier[s] . identifier[shape] [ literal[int] ]) identifier[y] = identifier[s] keyword[else] : keyword[if] identifier[clip_endpoints] : identifier[y] , identifier[b] , identifier[_] = identifier[scipy] . identifier[stats] . identifier[binned_statistic] ( identifier[np] . identifier[arange] ( literal[int] , identifier[s] . identifier[shape] [ literal[int] ]- literal[int] ), identifier[values] = identifier[s] [ literal[int] :- literal[int] ], identifier[bins] = identifier[bins] , identifier[statistic] = literal[string] ) keyword[else] : identifier[y] , identifier[b] , identifier[_] = identifier[scipy] . identifier[stats] . identifier[binned_statistic] ( identifier[np] . identifier[arange] ( identifier[s] . identifier[shape] [ literal[int] ]), identifier[values] = identifier[s] , identifier[bins] = identifier[bins] , identifier[statistic] = literal[string] ) identifier[x] =( identifier[b] [:- literal[int] ]+ identifier[b] [ literal[int] :])/ literal[int] keyword[if] identifier[n] : identifier[x] = identifier[x] / identifier[n] identifier[ax] . identifier[set_xlabel] ( literal[string] ) keyword[else] : identifier[ax] . identifier[set_xlabel] ( literal[string] ) keyword[if] identifier[plot_kwargs] keyword[is] keyword[None] : identifier[plot_kwargs] = identifier[dict] () identifier[ax] . identifier[plot] ( identifier[x] , identifier[y] , identifier[label] = identifier[label] ,** identifier[plot_kwargs] ) identifier[ax] . identifier[set_yscale] ( identifier[yscale] ) identifier[ax] . identifier[set_ylabel] ( literal[string] ) identifier[ax] . identifier[autoscale] ( identifier[axis] = literal[string] , identifier[tight] = keyword[True] ) keyword[return] identifier[ax]
def plot_sfs(s, yscale='log', bins=None, n=None, clip_endpoints=True, label=None, plot_kwargs=None, ax=None): """Plot a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. """ import matplotlib.pyplot as plt import scipy # check inputs s = asarray_ndim(s, 1) # setup axes if ax is None: (fig, ax) = plt.subplots() # depends on [control=['if'], data=['ax']] # setup data if bins is None: if clip_endpoints: x = np.arange(1, s.shape[0] - 1) y = s[1:-1] # depends on [control=['if'], data=[]] else: x = np.arange(s.shape[0]) y = s # depends on [control=['if'], data=[]] else: if clip_endpoints: (y, b, _) = scipy.stats.binned_statistic(np.arange(1, s.shape[0] - 1), values=s[1:-1], bins=bins, statistic='sum') # depends on [control=['if'], data=[]] else: (y, b, _) = scipy.stats.binned_statistic(np.arange(s.shape[0]), values=s, bins=bins, statistic='sum') # use bin midpoints for plotting x = (b[:-1] + b[1:]) / 2 if n: # convert allele counts to allele frequencies x = x / n ax.set_xlabel('derived allele frequency') # depends on [control=['if'], data=[]] else: ax.set_xlabel('derived allele count') # do plotting if plot_kwargs is None: plot_kwargs = dict() # depends on [control=['if'], data=['plot_kwargs']] ax.plot(x, y, label=label, **plot_kwargs) # tidy ax.set_yscale(yscale) ax.set_ylabel('site frequency') ax.autoscale(axis='x', tight=True) return ax
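A quick usage sketch for plot_sfs as defined above. The asarray_ndim helper is not shown in this excerpt, so a minimal stand-in is provided, the spectrum values are invented for illustration, and numpy is assumed to be imported as np with matplotlib and scipy installed:

import numpy as np

def asarray_ndim(a, ndim):
    # Stand-in for the module helper used by plot_sfs (assumed behaviour).
    a = np.asarray(a)
    assert a.ndim == ndim
    return a

# Fake site frequency spectrum for n = 10 chromosomes:
# index = derived allele count, value = number of sites observed.
s = np.array([5000, 1200, 600, 400, 300, 250, 200, 180, 160, 150, 140])
ax = plot_sfs(s, n=10, clip_endpoints=True, label='example')
ax.legend()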
def setShowRichText( self, state ): """ Sets whether or not the delegate should render rich text information \ as HTML when drawing the contents of the item. :param state | <bool> """ delegate = self.itemDelegate() if isinstance(delegate, XTreeWidgetDelegate): delegate.setShowRichText(state)
def function[setShowRichText, parameter[self, state]]: constant[ Sets whether or not the delegate should render rich text information as HTML when drawing the contents of the item. :param state | <bool> ] variable[delegate] assign[=] call[name[self].itemDelegate, parameter[]] if call[name[isinstance], parameter[name[delegate], name[XTreeWidgetDelegate]]] begin[:] call[name[delegate].setShowRichText, parameter[name[state]]]
keyword[def] identifier[setShowRichText] ( identifier[self] , identifier[state] ): literal[string] identifier[delegate] = identifier[self] . identifier[itemDelegate] () keyword[if] identifier[isinstance] ( identifier[delegate] , identifier[XTreeWidgetDelegate] ): identifier[delegate] . identifier[setShowRichText] ( identifier[state] )
def setShowRichText(self, state): """ Sets whether or not the delegate should render rich text information as HTML when drawing the contents of the item. :param state | <bool> """ delegate = self.itemDelegate() if isinstance(delegate, XTreeWidgetDelegate): delegate.setShowRichText(state) # depends on [control=['if'], data=[]]
def send_message(message: str, subject: str, recip: list,
                 recip_email: list, html_message: str = None):
    """
    Sends message to specified value.
    Source: Himanshu Shankar (https://github.com/iamhssingh)
    Parameters
    ----------
    message: str
        Message that is to be sent to user.
    subject: str
        Subject that is to be sent to user, in case prop is an email.
    recip: list
        Recipient to whom message is being sent.
    recip_email: list
        Recipient to whom EMail is being sent. This will be deprecated
        once SMS feature is brought in.
    html_message: str
        HTML variant of message, if any.
    Returns
    -------
    sent: dict
    """
    import smtplib

    from django.conf import settings
    from django.core.mail import send_mail

    from sendsms import api

    sent = {'success': False, 'message': None}

    if not getattr(settings, 'EMAIL_HOST', None):
        raise ValueError('EMAIL_HOST must be defined in django '
                         'setting for sending mail.')

    if not getattr(settings, 'EMAIL_FROM', None):
        raise ValueError('EMAIL_FROM must be defined in django setting '
                         'for sending mail. Who is sending email?')

    # Check if there is any recipient
    if not len(recip) > 0:
        raise ValueError('No recipient to send message.')

    # Check if the value of recipient is valid (min length: [email protected])
    elif len(recip[0]) < 5:
        raise ValueError('Invalid recipient.')

    # Check if all recipient in list are of same type
    is_email = validate_email(recip[0])
    for ind in range(len(recip)):
        if validate_email(recip[ind]) is not is_email:
            raise ValueError('All recipient should be of same type.')
        elif not is_email:
            recip[ind] = get_mobile_number(recip[ind])

    # Check if fallback email is indeed an email
    for rcp in recip_email:
        if not validate_email(rcp):
            raise ValueError('Invalid email provided: {}'.format(rcp))

    if isinstance(recip, str):
        # For backsupport
        recip = [recip]

    if isinstance(recip_email, str):
        # For backsupport
        recip_email = [recip_email]

    if is_email:
        try:
            send_mail(subject=subject, message=message,
                      html_message=html_message,
                      from_email=settings.EMAIL_FROM, recipient_list=recip)
        except smtplib.SMTPException as ex:
            sent['message'] = 'Message sending failed!' + str(ex.args)
            sent['success'] = False
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True
    else:
        try:
            api.send_sms(body=message, to=recip, from_phone=None)
            # Django SendSMS doesn't provide an output of success/failure.
            # Send mail either ways, just to ensure delivery.
            send_message(message=message, subject=subject,
                         recip=recip_email, recip_email=recip_email,
                         html_message=html_message)
        except Exception as ex:
            sent['message'] = 'Message sending failed!' + str(ex.args)
            sent['success'] = False
            send_message(message=message, subject=subject,
                         recip=recip_email, recip_email=recip_email,
                         html_message=html_message)
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True

    return sent
def function[send_message, parameter[message, subject, recip, recip_email, html_message]]: constant[ Sends message to specified value. Source: Himanshu Shankar (https://github.com/iamhssingh) Parameters ---------- message: str Message that is to be sent to user. subject: str Subject that is to be sent to user, in case prop is an email. recip: list Recipient to whom message is being sent. recip_email: list Recipient to whom EMail is being sent. This will be deprecated once SMS feature is brought in. html_message: str HTML variant of message, if any. Returns ------- sent: dict ] import module[smtplib] from relative_module[django.conf] import module[settings] from relative_module[django.core.mail] import module[send_mail] from relative_module[sendsms] import module[api] variable[sent] assign[=] dictionary[[<ast.Constant object at 0x7da1b19107f0>, <ast.Constant object at 0x7da1b1912b90>], [<ast.Constant object at 0x7da1b1912b30>, <ast.Constant object at 0x7da1b1910bb0>]] if <ast.UnaryOp object at 0x7da1b1911150> begin[:] <ast.Raise object at 0x7da1b1910160> if <ast.UnaryOp object at 0x7da1b19108e0> begin[:] <ast.Raise object at 0x7da1b1910ca0> if <ast.UnaryOp object at 0x7da1b1913520> begin[:] <ast.Raise object at 0x7da1b1912080> if <ast.UnaryOp object at 0x7da1b1912c80> begin[:] <ast.Raise object at 0x7da1b19137c0> variable[is_email] assign[=] call[name[validate_email], parameter[call[name[recip]][constant[0]]]] for taget[name[ind]] in starred[call[name[range], parameter[call[name[len], parameter[name[recip]]]]]] begin[:] if compare[call[name[validate_email], parameter[call[name[recip]][name[ind]]]] is_not name[is_email]] begin[:] <ast.Raise object at 0x7da1b1913760> for taget[name[rcp]] in starred[name[recip_email]] begin[:] if <ast.UnaryOp object at 0x7da1b1910790> begin[:] <ast.Raise object at 0x7da1b19139d0> if call[name[isinstance], parameter[name[recip], name[str]]] begin[:] variable[recip] assign[=] list[[<ast.Name object at 0x7da1b1910d30>]] if call[name[isinstance], parameter[name[recip_email], name[str]]] begin[:] variable[recip_email] assign[=] list[[<ast.Name object at 0x7da1b1911e10>]] if name[is_email] begin[:] <ast.Try object at 0x7da1b1913cd0> return[name[sent]]
keyword[def] identifier[send_message] ( identifier[message] : identifier[str] , identifier[subject] : identifier[str] , identifier[recip] : identifier[list] , identifier[recip_email] : identifier[list] , identifier[html_message] : identifier[str] = keyword[None] ): literal[string] keyword[import] identifier[smtplib] keyword[from] identifier[django] . identifier[conf] keyword[import] identifier[settings] keyword[from] identifier[django] . identifier[core] . identifier[mail] keyword[import] identifier[send_mail] keyword[from] identifier[sendsms] keyword[import] identifier[api] identifier[sent] ={ literal[string] : keyword[False] , literal[string] : keyword[None] } keyword[if] keyword[not] identifier[getattr] ( identifier[settings] , literal[string] , keyword[None] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] keyword[not] identifier[getattr] ( identifier[settings] , literal[string] , keyword[None] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] keyword[not] identifier[getattr] ( identifier[settings] , literal[string] , keyword[None] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] keyword[not] identifier[len] ( identifier[recip] )> literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[elif] identifier[len] ( identifier[recip] [ literal[int] ])< literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[is_email] = identifier[validate_email] ( identifier[recip] [ literal[int] ]) keyword[for] identifier[ind] keyword[in] identifier[range] ( identifier[len] ( identifier[recip] )): keyword[if] identifier[validate_email] ( identifier[recip] [ identifier[ind] ]) keyword[is] keyword[not] identifier[is_email] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[elif] keyword[not] identifier[is_email] : identifier[recip] [ identifier[ind] ]= identifier[get_mobile_number] ( identifier[recip] [ identifier[ind] ]) keyword[for] identifier[rcp] keyword[in] identifier[recip_email] : keyword[if] keyword[not] identifier[validate_email] ( identifier[rcp] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[rcp] )) keyword[if] identifier[isinstance] ( identifier[recip] , identifier[str] ): identifier[recip] =[ identifier[recip] ] keyword[if] identifier[isinstance] ( identifier[recip_email] , identifier[str] ): identifier[recip_email] =[ identifier[recip_email] ] keyword[if] identifier[is_email] : keyword[try] : identifier[send_mail] ( identifier[subject] = identifier[subject] , identifier[message] = identifier[message] , identifier[html_message] = identifier[html_message] , identifier[from_email] = identifier[settings] . identifier[EMAIL_FROM] , identifier[recipient_list] = identifier[recip] ) keyword[except] identifier[smtplib] . identifier[SMTPException] keyword[as] identifier[ex] : identifier[sent] [ literal[string] ]= literal[string] + identifier[str] ( identifier[ex] . identifier[args] ) identifier[sent] [ literal[string] ]= keyword[False] keyword[else] : identifier[sent] [ literal[string] ]= literal[string] identifier[sent] [ literal[string] ]= keyword[True] keyword[else] : keyword[try] : identifier[api] . identifier[send_sms] ( identifier[body] = identifier[message] , identifier[to] = identifier[recip] , identifier[from_phone] = keyword[None] ) identifier[send_message] ( identifier[message] = identifier[message] , identifier[subject] = identifier[subject] , identifier[recip] = identifier[recip_email] , identifier[recip_email] = identifier[recip_email] , identifier[html_message] = identifier[html_message] ) keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[sent] [ literal[string] ]= literal[string] + identifier[str] ( identifier[ex] . identifier[args] ) identifier[sent] [ literal[string] ]= keyword[False] identifier[send_message] ( identifier[message] = identifier[message] , identifier[subject] = identifier[subject] , identifier[recip] = identifier[recip_email] , identifier[recip_email] = identifier[recip_email] , identifier[html_message] = identifier[html_message] ) keyword[else] : identifier[sent] [ literal[string] ]= literal[string] identifier[sent] [ literal[string] ]= keyword[True] keyword[return] identifier[sent]
def send_message(message: str, subject: str, recip: list, recip_email: list, html_message: str=None):
    """
    Sends message to specified value.
    Source: Himanshu Shankar (https://github.com/iamhssingh)
    Parameters
    ----------
    message: str
        Message that is to be sent to user.
    subject: str
        Subject that is to be sent to user, in case prop is an email.
    recip: list
        Recipient to whom message is being sent.
    recip_email: list
        Recipient to whom EMail is being sent. This will be deprecated
        once SMS feature is brought in.
    html_message: str
        HTML variant of message, if any.
    Returns
    -------
    sent: dict
    """
    import smtplib
    from django.conf import settings
    from django.core.mail import send_mail
    from sendsms import api
    sent = {'success': False, 'message': None}
    if not getattr(settings, 'EMAIL_HOST', None):
        raise ValueError('EMAIL_HOST must be defined in django setting for sending mail.') # depends on [control=['if'], data=[]]
    if not getattr(settings, 'EMAIL_FROM', None):
        raise ValueError('EMAIL_FROM must be defined in django setting for sending mail. Who is sending email?') # depends on [control=['if'], data=[]]
    if not getattr(settings, 'EMAIL_FROM', None):
        raise ValueError('EMAIL_FROM must be defined in django setting for sending mail. Who is sending email?') # depends on [control=['if'], data=[]]
    # Check if there is any recipient
    if not len(recip) > 0:
        raise ValueError('No recipient to send message.') # depends on [control=['if'], data=[]]
    # Check if the value of recipient is valid (min length: [email protected])
    elif len(recip[0]) < 5:
        raise ValueError('Invalid recipient.') # depends on [control=['if'], data=[]]
    # Check if all recipient in list are of same type
    is_email = validate_email(recip[0])
    for ind in range(len(recip)):
        if validate_email(recip[ind]) is not is_email:
            raise ValueError('All recipient should be of same type.') # depends on [control=['if'], data=[]]
        elif not is_email:
            recip[ind] = get_mobile_number(recip[ind]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ind']]
    # Check if fallback email is indeed an email
    for rcp in recip_email:
        if not validate_email(rcp):
            raise ValueError('Invalid email provided: {}'.format(rcp)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rcp']]
    if isinstance(recip, str):
        # For backsupport
        recip = [recip] # depends on [control=['if'], data=[]]
    if isinstance(recip_email, str):
        # For backsupport
        recip_email = [recip_email] # depends on [control=['if'], data=[]]
    if is_email:
        try:
            send_mail(subject=subject, message=message, html_message=html_message, from_email=settings.EMAIL_FROM, recipient_list=recip) # depends on [control=['try'], data=[]]
        except smtplib.SMTPException as ex:
            sent['message'] = 'Message sending failed!' + str(ex.args)
            sent['success'] = False # depends on [control=['except'], data=['ex']]
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True # depends on [control=['if'], data=[]]
    else:
        try:
            api.send_sms(body=message, to=recip, from_phone=None)
            # Django SendSMS doesn't provide an output of success/failure.
            # Send mail either ways, just to ensure delivery.
            send_message(message=message, subject=subject, recip=recip_email, recip_email=recip_email, html_message=html_message) # depends on [control=['try'], data=[]]
        except Exception as ex:
            sent['message'] = 'Message sending Failed!' + str(ex.args)
            sent['success'] = False
            send_message(message=message, subject=subject, recip=recip_email, recip_email=recip_email, html_message=html_message) # depends on [control=['except'], data=['ex']]
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True
    return sent
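The recipient loop in send_message enforces that a single call mixes only emails or only phone numbers. A self-contained illustration of that check; validate_email here is a crude stand-in, not the project's actual helper, and the recipients are invented:

def validate_email(value):
    # Naive stand-in: real code would use a proper validator.
    return '@' in value and '.' in value.rsplit('@', 1)[-1]

recip = ['user@example.com', '+15550100']  # one email, one phone number
is_email = validate_email(recip[0])
try:
    for r in recip:
        if validate_email(r) is not is_email:
            raise ValueError('All recipient should be of same type.')
except ValueError as exc:
    print(exc)  # mixing recipient types is rejected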
def pressure(self):
    """The current pressure being applied on the tool in use,
    normalized to the range [0, 1] and whether it has changed
    in this event.

    If this axis does not exist on the current tool, this property
    is (0, :obj:`False`).

    Returns:
        (float, bool): The current value of the axis and whether
        it has changed.
    """

    pressure = self._libinput.libinput_event_tablet_tool_get_pressure(
        self._handle)
    changed = self._libinput. \
        libinput_event_tablet_tool_pressure_has_changed(self._handle)
    return pressure, changed
def function[pressure, parameter[self]]: constant[The current pressure being applied on the tool in use, normalized to the range [0, 1] and whether it has changed in this event. If this axis does not exist on the current tool, this property is (0, :obj:`False`). Returns: (float, bool): The current value of the the axis and whether it has changed. ] variable[pressure] assign[=] call[name[self]._libinput.libinput_event_tablet_tool_get_pressure, parameter[name[self]._handle]] variable[changed] assign[=] call[name[self]._libinput.libinput_event_tablet_tool_pressure_has_changed, parameter[name[self]._handle]] return[tuple[[<ast.Name object at 0x7da204621450>, <ast.Name object at 0x7da2046201c0>]]]
keyword[def] identifier[pressure] ( identifier[self] ): literal[string] identifier[pressure] = identifier[self] . identifier[_libinput] . identifier[libinput_event_tablet_tool_get_pressure] ( identifier[self] . identifier[_handle] ) identifier[changed] = identifier[self] . identifier[_libinput] . identifier[libinput_event_tablet_tool_pressure_has_changed] ( identifier[self] . identifier[_handle] ) keyword[return] identifier[pressure] , identifier[changed]
def pressure(self):
    """The current pressure being applied on the tool in use,
    normalized to the range [0, 1] and whether it has changed
    in this event.

    If this axis does not exist on the current tool, this property
    is (0, :obj:`False`).

    Returns:
        (float, bool): The current value of the axis and whether
        it has changed.
    """
    pressure = self._libinput.libinput_event_tablet_tool_get_pressure(self._handle)
    changed = self._libinput.libinput_event_tablet_tool_pressure_has_changed(self._handle)
    return (pressure, changed)
def addToDefinition(self, json_dict):
    """
    The addToDefinition operation supports adding a definition
    property to a hosted feature service. The result of this
    operation is a response indicating success or failure with error
    code and description.
    This function allows users to add additional values to an
    already published service.

    Input:
       json_dict - part to add to host service.  The part format can
                   be derived from the asDictionary property.  For
                   layer level modifications, run updates on each
                   individual feature service layer object.
    Output:
       JSON message as dictionary
    """
    params = {
        "f" : "json",
        "addToDefinition" : json.dumps(json_dict),
        "async" : False
    }
    uURL = self._url + "/addToDefinition"
    res = self._post(url=uURL, param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
    self.refresh()
    return res
def function[addToDefinition, parameter[self, json_dict]]: constant[ The addToDefinition operation supports adding a definition property to a hosted feature service. The result of this operation is a response indicating success or failure with error code and description. This function will allow users to change add additional values to an already published service. Input: json_dict - part to add to host service. The part format can be derived from the asDictionary property. For layer level modifications, run updates on each individual feature service layer object. Output: JSON message as dictionary ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b124af20>, <ast.Constant object at 0x7da1b1249900>, <ast.Constant object at 0x7da1b1249c60>], [<ast.Constant object at 0x7da1b124a320>, <ast.Call object at 0x7da1b124a470>, <ast.Constant object at 0x7da1b1248c70>]] variable[uURL] assign[=] binary_operation[name[self]._url + constant[/addToDefinition]] variable[res] assign[=] call[name[self]._post, parameter[]] call[name[self].refresh, parameter[]] return[name[res]]
keyword[def] identifier[addToDefinition] ( identifier[self] , identifier[json_dict] ): literal[string] identifier[params] ={ literal[string] : literal[string] , literal[string] : identifier[json] . identifier[dumps] ( identifier[json_dict] ), literal[string] : keyword[False] } identifier[uURL] = identifier[self] . identifier[_url] + literal[string] identifier[res] = identifier[self] . identifier[_post] ( identifier[url] = identifier[uURL] , identifier[param_dict] = identifier[params] , identifier[securityHandler] = identifier[self] . identifier[_securityHandler] , identifier[proxy_port] = identifier[self] . identifier[_proxy_port] , identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ) identifier[self] . identifier[refresh] () keyword[return] identifier[res]
def addToDefinition(self, json_dict):
    """
    The addToDefinition operation supports adding a definition
    property to a hosted feature service. The result of this
    operation is a response indicating success or failure with error
    code and description.
    This function allows users to add additional values to an
    already published service.

    Input:
       json_dict - part to add to host service.  The part format can
                   be derived from the asDictionary property.  For
                   layer level modifications, run updates on each
                   individual feature service layer object.
    Output:
       JSON message as dictionary
    """
    params = {'f': 'json', 'addToDefinition': json.dumps(json_dict), 'async': False}
    uURL = self._url + '/addToDefinition'
    res = self._post(url=uURL, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
    self.refresh()
    return res
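addToDefinition above ultimately issues a POST through self._post with security and proxy handling. Roughly the same request can be sketched with plain requests; the service URL and token below are placeholders, not real endpoints:

import json
import requests

def add_to_definition(service_admin_url, token, definition):
    # Hypothetical direct call; the class above adds proxy/security handling.
    params = {
        'f': 'json',
        'addToDefinition': json.dumps(definition),
        'async': False,
        'token': token,
    }
    rsp = requests.post(service_admin_url + '/addToDefinition', data=params)
    return rsp.json()

# Example call with placeholder values:
# add_to_definition('https://example.org/arcgis/rest/admin/services/Foo/FeatureServer',
#                   'TOKEN', {'tables': []})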
def run(self, concurrency=0, outline=False, tail=False, dump=False, *args, **kwargs): """Kicks off the build/update of the stacks in the stack_definitions. This is the main entry point for the Builder. """ plan = self._generate_plan(tail=tail) if not plan.keys(): logger.warn('WARNING: No stacks detected (error in config?)') if not outline and not dump: plan.outline(logging.DEBUG) logger.debug("Launching stacks: %s", ", ".join(plan.keys())) walker = build_walker(concurrency) plan.execute(walker) else: if outline: plan.outline() if dump: plan.dump(directory=dump, context=self.context, provider=self.provider)
def function[run, parameter[self, concurrency, outline, tail, dump]]: constant[Kicks off the build/update of the stacks in the stack_definitions. This is the main entry point for the Builder. ] variable[plan] assign[=] call[name[self]._generate_plan, parameter[]] if <ast.UnaryOp object at 0x7da1b180eb60> begin[:] call[name[logger].warn, parameter[constant[WARNING: No stacks detected (error in config?)]]] if <ast.BoolOp object at 0x7da1b180e200> begin[:] call[name[plan].outline, parameter[name[logging].DEBUG]] call[name[logger].debug, parameter[constant[Launching stacks: %s], call[constant[, ].join, parameter[call[name[plan].keys, parameter[]]]]]] variable[walker] assign[=] call[name[build_walker], parameter[name[concurrency]]] call[name[plan].execute, parameter[name[walker]]]
keyword[def] identifier[run] ( identifier[self] , identifier[concurrency] = literal[int] , identifier[outline] = keyword[False] , identifier[tail] = keyword[False] , identifier[dump] = keyword[False] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[plan] = identifier[self] . identifier[_generate_plan] ( identifier[tail] = identifier[tail] ) keyword[if] keyword[not] identifier[plan] . identifier[keys] (): identifier[logger] . identifier[warn] ( literal[string] ) keyword[if] keyword[not] identifier[outline] keyword[and] keyword[not] identifier[dump] : identifier[plan] . identifier[outline] ( identifier[logging] . identifier[DEBUG] ) identifier[logger] . identifier[debug] ( literal[string] , literal[string] . identifier[join] ( identifier[plan] . identifier[keys] ())) identifier[walker] = identifier[build_walker] ( identifier[concurrency] ) identifier[plan] . identifier[execute] ( identifier[walker] ) keyword[else] : keyword[if] identifier[outline] : identifier[plan] . identifier[outline] () keyword[if] identifier[dump] : identifier[plan] . identifier[dump] ( identifier[directory] = identifier[dump] , identifier[context] = identifier[self] . identifier[context] , identifier[provider] = identifier[self] . identifier[provider] )
def run(self, concurrency=0, outline=False, tail=False, dump=False, *args, **kwargs): """Kicks off the build/update of the stacks in the stack_definitions. This is the main entry point for the Builder. """ plan = self._generate_plan(tail=tail) if not plan.keys(): logger.warn('WARNING: No stacks detected (error in config?)') # depends on [control=['if'], data=[]] if not outline and (not dump): plan.outline(logging.DEBUG) logger.debug('Launching stacks: %s', ', '.join(plan.keys())) walker = build_walker(concurrency) plan.execute(walker) # depends on [control=['if'], data=[]] else: if outline: plan.outline() # depends on [control=['if'], data=[]] if dump: plan.dump(directory=dump, context=self.context, provider=self.provider) # depends on [control=['if'], data=[]]
def _set_repo_options(repo, options): ''' Set the options to the repo. ''' delimiters = "[", "]" pattern = '|'.join(map(re.escape, delimiters)) for option in options: splitted = re.split(pattern, option) for opt in splitted: _set_repo_option(repo, opt)
def function[_set_repo_options, parameter[repo, options]]: constant[ Set the options to the repo. ] variable[delimiters] assign[=] tuple[[<ast.Constant object at 0x7da18eb55450>, <ast.Constant object at 0x7da18eb57a00>]] variable[pattern] assign[=] call[constant[|].join, parameter[call[name[map], parameter[name[re].escape, name[delimiters]]]]] for taget[name[option]] in starred[name[options]] begin[:] variable[splitted] assign[=] call[name[re].split, parameter[name[pattern], name[option]]] for taget[name[opt]] in starred[name[splitted]] begin[:] call[name[_set_repo_option], parameter[name[repo], name[opt]]]
keyword[def] identifier[_set_repo_options] ( identifier[repo] , identifier[options] ): literal[string] identifier[delimiters] = literal[string] , literal[string] identifier[pattern] = literal[string] . identifier[join] ( identifier[map] ( identifier[re] . identifier[escape] , identifier[delimiters] )) keyword[for] identifier[option] keyword[in] identifier[options] : identifier[splitted] = identifier[re] . identifier[split] ( identifier[pattern] , identifier[option] ) keyword[for] identifier[opt] keyword[in] identifier[splitted] : identifier[_set_repo_option] ( identifier[repo] , identifier[opt] )
def _set_repo_options(repo, options): """ Set the options to the repo. """ delimiters = ('[', ']') pattern = '|'.join(map(re.escape, delimiters)) for option in options: splitted = re.split(pattern, option) for opt in splitted: _set_repo_option(repo, opt) # depends on [control=['for'], data=['opt']] # depends on [control=['for'], data=['option']]
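What the delimiter split in _set_repo_options actually does, shown on a made-up option string of the form name[value]:

import re

delimiters = "[", "]"
pattern = '|'.join(map(re.escape, delimiters))   # -> '\\[|\\]'
print(re.split(pattern, "comments[invalid=true]"))
# ['comments', 'invalid=true', ''] -- each piece is passed to _set_repo_option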
async def get_response(self, path: str, scope: Scope) -> Response: """ Returns an HTTP response, given the incoming path, method and request headers. """ if scope["method"] not in ("GET", "HEAD"): return PlainTextResponse("Method Not Allowed", status_code=405) if path.startswith(".."): # Most clients will normalize the path, so we shouldn't normally # get this, but don't allow misbehaving clients to break out of # the static files directory. return PlainTextResponse("Not Found", status_code=404) full_path, stat_result = await self.lookup_path(path) if stat_result and stat.S_ISREG(stat_result.st_mode): # We have a static file to serve. return self.file_response(full_path, stat_result, scope) elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html: # We're in HTML mode, and have got a directory URL. # Check if we have 'index.html' file to serve. index_path = os.path.join(path, "index.html") full_path, stat_result = await self.lookup_path(index_path) if stat_result is not None and stat.S_ISREG(stat_result.st_mode): if not scope["path"].endswith("/"): # Directory URLs should redirect to always end in "/". url = URL(scope=scope) url = url.replace(path=url.path + "/") return RedirectResponse(url=url) return self.file_response(full_path, stat_result, scope) if self.html: # Check for '404.html' if we're in HTML mode. full_path, stat_result = await self.lookup_path("404.html") if stat_result is not None and stat.S_ISREG(stat_result.st_mode): return self.file_response( full_path, stat_result, scope, status_code=404 ) return PlainTextResponse("Not Found", status_code=404)
<ast.AsyncFunctionDef object at 0x7da1b025fa60>
keyword[async] keyword[def] identifier[get_response] ( identifier[self] , identifier[path] : identifier[str] , identifier[scope] : identifier[Scope] )-> identifier[Response] : literal[string] keyword[if] identifier[scope] [ literal[string] ] keyword[not] keyword[in] ( literal[string] , literal[string] ): keyword[return] identifier[PlainTextResponse] ( literal[string] , identifier[status_code] = literal[int] ) keyword[if] identifier[path] . identifier[startswith] ( literal[string] ): keyword[return] identifier[PlainTextResponse] ( literal[string] , identifier[status_code] = literal[int] ) identifier[full_path] , identifier[stat_result] = keyword[await] identifier[self] . identifier[lookup_path] ( identifier[path] ) keyword[if] identifier[stat_result] keyword[and] identifier[stat] . identifier[S_ISREG] ( identifier[stat_result] . identifier[st_mode] ): keyword[return] identifier[self] . identifier[file_response] ( identifier[full_path] , identifier[stat_result] , identifier[scope] ) keyword[elif] identifier[stat_result] keyword[and] identifier[stat] . identifier[S_ISDIR] ( identifier[stat_result] . identifier[st_mode] ) keyword[and] identifier[self] . identifier[html] : identifier[index_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ) identifier[full_path] , identifier[stat_result] = keyword[await] identifier[self] . identifier[lookup_path] ( identifier[index_path] ) keyword[if] identifier[stat_result] keyword[is] keyword[not] keyword[None] keyword[and] identifier[stat] . identifier[S_ISREG] ( identifier[stat_result] . identifier[st_mode] ): keyword[if] keyword[not] identifier[scope] [ literal[string] ]. identifier[endswith] ( literal[string] ): identifier[url] = identifier[URL] ( identifier[scope] = identifier[scope] ) identifier[url] = identifier[url] . identifier[replace] ( identifier[path] = identifier[url] . identifier[path] + literal[string] ) keyword[return] identifier[RedirectResponse] ( identifier[url] = identifier[url] ) keyword[return] identifier[self] . identifier[file_response] ( identifier[full_path] , identifier[stat_result] , identifier[scope] ) keyword[if] identifier[self] . identifier[html] : identifier[full_path] , identifier[stat_result] = keyword[await] identifier[self] . identifier[lookup_path] ( literal[string] ) keyword[if] identifier[stat_result] keyword[is] keyword[not] keyword[None] keyword[and] identifier[stat] . identifier[S_ISREG] ( identifier[stat_result] . identifier[st_mode] ): keyword[return] identifier[self] . identifier[file_response] ( identifier[full_path] , identifier[stat_result] , identifier[scope] , identifier[status_code] = literal[int] ) keyword[return] identifier[PlainTextResponse] ( literal[string] , identifier[status_code] = literal[int] )
async def get_response(self, path: str, scope: Scope) -> Response: """ Returns an HTTP response, given the incoming path, method and request headers. """ if scope['method'] not in ('GET', 'HEAD'): return PlainTextResponse('Method Not Allowed', status_code=405) # depends on [control=['if'], data=[]] if path.startswith('..'): # Most clients will normalize the path, so we shouldn't normally # get this, but don't allow misbehaving clients to break out of # the static files directory. return PlainTextResponse('Not Found', status_code=404) # depends on [control=['if'], data=[]] (full_path, stat_result) = await self.lookup_path(path) if stat_result and stat.S_ISREG(stat_result.st_mode): # We have a static file to serve. return self.file_response(full_path, stat_result, scope) # depends on [control=['if'], data=[]] elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html: # We're in HTML mode, and have got a directory URL. # Check if we have 'index.html' file to serve. index_path = os.path.join(path, 'index.html') (full_path, stat_result) = await self.lookup_path(index_path) if stat_result is not None and stat.S_ISREG(stat_result.st_mode): if not scope['path'].endswith('/'): # Directory URLs should redirect to always end in "/". url = URL(scope=scope) url = url.replace(path=url.path + '/') return RedirectResponse(url=url) # depends on [control=['if'], data=[]] return self.file_response(full_path, stat_result, scope) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if self.html: # Check for '404.html' if we're in HTML mode. (full_path, stat_result) = await self.lookup_path('404.html') if stat_result is not None and stat.S_ISREG(stat_result.st_mode): return self.file_response(full_path, stat_result, scope, status_code=404) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return PlainTextResponse('Not Found', status_code=404)
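The branching in get_response hinges on stat.S_ISREG / stat.S_ISDIR applied to an os.stat result. A small standalone check of that mechanism:

import os
import stat

st = os.stat('.')                 # stat the current directory
print(stat.S_ISDIR(st.st_mode))   # True: HTML mode would look for index.html
print(stat.S_ISREG(st.st_mode))   # False: not a regular file to stream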
def _makeTimingRelative(absoluteDataList):
    '''
    Given normal pitch tier data, puts the times on a scale from 0 to 1

    Input is a list of tuples of the form
    [(time1, pitch1), (time2, pitch2), ...]

    Also returns the start and end time so that the process can be reversed
    '''
    timingSeq = [row[0] for row in absoluteDataList]
    valueSeq = [list(row[1:]) for row in absoluteDataList]

    relTimingSeq, startTime, endTime = makeSequenceRelative(timingSeq)

    relDataList = [tuple([time, ] + row) for time, row
                   in zip(relTimingSeq, valueSeq)]

    return relDataList, startTime, endTime
def function[_makeTimingRelative, parameter[absoluteDataList]]: constant[ Given normal pitch tier data, puts the times on a scale from 0 to 1 Input is a list of tuples of the form ([(time1, pitch1), (time2, pitch2),...] Also returns the start and end time so that the process can be reversed ] variable[timingSeq] assign[=] <ast.ListComp object at 0x7da1b10408e0> variable[valueSeq] assign[=] <ast.ListComp object at 0x7da1b1040820> <ast.Tuple object at 0x7da1b1040eb0> assign[=] call[name[makeSequenceRelative], parameter[name[timingSeq]]] variable[relDataList] assign[=] <ast.ListComp object at 0x7da1b1040f40> return[tuple[[<ast.Name object at 0x7da1b1107fa0>, <ast.Name object at 0x7da1b1105780>, <ast.Name object at 0x7da1b1106da0>]]]
keyword[def] identifier[_makeTimingRelative] ( identifier[absoluteDataList] ): literal[string] identifier[timingSeq] =[ identifier[row] [ literal[int] ] keyword[for] identifier[row] keyword[in] identifier[absoluteDataList] ] identifier[valueSeq] =[ identifier[list] ( identifier[row] [ literal[int] :]) keyword[for] identifier[row] keyword[in] identifier[absoluteDataList] ] identifier[relTimingSeq] , identifier[startTime] , identifier[endTime] = identifier[makeSequenceRelative] ( identifier[timingSeq] ) identifier[relDataList] =[ identifier[tuple] ([ identifier[time] ,]+ identifier[row] ) keyword[for] identifier[time] , identifier[row] keyword[in] identifier[zip] ( identifier[relTimingSeq] , identifier[valueSeq] )] keyword[return] identifier[relDataList] , identifier[startTime] , identifier[endTime]
def _makeTimingRelative(absoluteDataList):
    """
    Given normal pitch tier data, puts the times on a scale from 0 to 1

    Input is a list of tuples of the form
    [(time1, pitch1), (time2, pitch2), ...]

    Also returns the start and end time so that the process can be reversed
    """
    timingSeq = [row[0] for row in absoluteDataList]
    valueSeq = [list(row[1:]) for row in absoluteDataList]
    (relTimingSeq, startTime, endTime) = makeSequenceRelative(timingSeq)
    relDataList = [tuple([time] + row) for (time, row) in zip(relTimingSeq, valueSeq)]
    return (relDataList, startTime, endTime)
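A worked example of _makeTimingRelative with the function above in scope. makeSequenceRelative is not shown in this excerpt, so a plausible stand-in is defined first (assumed behaviour, not the library's code):

def makeSequenceRelative(seq):
    # Assumed behaviour: map a monotonic sequence onto [0, 1].
    start, end = seq[0], seq[-1]
    rel = [(t - start) / (end - start) for t in seq]
    return rel, start, end

data = [(0.5, 120.0), (0.75, 130.0), (1.0, 110.0)]   # (time, pitch) pairs
relData, t0, t1 = _makeTimingRelative(data)
print(relData)   # [(0.0, 120.0), (0.5, 130.0), (1.0, 110.0)]
print(t0, t1)    # 0.5 1.0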
def _register_order_book_channels(self): """ Registers the binding for the order_book channels. :return: """ channels = {'order_book': self.btcusd_ob_callback, 'order_book_btceur': self.btceur_ob_callback, 'order_book_eurusd': self.eurusd_ob_callback, 'order_book_xrpusd': self.xrpusd_ob_callback, 'order_book_xrpeur': self.xrpeur_ob_callback, 'order_book_xrpbtc': self.xrpbtc_ob_callback} event = 'data' self._bind_channels(event, channels)
def function[_register_order_book_channels, parameter[self]]: constant[ Registers the binding for the order_book channels. :return: ] variable[channels] assign[=] dictionary[[<ast.Constant object at 0x7da1b07bc4f0>, <ast.Constant object at 0x7da1b07bc4c0>, <ast.Constant object at 0x7da1b07bd240>, <ast.Constant object at 0x7da18f58eda0>, <ast.Constant object at 0x7da18f58e0e0>, <ast.Constant object at 0x7da18f58e800>], [<ast.Attribute object at 0x7da18f58ec50>, <ast.Attribute object at 0x7da18f58f160>, <ast.Attribute object at 0x7da18f58f0d0>, <ast.Attribute object at 0x7da18f58c2b0>, <ast.Attribute object at 0x7da18f58d840>, <ast.Attribute object at 0x7da18f58cac0>]] variable[event] assign[=] constant[data] call[name[self]._bind_channels, parameter[name[event], name[channels]]]
keyword[def] identifier[_register_order_book_channels] ( identifier[self] ): literal[string] identifier[channels] ={ literal[string] : identifier[self] . identifier[btcusd_ob_callback] , literal[string] : identifier[self] . identifier[btceur_ob_callback] , literal[string] : identifier[self] . identifier[eurusd_ob_callback] , literal[string] : identifier[self] . identifier[xrpusd_ob_callback] , literal[string] : identifier[self] . identifier[xrpeur_ob_callback] , literal[string] : identifier[self] . identifier[xrpbtc_ob_callback] } identifier[event] = literal[string] identifier[self] . identifier[_bind_channels] ( identifier[event] , identifier[channels] )
def _register_order_book_channels(self): """ Registers the binding for the order_book channels. :return: """ channels = {'order_book': self.btcusd_ob_callback, 'order_book_btceur': self.btceur_ob_callback, 'order_book_eurusd': self.eurusd_ob_callback, 'order_book_xrpusd': self.xrpusd_ob_callback, 'order_book_xrpeur': self.xrpeur_ob_callback, 'order_book_xrpbtc': self.xrpbtc_ob_callback} event = 'data' self._bind_channels(event, channels)
def radec2sky(ra, dec): """ Convert [ra], [dec] to [(ra[0], dec[0]),....] and also ra,dec to [(ra,dec)] if ra/dec are not iterable Parameters ---------- ra, dec : float or iterable Sky coordinates Returns ------- sky : numpy.array array of (ra,dec) coordinates. """ try: sky = np.array(list(zip(ra, dec))) except TypeError: sky = np.array([(ra, dec)]) return sky
def function[radec2sky, parameter[ra, dec]]: constant[ Convert [ra], [dec] to [(ra[0], dec[0]),....] and also ra,dec to [(ra,dec)] if ra/dec are not iterable Parameters ---------- ra, dec : float or iterable Sky coordinates Returns ------- sky : numpy.array array of (ra,dec) coordinates. ] <ast.Try object at 0x7da20e9550c0> return[name[sky]]
keyword[def] identifier[radec2sky] ( identifier[ra] , identifier[dec] ): literal[string] keyword[try] : identifier[sky] = identifier[np] . identifier[array] ( identifier[list] ( identifier[zip] ( identifier[ra] , identifier[dec] ))) keyword[except] identifier[TypeError] : identifier[sky] = identifier[np] . identifier[array] ([( identifier[ra] , identifier[dec] )]) keyword[return] identifier[sky]
def radec2sky(ra, dec): """ Convert [ra], [dec] to [(ra[0], dec[0]),....] and also ra,dec to [(ra,dec)] if ra/dec are not iterable Parameters ---------- ra, dec : float or iterable Sky coordinates Returns ------- sky : numpy.array array of (ra,dec) coordinates. """ try: sky = np.array(list(zip(ra, dec))) # depends on [control=['try'], data=[]] except TypeError: sky = np.array([(ra, dec)]) # depends on [control=['except'], data=[]] return sky
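radec2sky relies on zip raising TypeError for non-iterable scalars; both input shapes in one short check, with the function above (and its module-level numpy import) in scope:

print(radec2sky(10.0, -30.0))
# [[ 10. -30.]]            scalars wrapped into a single (ra, dec) row
print(radec2sky([10.0, 11.0], [-30.0, -31.0]))
# [[ 10. -30.]
#  [ 11. -31.]]            iterables zipped row by row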
def generate(self, clusters, version=None): """ Generates HAProxy config file content based on a given list of clusters. """ now = datetime.datetime.now() sections = [ Section( "Auto-generated by Lighthouse (%s)" % now.strftime("%c"), self.global_stanza, self.defaults_stanza ) ] meta_stanzas = [ MetaFrontendStanza( name, self.meta_clusters[name]["port"], self.meta_clusters[name].get("frontend", []), members, self.bind_address ) for name, members in six.iteritems(self.get_meta_clusters(clusters)) ] frontend_stanzas = [ FrontendStanza(cluster, self.bind_address) for cluster in clusters if "port" in cluster.haproxy ] backend_stanzas = [BackendStanza(cluster) for cluster in clusters] if version and version >= (1, 5, 0): peers_stanzas = [PeersStanza(cluster) for cluster in clusters] else: peers_stanzas = [] sections.extend([ Section("Frontend stanzas for ACL meta clusters", *meta_stanzas), Section("Per-cluster frontend definitions", *frontend_stanzas), Section("Per-cluster backend definitions", *backend_stanzas), Section("Per-cluster peer listings", *peers_stanzas), Section("Individual proxy definitions", *self.proxy_stanzas), ]) if self.stats_stanza: sections.append( Section("Listener for stats web interface", self.stats_stanza) ) return "\n\n\n".join([str(section) for section in sections]) + "\n"
def function[generate, parameter[self, clusters, version]]: constant[ Generates HAProxy config file content based on a given list of clusters. ] variable[now] assign[=] call[name[datetime].datetime.now, parameter[]] variable[sections] assign[=] list[[<ast.Call object at 0x7da207f001f0>]] variable[meta_stanzas] assign[=] <ast.ListComp object at 0x7da207f005e0> variable[frontend_stanzas] assign[=] <ast.ListComp object at 0x7da207f031f0> variable[backend_stanzas] assign[=] <ast.ListComp object at 0x7da2041d89a0> if <ast.BoolOp object at 0x7da20e9b0d30> begin[:] variable[peers_stanzas] assign[=] <ast.ListComp object at 0x7da18f09e740> call[name[sections].extend, parameter[list[[<ast.Call object at 0x7da18f09f640>, <ast.Call object at 0x7da2041d83a0>, <ast.Call object at 0x7da2041db820>, <ast.Call object at 0x7da2041d84c0>, <ast.Call object at 0x7da2041dae60>]]]] if name[self].stats_stanza begin[:] call[name[sections].append, parameter[call[name[Section], parameter[constant[Listener for stats web interface], name[self].stats_stanza]]]] return[binary_operation[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b0b71300>]] + constant[ ]]]
keyword[def] identifier[generate] ( identifier[self] , identifier[clusters] , identifier[version] = keyword[None] ): literal[string] identifier[now] = identifier[datetime] . identifier[datetime] . identifier[now] () identifier[sections] =[ identifier[Section] ( literal[string] % identifier[now] . identifier[strftime] ( literal[string] ), identifier[self] . identifier[global_stanza] , identifier[self] . identifier[defaults_stanza] ) ] identifier[meta_stanzas] =[ identifier[MetaFrontendStanza] ( identifier[name] , identifier[self] . identifier[meta_clusters] [ identifier[name] ][ literal[string] ], identifier[self] . identifier[meta_clusters] [ identifier[name] ]. identifier[get] ( literal[string] ,[]), identifier[members] , identifier[self] . identifier[bind_address] ) keyword[for] identifier[name] , identifier[members] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[get_meta_clusters] ( identifier[clusters] )) ] identifier[frontend_stanzas] =[ identifier[FrontendStanza] ( identifier[cluster] , identifier[self] . identifier[bind_address] ) keyword[for] identifier[cluster] keyword[in] identifier[clusters] keyword[if] literal[string] keyword[in] identifier[cluster] . identifier[haproxy] ] identifier[backend_stanzas] =[ identifier[BackendStanza] ( identifier[cluster] ) keyword[for] identifier[cluster] keyword[in] identifier[clusters] ] keyword[if] identifier[version] keyword[and] identifier[version] >=( literal[int] , literal[int] , literal[int] ): identifier[peers_stanzas] =[ identifier[PeersStanza] ( identifier[cluster] ) keyword[for] identifier[cluster] keyword[in] identifier[clusters] ] keyword[else] : identifier[peers_stanzas] =[] identifier[sections] . identifier[extend] ([ identifier[Section] ( literal[string] ,* identifier[meta_stanzas] ), identifier[Section] ( literal[string] ,* identifier[frontend_stanzas] ), identifier[Section] ( literal[string] ,* identifier[backend_stanzas] ), identifier[Section] ( literal[string] ,* identifier[peers_stanzas] ), identifier[Section] ( literal[string] ,* identifier[self] . identifier[proxy_stanzas] ), ]) keyword[if] identifier[self] . identifier[stats_stanza] : identifier[sections] . identifier[append] ( identifier[Section] ( literal[string] , identifier[self] . identifier[stats_stanza] ) ) keyword[return] literal[string] . identifier[join] ([ identifier[str] ( identifier[section] ) keyword[for] identifier[section] keyword[in] identifier[sections] ])+ literal[string]
def generate(self, clusters, version=None): """ Generates HAProxy config file content based on a given list of clusters. """ now = datetime.datetime.now() sections = [Section('Auto-generated by Lighthouse (%s)' % now.strftime('%c'), self.global_stanza, self.defaults_stanza)] meta_stanzas = [MetaFrontendStanza(name, self.meta_clusters[name]['port'], self.meta_clusters[name].get('frontend', []), members, self.bind_address) for (name, members) in six.iteritems(self.get_meta_clusters(clusters))] frontend_stanzas = [FrontendStanza(cluster, self.bind_address) for cluster in clusters if 'port' in cluster.haproxy] backend_stanzas = [BackendStanza(cluster) for cluster in clusters] if version and version >= (1, 5, 0): peers_stanzas = [PeersStanza(cluster) for cluster in clusters] # depends on [control=['if'], data=[]] else: peers_stanzas = [] sections.extend([Section('Frontend stanzas for ACL meta clusters', *meta_stanzas), Section('Per-cluster frontend definitions', *frontend_stanzas), Section('Per-cluster backend definitions', *backend_stanzas), Section('Per-cluster peer listings', *peers_stanzas), Section('Individual proxy definitions', *self.proxy_stanzas)]) if self.stats_stanza: sections.append(Section('Listener for stats web interface', self.stats_stanza)) # depends on [control=['if'], data=[]] return '\n\n\n'.join([str(section) for section in sections]) + '\n'
def download(self, job_id, destination=None, timeout=DEFAULT_TIMEOUT, retries=DEFAULT_RETRIES): """ Downloads all screenshots for given job_id to `destination` folder. If `destination` is None, then screenshots will be saved in current directory. """ self._retries_num = 0 sleep(timeout) self.save_many(job_id, destination, timeout, retries) return self._cache
def function[download, parameter[self, job_id, destination, timeout, retries]]: constant[ Downloads all screenshots for given job_id to `destination` folder. If `destination` is None, then screenshots will be saved in current directory. ] name[self]._retries_num assign[=] constant[0] call[name[sleep], parameter[name[timeout]]] call[name[self].save_many, parameter[name[job_id], name[destination], name[timeout], name[retries]]] return[name[self]._cache]
keyword[def] identifier[download] ( identifier[self] , identifier[job_id] , identifier[destination] = keyword[None] , identifier[timeout] = identifier[DEFAULT_TIMEOUT] , identifier[retries] = identifier[DEFAULT_RETRIES] ): literal[string] identifier[self] . identifier[_retries_num] = literal[int] identifier[sleep] ( identifier[timeout] ) identifier[self] . identifier[save_many] ( identifier[job_id] , identifier[destination] , identifier[timeout] , identifier[retries] ) keyword[return] identifier[self] . identifier[_cache]
def download(self, job_id, destination=None, timeout=DEFAULT_TIMEOUT, retries=DEFAULT_RETRIES): """ Downloads all screenshots for given job_id to `destination` folder. If `destination` is None, then screenshots will be saved in current directory. """ self._retries_num = 0 sleep(timeout) self.save_many(job_id, destination, timeout, retries) return self._cache
def refresh_session(self, sessionkey, refresh_token=None):
    ''' Refresh Session Token '''
    if not refresh_token:
        refresh_token = sessionkey
    params = {
        'appkey'       : self.API_KEY,
        'sessionkey'   : sessionkey,
        'refresh_token': refresh_token
    }
    src = ''.join(["%s%s" % (k, v) for k, v in sorted(params.iteritems())]) + self.APP_SECRET
    params['sign'] = md5(src).hexdigest().upper()

    form_data = urllib.urlencode(params)
    rsp = requests.get('%s?%s' % (self.REFRESH_TOKEN_URL, form_data))
    rsp = json.loads(rsp.content)
    if 'error' in rsp:
        raise TOPException(rsp['error'], rsp['error_description'])

    rsp['re_expires_in'] = int(rsp['re_expires_in'])
    rsp['expires_in'] = int(rsp['expires_in'])
    rsp['session'] = rsp['top_session']
    del rsp['top_session']
    return rsp
def function[refresh_session, parameter[self, sessionkey, refresh_token]]: constant[ Refresh Session Token ] if <ast.UnaryOp object at 0x7da1b25d97e0> begin[:] variable[refresh_token] assign[=] name[sessionkey] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b25dbb50>, <ast.Constant object at 0x7da1b25dbaf0>, <ast.Constant object at 0x7da1b25dbb20>], [<ast.Attribute object at 0x7da1b25d9ab0>, <ast.Name object at 0x7da1b25d9ae0>, <ast.Name object at 0x7da1b25dabc0>]] variable[src] assign[=] binary_operation[call[constant[].join, parameter[<ast.ListComp object at 0x7da1b25d8730>]] + name[self].APP_SECRET] call[name[params]][constant[sign]] assign[=] call[call[call[name[md5], parameter[name[src]]].hexdigest, parameter[]].upper, parameter[]] variable[form_data] assign[=] call[name[urllib].urlencode, parameter[name[params]]] variable[rsp] assign[=] call[name[requests].get, parameter[binary_operation[constant[%s?%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b25da350>, <ast.Name object at 0x7da1b25da680>]]]]] variable[rsp] assign[=] call[name[json].loads, parameter[name[rsp].content]] if compare[constant[error] in name[rsp]] begin[:] <ast.Raise object at 0x7da1b25db040> return[constant[None]] call[name[rsp]][constant[re_expires_in]] assign[=] call[name[int], parameter[call[name[rsp]][constant[re_expires_in]]]] call[name[rsp]][constant[expires_in]] assign[=] call[name[int], parameter[call[name[rsp]][constant[expires_in]]]] call[name[rsp]][constant[session]] assign[=] call[name[rsp]][constant[top_session]] <ast.Delete object at 0x7da1b25db5e0> return[name[rsp]]
keyword[def] identifier[refresh_session] ( identifier[self] , identifier[sessionkey] , identifier[refresh_token] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[refresh_token] : identifier[refresh_token] = identifier[sessionkey] identifier[params] ={ literal[string] : identifier[self] . identifier[API_KEY] , literal[string] : identifier[sessionkey] , literal[string] : identifier[refresh_token] } identifier[src] = literal[string] . identifier[join] ([ literal[string] %( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[params] . identifier[iteritems] ())])+ identifier[self] . identifier[APP_SECRET] identifier[params] [ literal[string] ]= identifier[md5] ( identifier[src] ). identifier[hexdigest] (). identifier[upper] () identifier[form_data] = identifier[urllib] . identifier[urlencode] ( identifier[params] ) identifier[rsp] = identifier[requests] . identifier[get] ( literal[string] %( identifier[self] . identifier[REFRESH_TOKEN_URL] , identifier[form_data] )) identifier[rsp] = identifier[json] . identifier[loads] ( identifier[rsp] . identifier[content] ) keyword[if] literal[string] keyword[in] identifier[rsp] : keyword[raise] identifier[TOPException] ( identifier[rsp] [ literal[string] ], identifier[rsp] [ literal[string] ]) keyword[return] keyword[None] identifier[rsp] [ literal[string] ]= identifier[int] ( identifier[rsp] [ literal[string] ]) identifier[rsp] [ literal[string] ]= identifier[int] ( identifier[rsp] [ literal[string] ]) identifier[rsp] [ literal[string] ]= identifier[rsp] [ literal[string] ] keyword[del] identifier[rsp] [ literal[string] ] keyword[return] identifier[rsp]
def refresh_session(self, sessionkey, refresh_token=None): """ Refresh Session Token """ if not refresh_token: refresh_token = sessionkey # depends on [control=['if'], data=[]] params = {'appkey': self.API_KEY, 'sessionkey': sessionkey, 'refresh_token': refresh_token} src = ''.join(['%s%s' % (k, v) for (k, v) in sorted(params.iteritems())]) + self.APP_SECRET params['sign'] = md5(src).hexdigest().upper() form_data = urllib.urlencode(params) rsp = requests.get('%s?%s' % (self.REFRESH_TOKEN_URL, form_data)) rsp = json.loads(rsp.content) if 'error' in rsp: raise TOPException(rsp['error'], rsp['error_description']) return None # depends on [control=['if'], data=['rsp']] rsp['re_expires_in'] = int(rsp['re_expires_in']) rsp['expires_in'] = int(rsp['expires_in']) rsp['session'] = rsp['top_session'] del rsp['top_session'] return rsp
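A minimal, self-contained sketch of the signature step above, ported to Python 3 (the snippet itself is Python 2: `iteritems`, `urllib.urlencode`, and `md5` over a `str`). The key and secret values below are dummies; also note that the `return None` after the `raise` in the original is unreachable.

from hashlib import md5

APP_SECRET = 'dummy-secret'  # placeholder; a real value comes from the TOP app registration
params = {'appkey': 'dummy-key', 'sessionkey': 'sess', 'refresh_token': 'sess'}

# Concatenate "keyvalue" pairs in sorted key order, append the secret,
# then take the uppercase hex MD5 digest -- the same recipe as above.
src = ''.join('%s%s' % (k, v) for k, v in sorted(params.items())) + APP_SECRET
params['sign'] = md5(src.encode('utf-8')).hexdigest().upper()
print(params['sign'])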
def get_requirements_file_from_url(url):
    """fetches the requirements from the URL"""
    response = requests.get(url)
    if response.status_code == 200:
        return StringIO(response.text)
    else:
        return StringIO("")
def function[get_requirements_file_from_url, parameter[url]]:
    constant[fetches the requirements from the URL]
    variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
    if compare[name[response].status_code equal[==] constant[200]] begin[:]
    return[call[name[StringIO], parameter[name[response].text]]]
keyword[def] identifier[get_requirements_file_from_url] ( identifier[url] ): literal[string] identifier[response] = identifier[requests] . identifier[get] ( identifier[url] ) keyword[if] identifier[response] . identifier[status_code] == literal[int] : keyword[return] identifier[StringIO] ( identifier[response] . identifier[text] ) keyword[else] : keyword[return] identifier[StringIO] ( literal[string] )
def get_requirements_file_from_url(url):
    """fetches the requirements from the URL"""
    response = requests.get(url)
    if response.status_code == 200:
        return StringIO(response.text) # depends on [control=['if'], data=[]]
    else:
        return StringIO('')
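Hypothetical usage of the helper above (the URL is a placeholder). Note that any non-200 status is silently mapped to an empty `StringIO`, so callers cannot tell a missing file from an empty one.

from io import StringIO  # Python 3 location of StringIO, assumed here

import requests

def get_requirements_file_from_url(url):
    """fetches the requirements from the URL"""
    response = requests.get(url)
    if response.status_code == 200:
        return StringIO(response.text)
    return StringIO("")

fh = get_requirements_file_from_url("https://example.com/requirements.txt")  # placeholder URL
print(fh.read() or "<empty or unreachable>")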
def analyze(self, scratch, **kwargs): """Run and return the results of the VariableInitialization plugin.""" variables = dict((x, self.variable_state(x.scripts, x.variables)) for x in scratch.sprites) variables['global'] = self.variable_state(self.iter_scripts(scratch), scratch.stage.variables) # Output for now import pprint pprint.pprint(variables) return {'variables': variables}
def function[analyze, parameter[self, scratch]]: constant[Run and return the results of the VariableInitialization plugin.] variable[variables] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18f723b20>]] call[name[variables]][constant[global]] assign[=] call[name[self].variable_state, parameter[call[name[self].iter_scripts, parameter[name[scratch]]], name[scratch].stage.variables]] import module[pprint] call[name[pprint].pprint, parameter[name[variables]]] return[dictionary[[<ast.Constant object at 0x7da18f58da20>], [<ast.Name object at 0x7da18f58e170>]]]
keyword[def] identifier[analyze] ( identifier[self] , identifier[scratch] ,** identifier[kwargs] ): literal[string] identifier[variables] = identifier[dict] (( identifier[x] , identifier[self] . identifier[variable_state] ( identifier[x] . identifier[scripts] , identifier[x] . identifier[variables] )) keyword[for] identifier[x] keyword[in] identifier[scratch] . identifier[sprites] ) identifier[variables] [ literal[string] ]= identifier[self] . identifier[variable_state] ( identifier[self] . identifier[iter_scripts] ( identifier[scratch] ), identifier[scratch] . identifier[stage] . identifier[variables] ) keyword[import] identifier[pprint] identifier[pprint] . identifier[pprint] ( identifier[variables] ) keyword[return] { literal[string] : identifier[variables] }
def analyze(self, scratch, **kwargs): """Run and return the results of the VariableInitialization plugin.""" variables = dict(((x, self.variable_state(x.scripts, x.variables)) for x in scratch.sprites)) variables['global'] = self.variable_state(self.iter_scripts(scratch), scratch.stage.variables) # Output for now import pprint pprint.pprint(variables) return {'variables': variables}
def is_writable(path): """Check if path has write access""" try: testfile = tempfile.TemporaryFile(dir=path) testfile.close() except OSError as e: if e.errno == errno.EACCES: # 13 return False return True
def function[is_writable, parameter[path]]: constant[Check if path has write access] <ast.Try object at 0x7da1b2043d90> return[constant[True]]
keyword[def] identifier[is_writable] ( identifier[path] ): literal[string] keyword[try] : identifier[testfile] = identifier[tempfile] . identifier[TemporaryFile] ( identifier[dir] = identifier[path] ) identifier[testfile] . identifier[close] () keyword[except] identifier[OSError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[EACCES] : keyword[return] keyword[False] keyword[return] keyword[True]
def is_writable(path): """Check if path has write access""" try: testfile = tempfile.TemporaryFile(dir=path) testfile.close() # depends on [control=['try'], data=[]] except OSError as e: if e.errno == errno.EACCES: # 13 return False # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] return True
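Standalone usage of `is_writable`. One caveat worth knowing: only `EACCES` maps to `False`; any other `OSError` (for example `ENOENT` for a nonexistent path) is swallowed and the function still returns `True`.

import errno
import tempfile

def is_writable(path):
    """Check if path has write access"""
    try:
        testfile = tempfile.TemporaryFile(dir=path)
        testfile.close()
    except OSError as e:
        if e.errno == errno.EACCES:  # 13
            return False
    return True

print(is_writable(tempfile.gettempdir()))  # True on any normal system
print(is_writable("/no/such/dir"))         # also True: ENOENT is swallowed, not re-raised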
def _initialize_data(self): """ This method is called once on construction. After that, it may be manually called again to reset the device to its default state. After the first call during construction, the class is frozen. This means that attempting to define a new member variable will raise an exception. This is to prevent typos from inadvertently and silently adding new members instead of accessing existing ones. """ self.serial_command_mode = False self.pump_overspeed = False self.start_commanded = False self.stop_commanded = False self.hold_commanded = False # Real device remembers values from last run, we use arbitrary defaults self.temperature_rate = 5.0 # Rate of change of temperature in C/min self.temperature_limit = 0.0 # Target temperature in C self.pump_speed = 0 # Pump speed in arbitrary unit, ranging 0 to 30 self.temperature = 24.0 # Current temperature in C self.pump_manual_mode = False self.manual_target_speed = 0
def function[_initialize_data, parameter[self]]: constant[ This method is called once on construction. After that, it may be manually called again to reset the device to its default state. After the first call during construction, the class is frozen. This means that attempting to define a new member variable will raise an exception. This is to prevent typos from inadvertently and silently adding new members instead of accessing existing ones. ] name[self].serial_command_mode assign[=] constant[False] name[self].pump_overspeed assign[=] constant[False] name[self].start_commanded assign[=] constant[False] name[self].stop_commanded assign[=] constant[False] name[self].hold_commanded assign[=] constant[False] name[self].temperature_rate assign[=] constant[5.0] name[self].temperature_limit assign[=] constant[0.0] name[self].pump_speed assign[=] constant[0] name[self].temperature assign[=] constant[24.0] name[self].pump_manual_mode assign[=] constant[False] name[self].manual_target_speed assign[=] constant[0]
keyword[def] identifier[_initialize_data] ( identifier[self] ): literal[string] identifier[self] . identifier[serial_command_mode] = keyword[False] identifier[self] . identifier[pump_overspeed] = keyword[False] identifier[self] . identifier[start_commanded] = keyword[False] identifier[self] . identifier[stop_commanded] = keyword[False] identifier[self] . identifier[hold_commanded] = keyword[False] identifier[self] . identifier[temperature_rate] = literal[int] identifier[self] . identifier[temperature_limit] = literal[int] identifier[self] . identifier[pump_speed] = literal[int] identifier[self] . identifier[temperature] = literal[int] identifier[self] . identifier[pump_manual_mode] = keyword[False] identifier[self] . identifier[manual_target_speed] = literal[int]
def _initialize_data(self): """ This method is called once on construction. After that, it may be manually called again to reset the device to its default state. After the first call during construction, the class is frozen. This means that attempting to define a new member variable will raise an exception. This is to prevent typos from inadvertently and silently adding new members instead of accessing existing ones. """ self.serial_command_mode = False self.pump_overspeed = False self.start_commanded = False self.stop_commanded = False self.hold_commanded = False # Real device remembers values from last run, we use arbitrary defaults self.temperature_rate = 5.0 # Rate of change of temperature in C/min self.temperature_limit = 0.0 # Target temperature in C self.pump_speed = 0 # Pump speed in arbitrary unit, ranging 0 to 30 self.temperature = 24.0 # Current temperature in C self.pump_manual_mode = False self.manual_target_speed = 0
def up(job, input_file_id_1, input_file_id_2):
    """Merges the two files and places them in the output.
    """
    with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id):
        with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1:
            with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2:
                job.fileStore.logToMaster("Merging %s and %s to %s"
                                          % (input_file_id_1, input_file_id_2, output_id))
                merge(inputFileHandle1, inputFileHandle2, fileHandle)
        # Clean up the input files - these deletes will occur after the completion is successful.
        job.fileStore.deleteGlobalFile(input_file_id_1)
        job.fileStore.deleteGlobalFile(input_file_id_2)
        return output_id
def function[up, parameter[job, input_file_id_1, input_file_id_2]]: constant[Merges the two files and places them in the output. ] with call[name[job].fileStore.writeGlobalFileStream, parameter[]] begin[:] with call[name[job].fileStore.readGlobalFileStream, parameter[name[input_file_id_1]]] begin[:] with call[name[job].fileStore.readGlobalFileStream, parameter[name[input_file_id_2]]] begin[:] call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Merging %s and %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f813730>, <ast.Name object at 0x7da18f810be0>, <ast.Name object at 0x7da18f810e80>]]]]] call[name[merge], parameter[name[inputFileHandle1], name[inputFileHandle2], name[fileHandle]]] call[name[job].fileStore.deleteGlobalFile, parameter[name[input_file_id_1]]] call[name[job].fileStore.deleteGlobalFile, parameter[name[input_file_id_2]]] return[name[output_id]]
keyword[def] identifier[up] ( identifier[job] , identifier[input_file_id_1] , identifier[input_file_id_2] ): literal[string] keyword[with] identifier[job] . identifier[fileStore] . identifier[writeGlobalFileStream] () keyword[as] ( identifier[fileHandle] , identifier[output_id] ): keyword[with] identifier[job] . identifier[fileStore] . identifier[readGlobalFileStream] ( identifier[input_file_id_1] ) keyword[as] identifier[inputFileHandle1] : keyword[with] identifier[job] . identifier[fileStore] . identifier[readGlobalFileStream] ( identifier[input_file_id_2] ) keyword[as] identifier[inputFileHandle2] : identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] %( identifier[input_file_id_1] , identifier[input_file_id_2] , identifier[output_id] )) identifier[merge] ( identifier[inputFileHandle1] , identifier[inputFileHandle2] , identifier[fileHandle] ) identifier[job] . identifier[fileStore] . identifier[deleteGlobalFile] ( identifier[input_file_id_1] ) identifier[job] . identifier[fileStore] . identifier[deleteGlobalFile] ( identifier[input_file_id_2] ) keyword[return] identifier[output_id]
def up(job, input_file_id_1, input_file_id_2):
    """Merges the two files and places them in the output.
    """
    with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id):
        with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1:
            with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2:
                job.fileStore.logToMaster('Merging %s and %s to %s' % (input_file_id_1, input_file_id_2, output_id))
                merge(inputFileHandle1, inputFileHandle2, fileHandle) # depends on [control=['with'], data=['inputFileHandle2']] # depends on [control=['with'], data=['inputFileHandle1']]
        # Clean up the input files - these deletes will occur after the completion is successful.
        job.fileStore.deleteGlobalFile(input_file_id_1)
        job.fileStore.deleteGlobalFile(input_file_id_2)
        return output_id # depends on [control=['with'], data=[]]
def get_stats_monthly(start=None, end=None, **kwargs): """ MOVED to iexfinance.iexdata.get_stats_summary """ import warnings warnings.warn(WNG_MSG % ("get_stats_monthly", "iexdata.get_stats_summary")) return MonthlySummaryReader(start=start, end=end, **kwargs).fetch()
def function[get_stats_monthly, parameter[start, end]]: constant[ MOVED to iexfinance.iexdata.get_stats_summary ] import module[warnings] call[name[warnings].warn, parameter[binary_operation[name[WNG_MSG] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Constant object at 0x7da1b1d8c070>, <ast.Constant object at 0x7da1b1d8d780>]]]]] return[call[call[name[MonthlySummaryReader], parameter[]].fetch, parameter[]]]
keyword[def] identifier[get_stats_monthly] ( identifier[start] = keyword[None] , identifier[end] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[import] identifier[warnings] identifier[warnings] . identifier[warn] ( identifier[WNG_MSG] %( literal[string] , literal[string] )) keyword[return] identifier[MonthlySummaryReader] ( identifier[start] = identifier[start] , identifier[end] = identifier[end] ,** identifier[kwargs] ). identifier[fetch] ()
def get_stats_monthly(start=None, end=None, **kwargs): """ MOVED to iexfinance.iexdata.get_stats_summary """ import warnings warnings.warn(WNG_MSG % ('get_stats_monthly', 'iexdata.get_stats_summary')) return MonthlySummaryReader(start=start, end=end, **kwargs).fetch()
def LE16(value, min_value=None, max_value=None, fuzzable=True, name=None, full_range=False): '''16-bit field, Little endian encoded''' return UInt16(value, min_value=min_value, max_value=max_value, encoder=ENC_INT_LE, fuzzable=fuzzable, name=name, full_range=full_range)
def function[LE16, parameter[value, min_value, max_value, fuzzable, name, full_range]]: constant[16-bit field, Little endian encoded] return[call[name[UInt16], parameter[name[value]]]]
keyword[def] identifier[LE16] ( identifier[value] , identifier[min_value] = keyword[None] , identifier[max_value] = keyword[None] , identifier[fuzzable] = keyword[True] , identifier[name] = keyword[None] , identifier[full_range] = keyword[False] ): literal[string] keyword[return] identifier[UInt16] ( identifier[value] , identifier[min_value] = identifier[min_value] , identifier[max_value] = identifier[max_value] , identifier[encoder] = identifier[ENC_INT_LE] , identifier[fuzzable] = identifier[fuzzable] , identifier[name] = identifier[name] , identifier[full_range] = identifier[full_range] )
def LE16(value, min_value=None, max_value=None, fuzzable=True, name=None, full_range=False): """16-bit field, Little endian encoded""" return UInt16(value, min_value=min_value, max_value=max_value, encoder=ENC_INT_LE, fuzzable=fuzzable, name=name, full_range=full_range)
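What `ENC_INT_LE` presumably does to the bytes, shown with the standard `struct` module rather than the kitty framework itself:

import struct

value = 0x1234
print(struct.pack('<H', value).hex())  # 3412 -- little endian, low byte first
print(struct.pack('>H', value).hex())  # 1234 -- big endian, for contrast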
def id(self): """ Bug ID number that caused this error """ m = re.match(r'Bug #(\d+) does not exist', self.message) return m.group(1)
def function[id, parameter[self]]: constant[ Bug ID number that caused this error ] variable[m] assign[=] call[name[re].match, parameter[constant[Bug #(\d+) does not exist], name[self].message]] return[call[name[m].group, parameter[constant[1]]]]
keyword[def] identifier[id] ( identifier[self] ): literal[string] identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[self] . identifier[message] ) keyword[return] identifier[m] . identifier[group] ( literal[int] )
def id(self): """ Bug ID number that caused this error """ m = re.match('Bug #(\\d+) does not exist', self.message) return m.group(1)
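The extraction on its own, with a made-up message. `re.match` returns `None` when the text has a different shape, in which case the original `m.group(1)` would raise `AttributeError`.

import re

message = "Bug #1234 does not exist"  # hypothetical error text
m = re.match(r'Bug #(\d+) does not exist', message)
print(m.group(1) if m else None)      # 1234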
def save_module(self, obj): """ Save a module as an import """ mod_name = obj.__name__ # If module is successfully found then it is not a dynamically created module if hasattr(obj, '__file__'): is_dynamic = False else: try: _find_module(mod_name) is_dynamic = False except ImportError: is_dynamic = True self.modules.add(obj) if is_dynamic: self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj) else: self.save_reduce(subimport, (obj.__name__,), obj=obj)
def function[save_module, parameter[self, obj]]: constant[ Save a module as an import ] variable[mod_name] assign[=] name[obj].__name__ if call[name[hasattr], parameter[name[obj], constant[__file__]]] begin[:] variable[is_dynamic] assign[=] constant[False] call[name[self].modules.add, parameter[name[obj]]] if name[is_dynamic] begin[:] call[name[self].save_reduce, parameter[name[dynamic_subimport], tuple[[<ast.Attribute object at 0x7da1b1fc9990>, <ast.Call object at 0x7da1b1fc9300>]]]]
keyword[def] identifier[save_module] ( identifier[self] , identifier[obj] ): literal[string] identifier[mod_name] = identifier[obj] . identifier[__name__] keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ): identifier[is_dynamic] = keyword[False] keyword[else] : keyword[try] : identifier[_find_module] ( identifier[mod_name] ) identifier[is_dynamic] = keyword[False] keyword[except] identifier[ImportError] : identifier[is_dynamic] = keyword[True] identifier[self] . identifier[modules] . identifier[add] ( identifier[obj] ) keyword[if] identifier[is_dynamic] : identifier[self] . identifier[save_reduce] ( identifier[dynamic_subimport] ,( identifier[obj] . identifier[__name__] , identifier[vars] ( identifier[obj] )), identifier[obj] = identifier[obj] ) keyword[else] : identifier[self] . identifier[save_reduce] ( identifier[subimport] ,( identifier[obj] . identifier[__name__] ,), identifier[obj] = identifier[obj] )
def save_module(self, obj): """ Save a module as an import """ mod_name = obj.__name__ # If module is successfully found then it is not a dynamically created module if hasattr(obj, '__file__'): is_dynamic = False # depends on [control=['if'], data=[]] else: try: _find_module(mod_name) is_dynamic = False # depends on [control=['try'], data=[]] except ImportError: is_dynamic = True # depends on [control=['except'], data=[]] self.modules.add(obj) if is_dynamic: self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj) # depends on [control=['if'], data=[]] else: self.save_reduce(subimport, (obj.__name__,), obj=obj)
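The `hasattr(obj, '__file__')` probe above is the cheap way to tell a runtime-built module from one loaded off disk; a tiny demonstration (on a standard CPython install):

import sys
import types

dynamic = types.ModuleType("made_up_module")      # built at runtime, no file behind it
print(hasattr(dynamic, "__file__"))               # False -> would be pickled by value
print(hasattr(sys.modules["types"], "__file__"))  # True  -> would be pickled as an import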
def log_env_gte(desired): """ Boolean check if the current environment LOGLEVEL is at least as verbose as a desired LOGLEVEL :param desired: <str> one of 9 keys in <brain.environment.stage> :return: <bool> """ return LOGLEVELS.get(check_log_env()) >= LOGLEVELS.get(desired, LOGLEVELS[TEST])
def function[log_env_gte, parameter[desired]]: constant[ Boolean check if the current environment LOGLEVEL is at least as verbose as a desired LOGLEVEL :param desired: <str> one of 9 keys in <brain.environment.stage> :return: <bool> ] return[compare[call[name[LOGLEVELS].get, parameter[call[name[check_log_env], parameter[]]]] greater_or_equal[>=] call[name[LOGLEVELS].get, parameter[name[desired], call[name[LOGLEVELS]][name[TEST]]]]]]
keyword[def] identifier[log_env_gte] ( identifier[desired] ): literal[string] keyword[return] identifier[LOGLEVELS] . identifier[get] ( identifier[check_log_env] ())>= identifier[LOGLEVELS] . identifier[get] ( identifier[desired] , identifier[LOGLEVELS] [ identifier[TEST] ])
def log_env_gte(desired): """ Boolean check if the current environment LOGLEVEL is at least as verbose as a desired LOGLEVEL :param desired: <str> one of 9 keys in <brain.environment.stage> :return: <bool> """ return LOGLEVELS.get(check_log_env()) >= LOGLEVELS.get(desired, LOGLEVELS[TEST])
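A self-contained sketch with invented stand-ins for `LOGLEVELS` and `check_log_env` (the real module defines nine levels; the names and numbers here are made up):

LOGLEVELS = {'PROD': 0, 'STAGE': 1, 'TEST': 2, 'DEV': 3}  # invented subset
TEST = 'TEST'

def check_log_env():
    return 'DEV'  # pretend the current environment is the most verbose one

def log_env_gte(desired):
    return LOGLEVELS.get(check_log_env()) >= LOGLEVELS.get(desired, LOGLEVELS[TEST])

print(log_env_gte('STAGE'))    # True: DEV (3) is at least as verbose as STAGE (1)
print(log_env_gte('UNKNOWN'))  # unknown names fall back to the TEST level: 3 >= 2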
def collectintargz(target, source, env):
    """ Puts all source files into a tar.gz file. """
    # the rpm tool depends on a source package; until this is changed,
    # this hack needs to be here to try to pack all sources in.
    sources = env.FindSourceFiles()
    # filter out the target we are building the source list for.
    sources = [s for s in sources if s not in target]
    # find the .spec file for rpm and add it since it is not necessarily found
    # by the FindSourceFiles function.
    sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
    # sort to keep sources from changing order across builds
    sources.sort()
    # as the source contains the url of the source package this rpm package
    # is built from, we extract the target name
    tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
    try:
        tarball = env['SOURCE_URL'].split('/')[-1]
    except KeyError as e:
        raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
    tarball = src_targz.package(env, source=sources, target=tarball,
                                PACKAGEROOT=env['PACKAGEROOT'], )
    return (target, tarball)
def function[collectintargz, parameter[target, source, env]]: constant[ Puts all source files into a tar.gz file. ] variable[sources] assign[=] call[name[env].FindSourceFiles, parameter[]] variable[sources] assign[=] <ast.ListComp object at 0x7da20e954f10> call[name[sources].extend, parameter[<ast.ListComp object at 0x7da20e956770>]] call[name[sources].sort, parameter[]] variable[tarball] assign[=] call[binary_operation[call[name[str], parameter[call[name[target]][constant[0]]]] + constant[.tar.gz]].replace, parameter[constant[.rpm], constant[]]] <ast.Try object at 0x7da2045668f0> variable[tarball] assign[=] call[name[src_targz].package, parameter[name[env]]] return[tuple[[<ast.Name object at 0x7da204564130>, <ast.Name object at 0x7da204564280>]]]
keyword[def] identifier[collectintargz] ( identifier[target] , identifier[source] , identifier[env] ): literal[string] identifier[sources] = identifier[env] . identifier[FindSourceFiles] () identifier[sources] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[sources] keyword[if] identifier[s] keyword[not] keyword[in] identifier[target] ] identifier[sources] . identifier[extend] ([ identifier[s] keyword[for] identifier[s] keyword[in] identifier[source] keyword[if] identifier[str] ( identifier[s] ). identifier[rfind] ( literal[string] )!=- literal[int] ]) identifier[sources] . identifier[sort] () identifier[tarball] =( identifier[str] ( identifier[target] [ literal[int] ])+ literal[string] ). identifier[replace] ( literal[string] , literal[string] ) keyword[try] : identifier[tarball] = identifier[env] [ literal[string] ]. identifier[split] ( literal[string] )[- literal[int] ] keyword[except] identifier[KeyError] keyword[as] identifier[e] : keyword[raise] identifier[SCons] . identifier[Errors] . identifier[UserError] ( literal[string] % identifier[e] . identifier[args] [ literal[int] ]) identifier[tarball] = identifier[src_targz] . identifier[package] ( identifier[env] , identifier[source] = identifier[sources] , identifier[target] = identifier[tarball] , identifier[PACKAGEROOT] = identifier[env] [ literal[string] ],) keyword[return] ( identifier[target] , identifier[tarball] )
def collectintargz(target, source, env):
    """ Puts all source files into a tar.gz file. """
    # the rpm tool depends on a source package; until this is changed,
    # this hack needs to be here to try to pack all sources in.
    sources = env.FindSourceFiles()
    # filter out the target we are building the source list for.
    sources = [s for s in sources if s not in target]
    # find the .spec file for rpm and add it since it is not necessarily found
    # by the FindSourceFiles function.
    sources.extend([s for s in source if str(s).rfind('.spec') != -1])
    # sort to keep sources from changing order across builds
    sources.sort()
    # as the source contains the url of the source package this rpm package
    # is built from, we extract the target name
    tarball = (str(target[0]) + '.tar.gz').replace('.rpm', '')
    try:
        tarball = env['SOURCE_URL'].split('/')[-1] # depends on [control=['try'], data=[]]
    except KeyError as e:
        raise SCons.Errors.UserError("Missing PackageTag '%s' for RPM packager" % e.args[0]) # depends on [control=['except'], data=['e']]
    tarball = src_targz.package(env, source=sources, target=tarball, PACKAGEROOT=env['PACKAGEROOT'])
    return (target, tarball)
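The naming logic in isolation, with hypothetical values: `.tar.gz` is appended first and `.rpm` stripped afterwards, unless a `SOURCE_URL` tag overrides the name with its last path component.

target0 = "python-foo-1.0-1.noarch.rpm"            # hypothetical build target
print((target0 + ".tar.gz").replace(".rpm", ""))   # python-foo-1.0-1.noarch.tar.gz

env = {"SOURCE_URL": "https://example.com/dist/python-foo-1.0.tar.gz"}
print(env["SOURCE_URL"].split("/")[-1])            # python-foo-1.0.tar.gz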
def _get_and_set_reconnection_handler(self, new_handler): """ Called by the _ControlReconnectionHandler when a new connection is successfully created. Clears out the _reconnection_handler on this ControlConnection. """ with self._reconnection_lock: old = self._reconnection_handler self._reconnection_handler = new_handler return old
def function[_get_and_set_reconnection_handler, parameter[self, new_handler]]: constant[ Called by the _ControlReconnectionHandler when a new connection is successfully created. Clears out the _reconnection_handler on this ControlConnection. ] with name[self]._reconnection_lock begin[:] variable[old] assign[=] name[self]._reconnection_handler name[self]._reconnection_handler assign[=] name[new_handler] return[name[old]]
keyword[def] identifier[_get_and_set_reconnection_handler] ( identifier[self] , identifier[new_handler] ): literal[string] keyword[with] identifier[self] . identifier[_reconnection_lock] : identifier[old] = identifier[self] . identifier[_reconnection_handler] identifier[self] . identifier[_reconnection_handler] = identifier[new_handler] keyword[return] identifier[old]
def _get_and_set_reconnection_handler(self, new_handler): """ Called by the _ControlReconnectionHandler when a new connection is successfully created. Clears out the _reconnection_handler on this ControlConnection. """ with self._reconnection_lock: old = self._reconnection_handler self._reconnection_handler = new_handler return old # depends on [control=['with'], data=[]]
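The method is a swap-under-lock: the caller atomically installs a new handler and receives the previous one (so it can be cancelled). The same pattern in isolation:

import threading

class Holder:
    def __init__(self):
        self._lock = threading.Lock()
        self._handler = None

    def get_and_set(self, new_handler):
        # Read-then-write under one lock so no other thread can interleave.
        with self._lock:
            old, self._handler = self._handler, new_handler
            return old

h = Holder()
print(h.get_and_set("first"))   # None
print(h.get_and_set("second"))  # first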
def add_permission(self, role, name): """ authorize a group for something """ if self.has_permission(role, name): return True targetGroup = AuthGroup.objects(role=role, creator=self.client).first() if not targetGroup: return False # Create or update permission = AuthPermission.objects(name=name).update( add_to_set__groups=[targetGroup], creator=self.client, upsert=True ) return True
def function[add_permission, parameter[self, role, name]]: constant[ authorize a group for something ] if call[name[self].has_permission, parameter[name[role], name[name]]] begin[:] return[constant[True]] variable[targetGroup] assign[=] call[call[name[AuthGroup].objects, parameter[]].first, parameter[]] if <ast.UnaryOp object at 0x7da1b0fde890> begin[:] return[constant[False]] variable[permission] assign[=] call[call[name[AuthPermission].objects, parameter[]].update, parameter[]] return[constant[True]]
keyword[def] identifier[add_permission] ( identifier[self] , identifier[role] , identifier[name] ): literal[string] keyword[if] identifier[self] . identifier[has_permission] ( identifier[role] , identifier[name] ): keyword[return] keyword[True] identifier[targetGroup] = identifier[AuthGroup] . identifier[objects] ( identifier[role] = identifier[role] , identifier[creator] = identifier[self] . identifier[client] ). identifier[first] () keyword[if] keyword[not] identifier[targetGroup] : keyword[return] keyword[False] identifier[permission] = identifier[AuthPermission] . identifier[objects] ( identifier[name] = identifier[name] ). identifier[update] ( identifier[add_to_set__groups] =[ identifier[targetGroup] ], identifier[creator] = identifier[self] . identifier[client] , identifier[upsert] = keyword[True] ) keyword[return] keyword[True]
def add_permission(self, role, name): """ authorize a group for something """ if self.has_permission(role, name): return True # depends on [control=['if'], data=[]] targetGroup = AuthGroup.objects(role=role, creator=self.client).first() if not targetGroup: return False # depends on [control=['if'], data=[]] # Create or update permission = AuthPermission.objects(name=name).update(add_to_set__groups=[targetGroup], creator=self.client, upsert=True) return True
def to_netcdf(data, filename, *, group="posterior", coords=None, dims=None):
    """Save dataset as a netcdf file.

    WARNING: Only idempotent in case `data` is InferenceData

    Parameters
    ----------
    data : InferenceData, or any object accepted by `convert_to_inference_data`
        Object to be saved
    filename : str
        name or path of the file to save to
    group : str (optional)
        In case `data` is not InferenceData, this is the group it will be saved to
    coords : dict (optional)
        See `convert_to_inference_data`
    dims : dict (optional)
        See `convert_to_inference_data`

    Returns
    -------
    str
        filename saved to
    """
    inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims)
    file_name = inference_data.to_netcdf(filename)
    return file_name
def function[to_netcdf, parameter[data, filename]]:
    constant[Save dataset as a netcdf file. WARNING: Only idempotent in case `data` is InferenceData Parameters ---------- data : InferenceData, or any object accepted by `convert_to_inference_data` Object to be saved filename : str name or path of the file to save to group : str (optional) In case `data` is not InferenceData, this is the group it will be saved to coords : dict (optional) See `convert_to_inference_data` dims : dict (optional) See `convert_to_inference_data` Returns ------- str filename saved to ]
    variable[inference_data] assign[=] call[name[convert_to_inference_data], parameter[name[data]]]
    variable[file_name] assign[=] call[name[inference_data].to_netcdf, parameter[name[filename]]]
    return[name[file_name]]
keyword[def] identifier[to_netcdf] ( identifier[data] , identifier[filename] ,*, identifier[group] = literal[string] , identifier[coords] = keyword[None] , identifier[dims] = keyword[None] ): literal[string] identifier[inference_data] = identifier[convert_to_inference_data] ( identifier[data] , identifier[group] = identifier[group] , identifier[coords] = identifier[coords] , identifier[dims] = identifier[dims] ) identifier[file_name] = identifier[inference_data] . identifier[to_netcdf] ( identifier[filename] ) keyword[return] identifier[file_name]
def to_netcdf(data, filename, *, group='posterior', coords=None, dims=None):
    """Save dataset as a netcdf file.

    WARNING: Only idempotent in case `data` is InferenceData

    Parameters
    ----------
    data : InferenceData, or any object accepted by `convert_to_inference_data`
        Object to be saved
    filename : str
        name or path of the file to save to
    group : str (optional)
        In case `data` is not InferenceData, this is the group it will be saved to
    coords : dict (optional)
        See `convert_to_inference_data`
    dims : dict (optional)
        See `convert_to_inference_data`

    Returns
    -------
    str
        filename saved to
    """
    inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims)
    file_name = inference_data.to_netcdf(filename)
    return file_name
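Assuming ArviZ is installed, the entry point accepts anything `convert_to_inference_data` does, for instance a dict of draws shaped (chain, draw):

import numpy as np
import arviz as az

draws = {"mu": np.random.randn(4, 500)}  # 4 chains, 500 draws each
path = az.to_netcdf(draws, "draws.nc")   # written to the 'posterior' group by default
print(path)                              # draws.nc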
def find_deck_spawns(provider: Provider, prod: bool=True) -> Iterable[str]:
    '''Find deck spawn transactions via Provider. Requires that the deck spawn P2TH address was imported into the local node, or that the remote API knows about the P2TH address.'''
    pa_params = param_query(provider.network)
    if isinstance(provider, RpcNode):
        if prod:
            decks = (i["txid"] for i in provider.listtransactions("PAPROD"))
        else:
            decks = (i["txid"] for i in provider.listtransactions("PATEST"))
    if isinstance(provider, Cryptoid) or isinstance(provider, Explorer):
        if prod:
            decks = (i for i in provider.listtransactions(pa_params.P2TH_addr))
        else:
            decks = (i for i in provider.listtransactions(pa_params.test_P2TH_addr))
    return decks
def function[find_deck_spawns, parameter[provider, prod]]:
    constant[Find deck spawn transactions via Provider. Requires that the deck spawn P2TH address was imported into the local node, or that the remote API knows about the P2TH address.]
    variable[pa_params] assign[=] call[name[param_query], parameter[name[provider].network]]
    if call[name[isinstance], parameter[name[provider], name[RpcNode]]] begin[:]
    if name[prod] begin[:]
    variable[decks] assign[=] <ast.GeneratorExp object at 0x7da1b256f2b0>
    if <ast.BoolOp object at 0x7da1b2490c10> begin[:]
    if name[prod] begin[:]
    variable[decks] assign[=] <ast.GeneratorExp object at 0x7da1b2492230>
    return[name[decks]]
keyword[def] identifier[find_deck_spawns] ( identifier[provider] : identifier[Provider] , identifier[prod] : identifier[bool] = keyword[True] )-> identifier[Iterable] [ identifier[str] ]: literal[string] identifier[pa_params] = identifier[param_query] ( identifier[provider] . identifier[network] ) keyword[if] identifier[isinstance] ( identifier[provider] , identifier[RpcNode] ): keyword[if] identifier[prod] : identifier[decks] =( identifier[i] [ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[provider] . identifier[listtransactions] ( literal[string] )) keyword[else] : identifier[decks] =( identifier[i] [ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[provider] . identifier[listtransactions] ( literal[string] )) keyword[if] identifier[isinstance] ( identifier[provider] , identifier[Cryptoid] ) keyword[or] identifier[isinstance] ( identifier[provider] , identifier[Explorer] ): keyword[if] identifier[prod] : identifier[decks] =( identifier[i] keyword[for] identifier[i] keyword[in] identifier[provider] . identifier[listtransactions] ( identifier[pa_params] . identifier[P2TH_addr] )) keyword[else] : identifier[decks] =( identifier[i] keyword[for] identifier[i] keyword[in] identifier[provider] . identifier[listtransactions] ( identifier[pa_params] . identifier[test_P2TH_addr] )) keyword[return] identifier[decks]
def find_deck_spawns(provider: Provider, prod: bool=True) -> Iterable[str]:
    """Find deck spawn transactions via Provider. Requires that the deck spawn P2TH address was imported into the local node, or that the remote API knows about the P2TH address."""
    pa_params = param_query(provider.network)
    if isinstance(provider, RpcNode):
        if prod:
            decks = (i['txid'] for i in provider.listtransactions('PAPROD')) # depends on [control=['if'], data=[]]
        else:
            decks = (i['txid'] for i in provider.listtransactions('PATEST')) # depends on [control=['if'], data=[]]
    if isinstance(provider, Cryptoid) or isinstance(provider, Explorer):
        if prod:
            decks = (i for i in provider.listtransactions(pa_params.P2TH_addr)) # depends on [control=['if'], data=[]]
        else:
            decks = (i for i in provider.listtransactions(pa_params.test_P2TH_addr)) # depends on [control=['if'], data=[]]
    return decks
def new(self, time_flags): # type: (int) -> None ''' Create a new Rock Ridge Time Stamp record. Parameters: time_flags - The flags to use for this time stamp record. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('TF record already initialized!') self.time_flags = time_flags tflen = 7 if self.time_flags & (1 << 7): tflen = 17 for index, fieldname in enumerate(self.FIELDNAMES): if self.time_flags & (1 << index): if tflen == 7: setattr(self, fieldname, dates.DirectoryRecordDate()) elif tflen == 17: setattr(self, fieldname, dates.VolumeDescriptorDate()) getattr(self, fieldname).new() self._initialized = True
def function[new, parameter[self, time_flags]]: constant[ Create a new Rock Ridge Time Stamp record. Parameters: time_flags - The flags to use for this time stamp record. Returns: Nothing. ] if name[self]._initialized begin[:] <ast.Raise object at 0x7da1b0f0f3d0> name[self].time_flags assign[=] name[time_flags] variable[tflen] assign[=] constant[7] if binary_operation[name[self].time_flags <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> constant[7]]] begin[:] variable[tflen] assign[=] constant[17] for taget[tuple[[<ast.Name object at 0x7da1b0f0d060>, <ast.Name object at 0x7da1b0f0c100>]]] in starred[call[name[enumerate], parameter[name[self].FIELDNAMES]]] begin[:] if binary_operation[name[self].time_flags <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[index]]] begin[:] if compare[name[tflen] equal[==] constant[7]] begin[:] call[name[setattr], parameter[name[self], name[fieldname], call[name[dates].DirectoryRecordDate, parameter[]]]] call[call[name[getattr], parameter[name[self], name[fieldname]]].new, parameter[]] name[self]._initialized assign[=] constant[True]
keyword[def] identifier[new] ( identifier[self] , identifier[time_flags] ): literal[string] keyword[if] identifier[self] . identifier[_initialized] : keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] ) identifier[self] . identifier[time_flags] = identifier[time_flags] identifier[tflen] = literal[int] keyword[if] identifier[self] . identifier[time_flags] &( literal[int] << literal[int] ): identifier[tflen] = literal[int] keyword[for] identifier[index] , identifier[fieldname] keyword[in] identifier[enumerate] ( identifier[self] . identifier[FIELDNAMES] ): keyword[if] identifier[self] . identifier[time_flags] &( literal[int] << identifier[index] ): keyword[if] identifier[tflen] == literal[int] : identifier[setattr] ( identifier[self] , identifier[fieldname] , identifier[dates] . identifier[DirectoryRecordDate] ()) keyword[elif] identifier[tflen] == literal[int] : identifier[setattr] ( identifier[self] , identifier[fieldname] , identifier[dates] . identifier[VolumeDescriptorDate] ()) identifier[getattr] ( identifier[self] , identifier[fieldname] ). identifier[new] () identifier[self] . identifier[_initialized] = keyword[True]
def new(self, time_flags): # type: (int) -> None '\n Create a new Rock Ridge Time Stamp record.\n\n Parameters:\n time_flags - The flags to use for this time stamp record.\n Returns:\n Nothing.\n ' if self._initialized: raise pycdlibexception.PyCdlibInternalError('TF record already initialized!') # depends on [control=['if'], data=[]] self.time_flags = time_flags tflen = 7 if self.time_flags & 1 << 7: tflen = 17 # depends on [control=['if'], data=[]] for (index, fieldname) in enumerate(self.FIELDNAMES): if self.time_flags & 1 << index: if tflen == 7: setattr(self, fieldname, dates.DirectoryRecordDate()) # depends on [control=['if'], data=[]] elif tflen == 17: setattr(self, fieldname, dates.VolumeDescriptorDate()) # depends on [control=['if'], data=[]] getattr(self, fieldname).new() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] self._initialized = True
def append(self, item, name=None): """ Adds the given item to the end of the pipeline. """ with self.condition: self.queue.append(item) uuid = self._register_item(name, item) self.condition.notify_all() return uuid
def function[append, parameter[self, item, name]]: constant[ Adds the given item to the end of the pipeline. ] with name[self].condition begin[:] call[name[self].queue.append, parameter[name[item]]] variable[uuid] assign[=] call[name[self]._register_item, parameter[name[name], name[item]]] call[name[self].condition.notify_all, parameter[]] return[name[uuid]]
keyword[def] identifier[append] ( identifier[self] , identifier[item] , identifier[name] = keyword[None] ): literal[string] keyword[with] identifier[self] . identifier[condition] : identifier[self] . identifier[queue] . identifier[append] ( identifier[item] ) identifier[uuid] = identifier[self] . identifier[_register_item] ( identifier[name] , identifier[item] ) identifier[self] . identifier[condition] . identifier[notify_all] () keyword[return] identifier[uuid]
def append(self, item, name=None): """ Adds the given item to the end of the pipeline. """ with self.condition: self.queue.append(item) uuid = self._register_item(name, item) self.condition.notify_all() return uuid # depends on [control=['with'], data=[]]
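A toy pipeline showing why `append` runs under the condition: producers mutate the queue and `notify_all` wakes consumers blocked on the same condition. `_register_item` is not shown in the original snippet, so the id scheme below is invented.

import threading
from uuid import uuid4

class MiniPipeline:
    def __init__(self):
        self.condition = threading.Condition()
        self.queue = []
        self.names = {}

    def _register_item(self, name, item):
        uid = uuid4().hex  # invented; the real class may use another scheme
        if name is not None:
            self.names[name] = item
        return uid

    def append(self, item, name=None):
        with self.condition:
            self.queue.append(item)
            uuid = self._register_item(name, item)
            self.condition.notify_all()
            return uuid

print(MiniPipeline().append("job-1", name="first"))  # a fresh 32-char hex id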
def export(self): """ Exports as dictionary """ data = {} for key, value in self.items(): data[key] = value return data
def function[export, parameter[self]]: constant[ Exports as dictionary ] variable[data] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b2649270>, <ast.Name object at 0x7da1b2649a50>]]] in starred[call[name[self].items, parameter[]]] begin[:] call[name[data]][name[key]] assign[=] name[value] return[name[data]]
keyword[def] identifier[export] ( identifier[self] ): literal[string] identifier[data] ={} keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[items] (): identifier[data] [ identifier[key] ]= identifier[value] keyword[return] identifier[data]
def export(self): """ Exports as dictionary """ data = {} for (key, value) in self.items(): data[key] = value # depends on [control=['for'], data=[]] return data
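The loop above is a hand-rolled shallow copy; for anything mapping-like, `dict(self)` gives the same result, which is worth knowing when reading this idiom:

class Settings(dict):
    def export(self):
        data = {}
        for key, value in self.items():
            data[key] = value
        return data

s = Settings(a=1, b=2)
print(s.export())             # {'a': 1, 'b': 2}
print(s.export() == dict(s))  # True -- the loop and dict(self) agree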
def render_mako_template_to(
        template, outpath, subsd, only_update=False, cwd=None,
        prev_subsd=None, create_dest_dirs=False, logger=None,
        pass_warn_string=True, **kwargs):
    """
    template: either string of path or file like obj.

    Beware of the only_update option, it pays no attention to
    an updated subsd.

    pass_warn_string: default True
        if True or instance of str:
            an extra variable named '_warning_in_the_generated_file_not_to_edit'
            is passed with a preset (True) or string warning not to
            directly edit the generated file.
    """
    if cwd:
        template = os.path.join(cwd, template)
        outpath = os.path.join(cwd, outpath)
    outdir = os.path.dirname(outpath) or '.'  # avoid ''
    if not os.path.exists(outdir):
        if create_dest_dirs:
            make_dirs(outdir, logger=logger)
        else:
            raise FileNotFoundError(
                "Dest. dir. non-existent: {}".format(outdir))
    msg = None
    if pass_warn_string is True:
        subsd['_warning_in_the_generated_file_not_to_edit'] = (
            "DO NOT EDIT THIS FILE! (Generated from template: {} using" +
            " Mako python templating engine)"
        ).format(os.path.basename(template))
    elif isinstance(pass_warn_string, str):
        subsd['_warning_in_the_generated_file_not_to_edit'] =\
            pass_warn_string

    if only_update:
        if prev_subsd == subsd and not \
           missing_or_other_newer(outpath, template):
            if logger:
                msg = "Did not re-render {}. (destination newer + same dict)"
                logger.info(msg.format(template))
            return

    if hasattr(template, 'read'):
        # set in-file handle to provided template
        ifh = template
    else:
        # Assume template is a string of the path to the template
        ifh = open(template, 'rt')
    template_str = ifh.read()

    kwargs_Template = {'input_encoding': 'utf-8', 'output_encoding': 'utf-8'}
    kwargs_Template.update(kwargs)
    with open(outpath, 'wb') as ofh:
        from mako.template import Template
        from mako.exceptions import text_error_template
        try:
            rendered = Template(
                template_str, **kwargs_Template).render(**subsd)
        except:
            if logger:
                logger.error(text_error_template().render())
            else:
                print(text_error_template().render())
            raise
        if logger:
            logger.info("Rendering '{}' to '{}'...".format(
                ifh.name, outpath))
        ofh.write(rendered)
    return outpath
def function[render_mako_template_to, parameter[template, outpath, subsd, only_update, cwd, prev_subsd, create_dest_dirs, logger, pass_warn_string]]:
    constant[ template: either string of path or file like obj. Beware of the only_update option, it pays no attention to an updated subsd. pass_warn_string: default True if True or instance of str: an extra variable named '_warning_in_the_generated_file_not_to_edit' is passed with a preset (True) or string warning not to directly edit the generated file. ]
    if name[cwd] begin[:]
    variable[template] assign[=] call[name[os].path.join, parameter[name[cwd], name[template]]]
    variable[outpath] assign[=] call[name[os].path.join, parameter[name[cwd], name[outpath]]]
    variable[outdir] assign[=] <ast.BoolOp object at 0x7da1b085d210>
    if <ast.UnaryOp object at 0x7da1b085d390> begin[:]
    if name[create_dest_dirs] begin[:]
    call[name[make_dirs], parameter[name[outdir]]]
    variable[msg] assign[=] constant[None]
    if compare[name[pass_warn_string] is constant[True]] begin[:]
    call[name[subsd]][constant[_warning_in_the_generated_file_not_to_edit]] assign[=] call[binary_operation[constant[DO NOT EDIT THIS FILE! (Generated from template: {} using] + constant[ Mako python templating engine)]].format, parameter[call[name[os].path.basename, parameter[name[template]]]]]
    if name[only_update] begin[:]
    if <ast.BoolOp object at 0x7da1b0831ed0> begin[:]
    if name[logger] begin[:]
    variable[msg] assign[=] constant[Did not re-render {}. (destination newer + same dict)]
    call[name[logger].info, parameter[call[name[msg].format, parameter[name[template]]]]]
    return[None]
    if call[name[hasattr], parameter[name[template], constant[read]]] begin[:]
    variable[ifh] assign[=] name[template]
    variable[template_str] assign[=] call[name[ifh].read, parameter[]]
    variable[kwargs_Template] assign[=] dictionary[[<ast.Constant object at 0x7da1b085dd50>, <ast.Constant object at 0x7da1b085dd80>], [<ast.Constant object at 0x7da1b085ddb0>, <ast.Constant object at 0x7da1b085dde0>]]
    call[name[kwargs_Template].update, parameter[name[kwargs]]]
    with call[name[open], parameter[name[outpath], constant[wb]]] begin[:]
    from relative_module[mako.template] import module[Template]
    from relative_module[mako.exceptions] import module[text_error_template]
    <ast.Try object at 0x7da1b085ea10>
    if name[logger] begin[:]
    call[name[logger].info, parameter[call[constant[Rendering '{}' to '{}'...].format, parameter[name[ifh].name, name[outpath]]]]]
    call[name[ofh].write, parameter[name[rendered]]]
    return[name[outpath]]
keyword[def] identifier[render_mako_template_to] ( identifier[template] , identifier[outpath] , identifier[subsd] , identifier[only_update] = keyword[False] , identifier[cwd] = keyword[None] , identifier[prev_subsd] = keyword[None] , identifier[create_dest_dirs] = keyword[False] , identifier[logger] = keyword[None] , identifier[pass_warn_string] = keyword[True] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[cwd] : identifier[template] = identifier[os] . identifier[path] . identifier[join] ( identifier[cwd] , identifier[template] ) identifier[outpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[cwd] , identifier[outpath] ) identifier[outdir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[outpath] ) keyword[or] literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[outdir] ): keyword[if] identifier[create_dest_dirs] : identifier[make_dirs] ( identifier[outdir] , identifier[logger] = identifier[logger] ) keyword[else] : keyword[raise] identifier[FileNotFoundError] ( literal[string] . identifier[format] ( identifier[outdir] )) identifier[msg] = keyword[None] keyword[if] identifier[pass_warn_string] keyword[is] keyword[True] : identifier[subsd] [ literal[string] ]=( literal[string] + literal[string] ). identifier[format] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[template] )) keyword[elif] identifier[isinstance] ( identifier[pass_warn_string] , identifier[str] ): identifier[subsd] [ literal[string] ]= identifier[pass_warn_string] keyword[if] identifier[only_update] : keyword[if] identifier[prev_subsd] == identifier[subsd] keyword[and] keyword[not] identifier[missing_or_other_newer] ( identifier[outpath] , identifier[template] ): keyword[if] identifier[logger] : identifier[msg] = literal[string] identifier[logger] . identifier[info] ( identifier[msg] . identifier[format] ( identifier[template] )) keyword[return] keyword[if] identifier[hasattr] ( identifier[template] , literal[string] ): identifier[ifh] = identifier[template] keyword[else] : identifier[ifh] = identifier[open] ( identifier[template] , literal[string] ) identifier[template_str] = identifier[ifh] . identifier[read] () identifier[kwargs_Template] ={ literal[string] : literal[string] , literal[string] : literal[string] } identifier[kwargs_Template] . identifier[update] ( identifier[kwargs] ) keyword[with] identifier[open] ( identifier[outpath] , literal[string] ) keyword[as] identifier[ofh] : keyword[from] identifier[mako] . identifier[template] keyword[import] identifier[Template] keyword[from] identifier[mako] . identifier[exceptions] keyword[import] identifier[text_error_template] keyword[try] : identifier[rendered] = identifier[Template] ( identifier[template_str] ,** identifier[kwargs_Template] ). identifier[render] (** identifier[subsd] ) keyword[except] : keyword[if] identifier[logger] : identifier[logger] . identifier[error] ( identifier[text_error_template] (). identifier[render] ()) keyword[else] : identifier[print] ( identifier[text_error_template] (). identifier[render] ()) keyword[raise] keyword[if] identifier[logger] : identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[ifh] . identifier[name] , identifier[outpath] )) identifier[ofh] . identifier[write] ( identifier[rendered] ) keyword[return] identifier[outpath]
def render_mako_template_to(template, outpath, subsd, only_update=False, cwd=None, prev_subsd=None, create_dest_dirs=False, logger=None, pass_warn_string=True, **kwargs):
    """
    template: either string of path or file like obj.

    Beware of the only_update option, it pays no attention to
    an updated subsd.

    pass_warn_string: default True
        if True or instance of str:
            an extra variable named '_warning_in_the_generated_file_not_to_edit'
            is passed with a preset (True) or string warning not to
            directly edit the generated file.
    """
    if cwd:
        template = os.path.join(cwd, template)
        outpath = os.path.join(cwd, outpath) # depends on [control=['if'], data=[]]
    outdir = os.path.dirname(outpath) or '.' # avoid ''
    if not os.path.exists(outdir):
        if create_dest_dirs:
            make_dirs(outdir, logger=logger) # depends on [control=['if'], data=[]]
        else:
            raise FileNotFoundError('Dest. dir. non-existent: {}'.format(outdir)) # depends on [control=['if'], data=[]]
    msg = None
    if pass_warn_string is True:
        subsd['_warning_in_the_generated_file_not_to_edit'] = ('DO NOT EDIT THIS FILE! (Generated from template: {} using' + ' Mako python templating engine)').format(os.path.basename(template)) # depends on [control=['if'], data=[]]
    elif isinstance(pass_warn_string, str):
        subsd['_warning_in_the_generated_file_not_to_edit'] = pass_warn_string # depends on [control=['if'], data=[]]
    if only_update:
        if prev_subsd == subsd and (not missing_or_other_newer(outpath, template)):
            if logger:
                msg = 'Did not re-render {}. (destination newer + same dict)'
                logger.info(msg.format(template)) # depends on [control=['if'], data=[]]
            return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    if hasattr(template, 'read'):
        # set in-file handle to provided template
        ifh = template # depends on [control=['if'], data=[]]
    else:
        # Assume template is a string of the path to the template
        ifh = open(template, 'rt')
    template_str = ifh.read()
    kwargs_Template = {'input_encoding': 'utf-8', 'output_encoding': 'utf-8'}
    kwargs_Template.update(kwargs)
    with open(outpath, 'wb') as ofh:
        from mako.template import Template
        from mako.exceptions import text_error_template
        try:
            rendered = Template(template_str, **kwargs_Template).render(**subsd) # depends on [control=['try'], data=[]]
        except:
            if logger:
                logger.error(text_error_template().render()) # depends on [control=['if'], data=[]]
            else:
                print(text_error_template().render())
            raise # depends on [control=['except'], data=[]]
        if logger:
            logger.info("Rendering '{}' to '{}'...".format(ifh.name, outpath)) # depends on [control=['if'], data=[]]
        ofh.write(rendered) # depends on [control=['with'], data=['ofh']]
    return outpath
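The core Mako call reduced to a few lines, assuming Mako is installed: with `output_encoding` set, `render` returns bytes, which is why the helper opens its destination in 'wb' mode.

from mako.template import Template

tpl = Template("# ${_warning_in_the_generated_file_not_to_edit}\nHello ${name}!",
               input_encoding='utf-8', output_encoding='utf-8')
rendered = tpl.render(name="world",
                      _warning_in_the_generated_file_not_to_edit="DO NOT EDIT!")
print(type(rendered))  # <class 'bytes'>, hence the 'wb' open above
with open("out.txt", "wb") as ofh:
    ofh.write(rendered)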
def get_empty_ids(self):
    """ Get the ids of documents that are missing the targeted field """
    cursor = self.get_collection().find(
        {
            '_id': {'$in': self._document_ids},
            self._field: {'$exists': True}
        },
        {'_id': True}
    )
    return set(self._document_ids) - {doc['_id'] for doc in cursor}
def function[get_empty_ids, parameter[self]]:
    constant[ Get the ids of documents that are missing the targeted field ]
    variable[cursor] assign[=] call[call[name[self].get_collection, parameter[]].find, parameter[dictionary[[<ast.Constant object at 0x7da1b1fa12a0>, <ast.Attribute object at 0x7da18c4cd450>], [<ast.Dict object at 0x7da18c4cfdc0>, <ast.Dict object at 0x7da18c4cfd30>]], dictionary[[<ast.Constant object at 0x7da18c4cd930>], [<ast.Constant object at 0x7da18c4ceb30>]]]]
    return[binary_operation[call[name[set], parameter[name[self]._document_ids]] - <ast.SetComp object at 0x7da18dc05ff0>]]
keyword[def] identifier[get_empty_ids] ( identifier[self] ): literal[string] identifier[cursor] = identifier[self] . identifier[get_collection] (). identifier[find] ( { literal[string] :{ literal[string] : identifier[self] . identifier[_document_ids] }, identifier[self] . identifier[_field] :{ literal[string] : keyword[True] } }, { literal[string] : keyword[True] } ) keyword[return] identifier[set] ( identifier[self] . identifier[_document_ids] )-{ identifier[doc] [ literal[string] ] keyword[for] identifier[doc] keyword[in] identifier[cursor] }
def get_empty_ids(self):
    """ Get the ids of documents that are missing the targeted field """
    cursor = self.get_collection().find({'_id': {'$in': self._document_ids}, self._field: {'$exists': True}}, {'_id': True})
    return set(self._document_ids) - {doc['_id'] for doc in cursor}
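The Mongo round-trip reduces to a set difference; a pure-Python sketch with hypothetical documents standing in for the cursor:

document_ids = [1, 2, 3, 4]
cursor = [{'_id': 1}, {'_id': 3}]  # docs where the field $exists, per the query above
empty_ids = set(document_ids) - {doc['_id'] for doc in cursor}
print(empty_ids)  # {2, 4} -- documents missing the targeted field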
def transform(self, X): """ Args: X: DataFrame with NaN's Returns: Dictionary with one key - 'X' corresponding to given DataFrame but without nan's """ if self.fill_missing: X = self.filler.complete(X) return {'X': X}
def function[transform, parameter[self, X]]: constant[ Args: X: DataFrame with NaN's Returns: Dictionary with one key - 'X' corresponding to given DataFrame but without nan's ] if name[self].fill_missing begin[:] variable[X] assign[=] call[name[self].filler.complete, parameter[name[X]]] return[dictionary[[<ast.Constant object at 0x7da18f813250>], [<ast.Name object at 0x7da18f813520>]]]
keyword[def] identifier[transform] ( identifier[self] , identifier[X] ): literal[string] keyword[if] identifier[self] . identifier[fill_missing] : identifier[X] = identifier[self] . identifier[filler] . identifier[complete] ( identifier[X] ) keyword[return] { literal[string] : identifier[X] }
def transform(self, X): """ Args: X: DataFrame with NaN's Returns: Dictionary with one key - 'X' corresponding to given DataFrame but without nan's """ if self.fill_missing: X = self.filler.complete(X) # depends on [control=['if'], data=[]] return {'X': X}
def service_changed(self, event): """ Called by the framework when a service event occurs """ if ( self._ipopo_instance is None or not self._ipopo_instance.check_event(event) ): # stop() and clean() may have been called after we have been put # inside a listener list copy... # or we've been told to ignore this event return # Call sub-methods kind = event.get_kind() svc_ref = event.get_service_reference() if kind == ServiceEvent.REGISTERED: # Service coming self.on_service_arrival(svc_ref) elif kind in ( ServiceEvent.UNREGISTERING, ServiceEvent.MODIFIED_ENDMATCH, ): # Service gone or not matching anymore self.on_service_departure(svc_ref) elif kind == ServiceEvent.MODIFIED: # Modified properties (can be a new injection) self.on_service_modify(svc_ref, event.get_previous_properties())
def function[service_changed, parameter[self, event]]: constant[ Called by the framework when a service event occurs ] if <ast.BoolOp object at 0x7da1b0390f40> begin[:] return[None] variable[kind] assign[=] call[name[event].get_kind, parameter[]] variable[svc_ref] assign[=] call[name[event].get_service_reference, parameter[]] if compare[name[kind] equal[==] name[ServiceEvent].REGISTERED] begin[:] call[name[self].on_service_arrival, parameter[name[svc_ref]]]
keyword[def] identifier[service_changed] ( identifier[self] , identifier[event] ): literal[string] keyword[if] ( identifier[self] . identifier[_ipopo_instance] keyword[is] keyword[None] keyword[or] keyword[not] identifier[self] . identifier[_ipopo_instance] . identifier[check_event] ( identifier[event] ) ): keyword[return] identifier[kind] = identifier[event] . identifier[get_kind] () identifier[svc_ref] = identifier[event] . identifier[get_service_reference] () keyword[if] identifier[kind] == identifier[ServiceEvent] . identifier[REGISTERED] : identifier[self] . identifier[on_service_arrival] ( identifier[svc_ref] ) keyword[elif] identifier[kind] keyword[in] ( identifier[ServiceEvent] . identifier[UNREGISTERING] , identifier[ServiceEvent] . identifier[MODIFIED_ENDMATCH] , ): identifier[self] . identifier[on_service_departure] ( identifier[svc_ref] ) keyword[elif] identifier[kind] == identifier[ServiceEvent] . identifier[MODIFIED] : identifier[self] . identifier[on_service_modify] ( identifier[svc_ref] , identifier[event] . identifier[get_previous_properties] ())
def service_changed(self, event):
    """
    Called by the framework when a service event occurs
    """
    if self._ipopo_instance is None or not self._ipopo_instance.check_event(event):
        # stop() and clean() may have been called after we have been put
        # inside a listener list copy...
        # or we've been told to ignore this event
        return  # depends on [control=['if'], data=[]]
    # Call sub-methods
    kind = event.get_kind()
    svc_ref = event.get_service_reference()
    if kind == ServiceEvent.REGISTERED:
        # Service coming
        self.on_service_arrival(svc_ref)  # depends on [control=['if'], data=[]]
    elif kind in (ServiceEvent.UNREGISTERING, ServiceEvent.MODIFIED_ENDMATCH):
        # Service gone or not matching anymore
        self.on_service_departure(svc_ref)  # depends on [control=['if'], data=[]]
    elif kind == ServiceEvent.MODIFIED:
        # Modified properties (can be a new injection)
        self.on_service_modify(svc_ref, event.get_previous_properties())  # depends on [control=['if'], data=[]]
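The method above is a dispatch on the event kind; a self-contained sketch of that pattern with stand-in constants (the real ones live on iPOPO's ServiceEvent class):

class ServiceEvent:
    REGISTERED, UNREGISTERING, MODIFIED, MODIFIED_ENDMATCH = range(4)

def dispatch(kind):
    if kind == ServiceEvent.REGISTERED:
        return 'arrival'
    elif kind in (ServiceEvent.UNREGISTERING, ServiceEvent.MODIFIED_ENDMATCH):
        return 'departure'
    elif kind == ServiceEvent.MODIFIED:
        return 'modify'

print(dispatch(ServiceEvent.MODIFIED_ENDMATCH))  # departure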
def get_managed_configurations(self):
    """Get the configurations managed by this scheduler

    The configuration managed by a scheduler is the self configuration got
    by the scheduler during the dispatching.

    :return: a dict of scheduler links with instance_id as key and
    hash, push_flavor and configuration identifier as values
    :rtype: dict
    """
    # for scheduler_link in list(self.schedulers.values()):
    #     res[scheduler_link.instance_id] = {
    #         'hash': scheduler_link.hash,
    #         'push_flavor': scheduler_link.push_flavor,
    #         'managed_conf_id': scheduler_link.managed_conf_id
    #     }
    res = {}
    if self.sched.pushed_conf and self.cur_conf and 'instance_id' in self.cur_conf:
        res[self.cur_conf['instance_id']] = {
            'hash': self.cur_conf['hash'],
            'push_flavor': self.cur_conf['push_flavor'],
            'managed_conf_id': self.cur_conf['managed_conf_id']
        }

    logger.debug("Get managed configuration: %s", res)
    return res
def function[get_managed_configurations, parameter[self]]: constant[Get the configurations managed by this scheduler The configuration managed by a scheduler is the self configuration got by the scheduler during the dispatching. :return: a dict of scheduler links with instance_id as key and hash, push_flavor and configuration identifier as values :rtype: dict ] variable[res] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da204345ea0> begin[:] call[name[res]][call[name[self].cur_conf][constant[instance_id]]] assign[=] dictionary[[<ast.Constant object at 0x7da204344430>, <ast.Constant object at 0x7da2043472b0>, <ast.Constant object at 0x7da204344a30>], [<ast.Subscript object at 0x7da2043457b0>, <ast.Subscript object at 0x7da2041d9ea0>, <ast.Subscript object at 0x7da2041d9ae0>]] call[name[logger].debug, parameter[constant[Get managed configuration: %s], name[res]]] return[name[res]]
keyword[def] identifier[get_managed_configurations] ( identifier[self] ): literal[string] identifier[res] ={} keyword[if] identifier[self] . identifier[sched] . identifier[pushed_conf] keyword[and] identifier[self] . identifier[cur_conf] keyword[and] literal[string] keyword[in] identifier[self] . identifier[cur_conf] : identifier[res] [ identifier[self] . identifier[cur_conf] [ literal[string] ]]={ literal[string] : identifier[self] . identifier[cur_conf] [ literal[string] ], literal[string] : identifier[self] . identifier[cur_conf] [ literal[string] ], literal[string] : identifier[self] . identifier[cur_conf] [ literal[string] ] } identifier[logger] . identifier[debug] ( literal[string] , identifier[res] ) keyword[return] identifier[res]
def get_managed_configurations(self):
    """Get the configurations managed by this scheduler

    The configuration managed by a scheduler is the self configuration got
    by the scheduler during the dispatching.

    :return: a dict of scheduler links with instance_id as key and
    hash, push_flavor and configuration identifier as values
    :rtype: dict
    """
    # for scheduler_link in list(self.schedulers.values()):
    #     res[scheduler_link.instance_id] = {
    #         'hash': scheduler_link.hash,
    #         'push_flavor': scheduler_link.push_flavor,
    #         'managed_conf_id': scheduler_link.managed_conf_id
    #     }
    res = {}
    if self.sched.pushed_conf and self.cur_conf and ('instance_id' in self.cur_conf):
        res[self.cur_conf['instance_id']] = {
            'hash': self.cur_conf['hash'],
            'push_flavor': self.cur_conf['push_flavor'],
            'managed_conf_id': self.cur_conf['managed_conf_id']}  # depends on [control=['if'], data=[]]
    logger.debug('Get managed configuration: %s', res)
    return res
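A sketch of the guard-then-build-dict shape of the return value, using made-up configuration values:

cur_conf = {'instance_id': 'scheduler-master-1', 'hash': 'abc123',
            'push_flavor': 42, 'managed_conf_id': 'cfg-1'}
res = {}
if cur_conf and 'instance_id' in cur_conf:
    res[cur_conf['instance_id']] = {k: cur_conf[k]
                                    for k in ('hash', 'push_flavor', 'managed_conf_id')}
print(res)  # {'scheduler-master-1': {'hash': 'abc123', 'push_flavor': 42, 'managed_conf_id': 'cfg-1'}}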
def LMLgrad(self, params=None):
    """
    evaluates the gradient of the log marginal likelihood for the given hyperparameters
    """
    if params is not None:
        self.setParams(params)
    KV = self._update_cache()
    W = KV['W']
    LMLgrad = SP.zeros(self.covar.n_params)
    for i in range(self.covar.n_params):
        Kd = self.covar.Kgrad_param(i)
        LMLgrad[i] = 0.5 * (W*Kd).sum()
    return {'covar': LMLgrad}
def function[LMLgrad, parameter[self, params]]: constant[ evaluates the gradient of the log marginal likelihood for the given hyperparameters ] if compare[name[params] is_not constant[None]] begin[:] call[name[self].setParams, parameter[name[params]]] variable[KV] assign[=] call[name[self]._update_cache, parameter[]] variable[W] assign[=] call[name[KV]][constant[W]] variable[LMLgrad] assign[=] call[name[SP].zeros, parameter[name[self].covar.n_params]] for taget[name[i]] in starred[call[name[range], parameter[name[self].covar.n_params]]] begin[:] variable[Kd] assign[=] call[name[self].covar.Kgrad_param, parameter[name[i]]] call[name[LMLgrad]][name[i]] assign[=] binary_operation[constant[0.5] * call[binary_operation[name[W] * name[Kd]].sum, parameter[]]] return[dictionary[[<ast.Constant object at 0x7da18c4cc9d0>], [<ast.Name object at 0x7da18c4cde40>]]]
keyword[def] identifier[LMLgrad] ( identifier[self] , identifier[params] = keyword[None] ): literal[string] keyword[if] identifier[params] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[setParams] ( identifier[params] ) identifier[KV] = identifier[self] . identifier[_update_cache] () identifier[W] = identifier[KV] [ literal[string] ] identifier[LMLgrad] = identifier[SP] . identifier[zeros] ( identifier[self] . identifier[covar] . identifier[n_params] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[covar] . identifier[n_params] ): identifier[Kd] = identifier[self] . identifier[covar] . identifier[Kgrad_param] ( identifier[i] ) identifier[LMLgrad] [ identifier[i] ]= literal[int] *( identifier[W] * identifier[Kd] ). identifier[sum] () keyword[return] { literal[string] : identifier[LMLgrad] }
def LMLgrad(self, params=None):
    """
    evaluates the gradient of the log marginal likelihood for the given hyperparameters
    """
    if params is not None:
        self.setParams(params)  # depends on [control=['if'], data=['params']]
    KV = self._update_cache()
    W = KV['W']
    LMLgrad = SP.zeros(self.covar.n_params)
    for i in range(self.covar.n_params):
        Kd = self.covar.Kgrad_param(i)
        LMLgrad[i] = 0.5 * (W * Kd).sum()  # depends on [control=['for'], data=['i']]
    return {'covar': LMLgrad}
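The per-parameter gradient reduces to 0.5 * sum(W * dK/dtheta_i); a runnable numpy sketch with random placeholder matrices (SP above is scipy used with a numpy-like API):

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(5, 5))
Kgrads = [rng.normal(size=(5, 5)) for _ in range(3)]  # stands in for covar.Kgrad_param(i)
LMLgrad = np.array([0.5 * (W * Kd).sum() for Kd in Kgrads])
print(LMLgrad.shape)  # (3,) -> one entry per covariance hyperparameter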
def error(name=None, message=''):
    '''
    If name is None Then return empty dict

    Otherwise raise an exception with __name__ from name, message from message

    CLI Example:

    .. code-block:: bash

        salt-wheel error
        salt-wheel error.error name="Exception" message="This is an error."
    '''
    ret = {}
    if name is not None:
        salt.utils.error.raise_error(name=name, message=message)
    return ret
def function[error, parameter[name, message]]: constant[ If name is None Then return empty dict Otherwise raise an exception with __name__ from name, message from message CLI Example: .. code-block:: bash salt-wheel error salt-wheel error.error name="Exception" message="This is an error." ] variable[ret] assign[=] dictionary[[], []] if compare[name[name] is_not constant[None]] begin[:] call[name[salt].utils.error.raise_error, parameter[]] return[name[ret]]
keyword[def] identifier[error] ( identifier[name] = keyword[None] , identifier[message] = literal[string] ): literal[string] identifier[ret] ={} keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] : identifier[salt] . identifier[utils] . identifier[error] . identifier[raise_error] ( identifier[name] = identifier[name] , identifier[message] = identifier[message] ) keyword[return] identifier[ret]
def error(name=None, message=''):
    """
    If name is None Then return empty dict

    Otherwise raise an exception with __name__ from name, message from message

    CLI Example:

    .. code-block:: bash

        salt-wheel error
        salt-wheel error.error name="Exception" message="This is an error."
    """
    ret = {}
    if name is not None:
        salt.utils.error.raise_error(name=name, message=message)  # depends on [control=['if'], data=['name']]
    return ret
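salt.utils.error.raise_error looks up an exception type by name and raises it with the given message; a rough stand-in using builtins (an assumption for illustration, the real helper also handles Salt-specific exception modules):

import builtins

def raise_error(name='Exception', message=''):
    raise getattr(builtins, name)(message)

try:
    raise_error(name='ValueError', message='This is an error.')
except ValueError as exc:
    print(type(exc).__name__, exc)  # ValueError This is an error.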
def get_connected_components_as_subgraphs(graph):
    """Finds all connected components of the graph.

    Returns a list of graph objects, each representing a connected component.
    Returns an empty list for an empty graph.
    """
    components = get_connected_components(graph)
    list_of_graphs = []
    for c in components:
        edge_ids = set()
        nodes = [graph.get_node(node) for node in c]
        for n in nodes:
            # --Loop through the edges in each node, to determine if it should be included
            for e in n['edges']:
                # --Only add the edge to the subgraph if both ends are in the subgraph
                edge = graph.get_edge(e)
                a, b = edge['vertices']
                if a in c and b in c:
                    edge_ids.add(e)
        # --Build the subgraph and add it to the list
        list_of_edges = list(edge_ids)
        subgraph = make_subgraph(graph, c, list_of_edges)
        list_of_graphs.append(subgraph)
    return list_of_graphs
def function[get_connected_components_as_subgraphs, parameter[graph]]: constant[Finds all connected components of the graph. Returns a list of graph objects, each representing a connected component. Returns an empty list for an empty graph. ] variable[components] assign[=] call[name[get_connected_components], parameter[name[graph]]] variable[list_of_graphs] assign[=] list[[]] for taget[name[c]] in starred[name[components]] begin[:] variable[edge_ids] assign[=] call[name[set], parameter[]] variable[nodes] assign[=] <ast.ListComp object at 0x7da1b282a170> for taget[name[n]] in starred[name[nodes]] begin[:] for taget[name[e]] in starred[call[name[n]][constant[edges]]] begin[:] variable[edge] assign[=] call[name[graph].get_edge, parameter[name[e]]] <ast.Tuple object at 0x7da1b2828af0> assign[=] call[name[edge]][constant[vertices]] if <ast.BoolOp object at 0x7da1b2828bb0> begin[:] call[name[edge_ids].add, parameter[name[e]]] variable[list_of_edges] assign[=] call[name[list], parameter[name[edge_ids]]] variable[subgraph] assign[=] call[name[make_subgraph], parameter[name[graph], name[c], name[list_of_edges]]] call[name[list_of_graphs].append, parameter[name[subgraph]]] return[name[list_of_graphs]]
keyword[def] identifier[get_connected_components_as_subgraphs] ( identifier[graph] ): literal[string] identifier[components] = identifier[get_connected_components] ( identifier[graph] ) identifier[list_of_graphs] =[] keyword[for] identifier[c] keyword[in] identifier[components] : identifier[edge_ids] = identifier[set] () identifier[nodes] =[ identifier[graph] . identifier[get_node] ( identifier[node] ) keyword[for] identifier[node] keyword[in] identifier[c] ] keyword[for] identifier[n] keyword[in] identifier[nodes] : keyword[for] identifier[e] keyword[in] identifier[n] [ literal[string] ]: identifier[edge] = identifier[graph] . identifier[get_edge] ( identifier[e] ) identifier[a] , identifier[b] = identifier[edge] [ literal[string] ] keyword[if] identifier[a] keyword[in] identifier[c] keyword[and] identifier[b] keyword[in] identifier[c] : identifier[edge_ids] . identifier[add] ( identifier[e] ) identifier[list_of_edges] = identifier[list] ( identifier[edge_ids] ) identifier[subgraph] = identifier[make_subgraph] ( identifier[graph] , identifier[c] , identifier[list_of_edges] ) identifier[list_of_graphs] . identifier[append] ( identifier[subgraph] ) keyword[return] identifier[list_of_graphs]
def get_connected_components_as_subgraphs(graph):
    """Finds all connected components of the graph.

    Returns a list of graph objects, each representing a connected component.
    Returns an empty list for an empty graph.
    """
    components = get_connected_components(graph)
    list_of_graphs = []
    for c in components:
        edge_ids = set()
        nodes = [graph.get_node(node) for node in c]
        for n in nodes:
            # --Loop through the edges in each node, to determine if it should be included
            for e in n['edges']:
                # --Only add the edge to the subgraph if both ends are in the subgraph
                edge = graph.get_edge(e)
                (a, b) = edge['vertices']
                if a in c and b in c:
                    edge_ids.add(e)  # depends on [control=['if'], data=[]]
                # depends on [control=['for'], data=['e']]
            # depends on [control=['for'], data=['n']]
        # --Build the subgraph and add it to the list
        list_of_edges = list(edge_ids)
        subgraph = make_subgraph(graph, c, list_of_edges)
        list_of_graphs.append(subgraph)  # depends on [control=['for'], data=['c']]
    return list_of_graphs
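Toy data exercising the edge filter at the heart of the loop: an edge joins a component's subgraph only when both endpoints lie inside the component (plain dicts stand in for the graph structures):

component = {'a', 'b'}
edges = {1: ('a', 'b'), 2: ('b', 'c')}  # edge 2 leaves the component
edge_ids = {eid for eid, (u, v) in edges.items() if u in component and v in component}
print(edge_ids)  # {1}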
def attrs(self, attribute_name):
    """
    Retrieve HTML attribute values from the elements matched by the query.

    Example usage:

    .. code:: python

        # Assume that the query matches html elements:
        # <div class="foo"> and <div class="bar">

        >> q.attrs('class')
        ['foo', 'bar']

    Args:
        attribute_name (str): The name of the attribute values to retrieve.

    Returns:
        A list of attribute values for `attribute_name`.
    """
    desc = u'attrs({!r})'.format(attribute_name)
    return self.map(lambda el: el.get_attribute(attribute_name), desc).results
def function[attrs, parameter[self, attribute_name]]: constant[ Retrieve HTML attribute values from the elements matched by the query. Example usage: .. code:: python # Assume that the query matches html elements: # <div class="foo"> and <div class="bar"> >> q.attrs('class') ['foo', 'bar'] Args: attribute_name (str): The name of the attribute values to retrieve. Returns: A list of attribute values for `attribute_name`. ] variable[desc] assign[=] call[constant[attrs({!r})].format, parameter[name[attribute_name]]] return[call[name[self].map, parameter[<ast.Lambda object at 0x7da18fe93850>, name[desc]]].results]
keyword[def] identifier[attrs] ( identifier[self] , identifier[attribute_name] ): literal[string] identifier[desc] = literal[string] . identifier[format] ( identifier[attribute_name] ) keyword[return] identifier[self] . identifier[map] ( keyword[lambda] identifier[el] : identifier[el] . identifier[get_attribute] ( identifier[attribute_name] ), identifier[desc] ). identifier[results]
def attrs(self, attribute_name):
    """
    Retrieve HTML attribute values from the elements matched by the query.

    Example usage:

    .. code:: python

        # Assume that the query matches html elements:
        # <div class="foo"> and <div class="bar">

        >> q.attrs('class')
        ['foo', 'bar']

    Args:
        attribute_name (str): The name of the attribute values to retrieve.

    Returns:
        A list of attribute values for `attribute_name`.
    """
    desc = u'attrs({!r})'.format(attribute_name)
    return self.map(lambda el: el.get_attribute(attribute_name), desc).results
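The query maps a getter over every matched element; a self-contained sketch with a fake element class (the real objects wrap Selenium WebElements, which is an assumption about the surrounding library):

class FakeElement:
    def __init__(self, **attrs):
        self._attrs = attrs

    def get_attribute(self, name):
        return self._attrs.get(name)

elements = [FakeElement(**{'class': 'foo'}), FakeElement(**{'class': 'bar'})]
print([el.get_attribute('class') for el in elements])  # ['foo', 'bar']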
def translate(translationAmt):
    """Create a translation matrix."""
    if not isinstance(translationAmt, Vector3):
        raise ValueError("translationAmt must be a Vector3")
    ma4 = Matrix4((1, 0, 0, translationAmt.x),
                  (0, 1, 0, translationAmt.y),
                  (0, 0, 1, translationAmt.z),
                  (0, 0, 0, 1))
    return ma4
def function[translate, parameter[translationAmt]]: constant[Create a translation matrix.] if <ast.UnaryOp object at 0x7da20c6e4d30> begin[:] <ast.Raise object at 0x7da20c6e4430> variable[ma4] assign[=] call[name[Matrix4], parameter[tuple[[<ast.Constant object at 0x7da20c6e4a00>, <ast.Constant object at 0x7da20c6e4b80>, <ast.Constant object at 0x7da20c6e60b0>, <ast.Attribute object at 0x7da20c6e6a10>]], tuple[[<ast.Constant object at 0x7da20c6e6110>, <ast.Constant object at 0x7da20c6e4d60>, <ast.Constant object at 0x7da20c6e5750>, <ast.Attribute object at 0x7da20c6e6b60>]], tuple[[<ast.Constant object at 0x7da20c6e6650>, <ast.Constant object at 0x7da20c6e6320>, <ast.Constant object at 0x7da20c6e7130>, <ast.Attribute object at 0x7da20c6e7400>]], tuple[[<ast.Constant object at 0x7da20c6e6c50>, <ast.Constant object at 0x7da20c6e6a40>, <ast.Constant object at 0x7da20c6e74c0>, <ast.Constant object at 0x7da20c6e6980>]]]] return[name[ma4]]
keyword[def] identifier[translate] ( identifier[translationAmt] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[translationAmt] , identifier[Vector3] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[ma4] = identifier[Matrix4] (( literal[int] , literal[int] , literal[int] , identifier[translationAmt] . identifier[x] ), ( literal[int] , literal[int] , literal[int] , identifier[translationAmt] . identifier[y] ), ( literal[int] , literal[int] , literal[int] , identifier[translationAmt] . identifier[z] ), ( literal[int] , literal[int] , literal[int] , literal[int] )) keyword[return] identifier[ma4]
def translate(translationAmt):
    """Create a translation matrix."""
    if not isinstance(translationAmt, Vector3):
        raise ValueError('translationAmt must be a Vector3')  # depends on [control=['if'], data=[]]
    ma4 = Matrix4((1, 0, 0, translationAmt.x),
                  (0, 1, 0, translationAmt.y),
                  (0, 0, 1, translationAmt.z),
                  (0, 0, 0, 1))
    return ma4
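Applying the same row-major translation matrix to a homogeneous point, with numpy standing in for the library's Matrix4/Vector3 types:

import numpy as np

tx, ty, tz = 2.0, 3.0, 4.0
M = np.array([[1, 0, 0, tx],
              [0, 1, 0, ty],
              [0, 0, 1, tz],
              [0, 0, 0, 1.0]])
p = np.array([1.0, 1.0, 1.0, 1.0])  # the point (1, 1, 1) in homogeneous coordinates
print(M @ p)  # [3. 4. 5. 1.]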
def unaccentuate(s):
    """ Replace accentuated chars in string by their non accentuated equivalent. """
    return "".join(c for c in unicodedata.normalize("NFKD", s)
                   if not unicodedata.combining(c))
def function[unaccentuate, parameter[s]]: constant[ Replace accentuated chars in string by their non accentuated equivalent. ] return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18eb57430>]]]
keyword[def] identifier[unaccentuate] ( identifier[s] ): literal[string] keyword[return] literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[unicodedata] . identifier[normalize] ( literal[string] , identifier[s] ) keyword[if] keyword[not] identifier[unicodedata] . identifier[combining] ( identifier[c] ))
def unaccentuate(s):
    """ Replace accentuated chars in string by their non accentuated equivalent. """
    return ''.join((c for c in unicodedata.normalize('NFKD', s)
                    if not unicodedata.combining(c)))
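The function is self-contained, so it can be exercised directly; NFKD decomposition splits each accented character into a base letter plus combining marks, which the filter then drops:

import unicodedata

def unaccentuate(s):
    return "".join(c for c in unicodedata.normalize("NFKD", s)
                   if not unicodedata.combining(c))

print(unaccentuate("café naïve Ångström"))  # cafe naive Angstrom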
def parse_args(args=None):
    """ Handles the parsing of options for LIVVkit's command line interface

    Args:
        args: The list of arguments, typically sys.argv[1:]
    """
    parser = argparse.ArgumentParser(description="Main script to run LIVVkit.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     fromfile_prefix_chars='@')

    parser.add_argument('-o', '--out-dir',
                        default=os.path.join(os.getcwd(), "vv_" + time.strftime("%Y-%m-%d")),
                        help='Location to output the LIVVkit webpages.'
                        )

    parser.add_argument('-v', '--verify', nargs=2, default=None,
                        help=' '.join(['Specify the locations of the test and bench bundle to',
                                       'compare (respectively).'
                                       ])
                        )

    parser.add_argument('-V', '--validate',
                        action='store', nargs='+', default=None,
                        help=' '.join(['Specify the location of the configuration files for',
                                       'validation tests.'
                                       ])
                        )

    # FIXME: this just short-circuits to the validation option, and should become its own module
    parser.add_argument('-e', '--extension',
                        action='store', nargs='+', default=None,
                        dest='validate', metavar='EXTENSION',
                        help=' '.join(['Specify the location of the configuration files for',
                                       'LIVVkit extensions.'
                                       ])
                        )

    parser.add_argument('-p', '--publish',
                        action='store_true',
                        help=' '.join(['Also produce a publication quality copy of the figure in',
                                       'the output directory (eps, 600d pi).'
                                       ])
                        )

    parser.add_argument('-s', '--serve', nargs='?', type=int, const=8000,
                        help=' '.join(['Start a simple HTTP server for the output website specified',
                                       'by OUT_DIR on port SERVE.'
                                       ])
                        )

    parser.add_argument('--version', action='version',
                        version='LIVVkit {}'.format(livvkit.__version__),
                        help="Show LIVVkit's version number and exit"
                        )

    return init(parser.parse_args(args))
def function[parse_args, parameter[args]]: constant[ Handles the parsing of options for LIVVkit's command line interface Args: args: The list of arguments, typically sys.argv[1:] ] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[parser].add_argument, parameter[constant[-o], constant[--out-dir]]] call[name[parser].add_argument, parameter[constant[-v], constant[--verify]]] call[name[parser].add_argument, parameter[constant[-V], constant[--validate]]] call[name[parser].add_argument, parameter[constant[-e], constant[--extension]]] call[name[parser].add_argument, parameter[constant[-p], constant[--publish]]] call[name[parser].add_argument, parameter[constant[-s], constant[--serve]]] call[name[parser].add_argument, parameter[constant[--version]]] return[call[name[init], parameter[call[name[parser].parse_args, parameter[name[args]]]]]]
keyword[def] identifier[parse_args] ( identifier[args] = keyword[None] ): literal[string] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[ArgumentDefaultsHelpFormatter] , identifier[fromfile_prefix_chars] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), literal[string] + identifier[time] . identifier[strftime] ( literal[string] )), identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[nargs] = literal[int] , identifier[default] = keyword[None] , identifier[help] = literal[string] . identifier[join] ([ literal[string] , literal[string] ]) ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[nargs] = literal[string] , identifier[default] = keyword[None] , identifier[help] = literal[string] . identifier[join] ([ literal[string] , literal[string] ]) ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[nargs] = literal[string] , identifier[default] = keyword[None] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] , identifier[help] = literal[string] . identifier[join] ([ literal[string] , literal[string] ]) ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string] . identifier[join] ([ literal[string] , literal[string] ]) ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[nargs] = literal[string] , identifier[type] = identifier[int] , identifier[const] = literal[int] , identifier[help] = literal[string] . identifier[join] ([ literal[string] , literal[string] ]) ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[version] = literal[string] . identifier[format] ( identifier[livvkit] . identifier[__version__] ), identifier[help] = literal[string] ) keyword[return] identifier[init] ( identifier[parser] . identifier[parse_args] ( identifier[args] ))
def parse_args(args=None):
    """ Handles the parsing of options for LIVVkit's command line interface

    Args:
        args: The list of arguments, typically sys.argv[1:]
    """
    parser = argparse.ArgumentParser(description='Main script to run LIVVkit.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     fromfile_prefix_chars='@')
    parser.add_argument('-o', '--out-dir',
                        default=os.path.join(os.getcwd(), 'vv_' + time.strftime('%Y-%m-%d')),
                        help='Location to output the LIVVkit webpages.')
    parser.add_argument('-v', '--verify', nargs=2, default=None,
                        help=' '.join(['Specify the locations of the test and bench bundle to',
                                       'compare (respectively).']))
    parser.add_argument('-V', '--validate', action='store', nargs='+', default=None,
                        help=' '.join(['Specify the location of the configuration files for',
                                       'validation tests.']))
    # FIXME: this just short-circuits to the validation option, and should become its own module
    parser.add_argument('-e', '--extension', action='store', nargs='+', default=None,
                        dest='validate', metavar='EXTENSION',
                        help=' '.join(['Specify the location of the configuration files for',
                                       'LIVVkit extensions.']))
    parser.add_argument('-p', '--publish', action='store_true',
                        help=' '.join(['Also produce a publication quality copy of the figure in',
                                       'the output directory (eps, 600d pi).']))
    parser.add_argument('-s', '--serve', nargs='?', type=int, const=8000,
                        help=' '.join(['Start a simple HTTP server for the output website specified',
                                       'by OUT_DIR on port SERVE.']))
    parser.add_argument('--version', action='version',
                        version='LIVVkit {}'.format(livvkit.__version__),
                        help="Show LIVVkit's version number and exit")
    return init(parser.parse_args(args))
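The same argparse idioms in miniature, showing why parse_args(args) accepts a list instead of reading sys.argv, and how nargs='?' with const gives --serve a default port when the flag is given bare:

import argparse

parser = argparse.ArgumentParser(prog='livv-mini')
parser.add_argument('-o', '--out-dir', default='vv_out')
parser.add_argument('-s', '--serve', nargs='?', type=int, const=8000)
print(parser.parse_args(['-s']))          # Namespace(out_dir='vv_out', serve=8000)
print(parser.parse_args(['-s', '9000']))  # Namespace(out_dir='vv_out', serve=9000)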
def get_world_bbox(self):
    """Creates a bounding box of the entire world in EPSG: 3857

    :return: Bounding box of entire world
    :rtype: BBox
    """
    return BBox((-self.POP_WEB_MAX, -self.POP_WEB_MAX, self.POP_WEB_MAX, self.POP_WEB_MAX),
                crs=CRS.POP_WEB)
def function[get_world_bbox, parameter[self]]: constant[Creates a bounding box of the entire world in EPSG: 3857 :return: Bounding box of entire world :rtype: BBox ] return[call[name[BBox], parameter[tuple[[<ast.UnaryOp object at 0x7da18f00e830>, <ast.UnaryOp object at 0x7da18f00d750>, <ast.Attribute object at 0x7da18f00f250>, <ast.Attribute object at 0x7da18f00d990>]]]]]
keyword[def] identifier[get_world_bbox] ( identifier[self] ): literal[string] keyword[return] identifier[BBox] ((- identifier[self] . identifier[POP_WEB_MAX] ,- identifier[self] . identifier[POP_WEB_MAX] , identifier[self] . identifier[POP_WEB_MAX] , identifier[self] . identifier[POP_WEB_MAX] ), identifier[crs] = identifier[CRS] . identifier[POP_WEB] )
def get_world_bbox(self):
    """Creates a bounding box of the entire world in EPSG: 3857

    :return: Bounding box of entire world
    :rtype: BBox
    """
    return BBox((-self.POP_WEB_MAX, -self.POP_WEB_MAX, self.POP_WEB_MAX, self.POP_WEB_MAX),
                crs=CRS.POP_WEB)
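The EPSG:3857 world extent is symmetric about the origin; the constant below reproduces the usual value of POP_WEB_MAX (an assumption here, derived as pi times the WGS84 equatorial radius in the spherical Mercator projection):

import math

POP_WEB_MAX = math.pi * 6378137  # ~20037508.34 metres
world = (-POP_WEB_MAX, -POP_WEB_MAX, POP_WEB_MAX, POP_WEB_MAX)
print(world)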
def ml(line, cell=None):
  """Implements the datalab cell magic for MLWorkbench operations.

  Args:
    line: the contents of the ml command line.
  Returns:
    The results of executing the cell.
  """
  parser = google.datalab.utils.commands.CommandParser(
      prog='%ml',
      description=textwrap.dedent("""\
          Execute MLWorkbench operations

          Use "%ml <command> -h" for help on a specific command.
      """))

  dataset_parser = parser.subcommand(
      'dataset',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Create or explore datasets.')
  dataset_sub_commands = dataset_parser.add_subparsers(dest='command')

  dataset_create_parser = dataset_sub_commands.add_parser(
      'create', help='Create datasets',
      formatter_class=argparse.RawTextHelpFormatter,
      epilog=textwrap.dedent("""\
          Example usage:

          %%ml dataset
          name: mydata
          format: csv
          train: path/to/train.csv
          eval: path/to/eval.csv
          schema:
            - name: news_label
              type: STRING
            - name: text
              type: STRING"""))
  dataset_create_parser.add_argument(
      '--name', required=True,
      help='the name of the dataset to define. ')
  dataset_create_parser.add_argument(
      '--format', required=True, choices=['csv', 'bigquery', 'transformed'],
      help='The format of the data.')
  dataset_create_parser.add_argument(
      '--train', required=True,
      help='The path of the training file pattern if format ' +
           'is csv or transformed, or table name if format ' + 'is bigquery.')
  dataset_create_parser.add_argument(
      '--eval', required=True,
      help='The path of the eval file pattern if format ' +
           'is csv or transformed, or table name if format ' + 'is bigquery.')
  dataset_create_parser.add_cell_argument(
      'schema',
      help='yaml representation of CSV schema, or path to ' +
           'schema file. Only needed if format is csv.')
  dataset_create_parser.set_defaults(func=_dataset_create)

  dataset_explore_parser = dataset_sub_commands.add_parser(
      'explore', help='Explore training data.')
  dataset_explore_parser.add_argument(
      '--name', required=True,
      help='The name of the dataset to explore.')
  dataset_explore_parser.add_argument(
      '--overview', action='store_true', default=False,
      help='Plot overview of sampled data. Set "sample_size" ' +
           'to change the default sample size.')
  dataset_explore_parser.add_argument(
      '--facets', action='store_true', default=False,
      help='Plot facets view of sampled data. Set ' +
           '"sample_size" to change the default sample size.')
  dataset_explore_parser.add_argument(
      '--sample_size', type=int, default=1000,
      help='sample size for overview or facets view. Only ' +
           'used if either --overview or --facets is set.')
  dataset_explore_parser.set_defaults(func=_dataset_explore)

  analyze_parser = parser.subcommand(
      'analyze',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Analyze training data and generate stats, such as min/max/mean '
           'for numeric values, vocabulary for text columns.',
      epilog=textwrap.dedent("""\
          Example usage:

          %%ml analyze [--cloud]
          output: path/to/dir
          data: $mydataset
          features:
            serialId:
              transform: key
            num1:
              transform: scale
              value: 1
            num2:
              transform: identity
            text1:
              transform: bag_of_words

          Also supports in-notebook variables, such as:

          %%ml analyze --output path/to/dir
          training_data: $my_csv_dataset
          features: $features_def"""))
  analyze_parser.add_argument(
      '--output', required=True,
      help='path of output directory.')
  analyze_parser.add_argument(
      '--cloud', action='store_true', default=False,
      help='whether to run analysis in cloud or local.')
  analyze_parser.add_argument(
      '--package', required=False,
      help='A local or GCS tarball path to use as the source. '
           'If not set, the default source package will be used.')
  analyze_parser.add_cell_argument(
      'data', required=True,
      help="""Training data. A dataset defined by "%%ml dataset".""")
  analyze_parser.add_cell_argument(
      'features', required=True,
      help=textwrap.dedent("""\
          features config indicating how to transform data into features. The
          list of supported transforms:
              "transform: identity"
                   does nothing (for numerical columns).
              "transform: scale
               value: x"
                   scale a numerical column to [-a, a]. If value is missing, x
                   defaults to 1.
              "transform: one_hot"
                   treats the string column as categorical and makes one-hot
                   encoding of it.
              "transform: embedding
               embedding_dim: d"
                   treats the string column as categorical and makes embeddings of
                   it with specified dimension size.
              "transform: bag_of_words"
                   treats the string column as text and make bag of words
                   transform of it.
              "transform: tfidf"
                   treats the string column as text and make TFIDF transform of it.
              "transform: image_to_vec
               checkpoint: gs://b/o"
                   from image gs url to embeddings. "checkpoint" is a inception v3
                   checkpoint. If absent, a default checkpoint is used.
              "transform: target"
                   denotes the column is the target. If the schema type of this
                   column is string, a one_hot encoding is automatically applied.
                   If numerical, an identity transform is automatically applied.
              "transform: key"
                   column contains metadata-like information and will be output
                   as-is in prediction."""))
  analyze_parser.set_defaults(func=_analyze)

  transform_parser = parser.subcommand(
      'transform',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Transform the data into tf.example which is more efficient in training.',
      epilog=textwrap.dedent("""\
          Example usage:

          %%ml transform [--cloud] [--shuffle]
          analysis: path/to/analysis_output_folder
          output: path/to/dir
          batch_size: 100
          data: $mydataset
          cloud:
            num_workers: 3
            worker_machine_type: n1-standard-1
            project_id: my_project_id"""))
  transform_parser.add_argument(
      '--analysis', required=True,
      help='path of analysis output directory.')
  transform_parser.add_argument(
      '--output', required=True,
      help='path of output directory.')
  transform_parser.add_argument(
      '--cloud', action='store_true', default=False,
      help='whether to run transform in cloud or local.')
  transform_parser.add_argument(
      '--shuffle', action='store_true', default=False,
      help='whether to shuffle the training data in output.')
  transform_parser.add_argument(
      '--batch_size', type=int, default=100,
      help='number of instances in a batch to process once. '
           'Larger batch is more efficient but may consume more memory.')
  transform_parser.add_argument(
      '--package', required=False,
      help='A local or GCS tarball path to use as the source. '
           'If not set, the default source package will be used.')
  transform_parser.add_cell_argument(
      'data', required=True,
      help="""Training data. A dataset defined by "%%ml dataset".""")
  transform_parser.add_cell_argument(
      'cloud_config',
      help=textwrap.dedent("""\
          A dictionary of cloud config. All of them are optional.

          num_workers: Dataflow number of workers. If not set, DataFlow
              service will determine the number.
          worker_machine_type: a machine name from
              https://cloud.google.com/compute/docs/machine-types
              If not given, the service uses the default machine type.
          project_id: id of the project to use for DataFlow service. If not set,
              Datalab's default project (set by %%datalab project set) is used.
          job_name: Unique name for a Dataflow job to use. If not set, a
              random name will be used."""))
  transform_parser.set_defaults(func=_transform)

  train_parser = parser.subcommand(
      'train',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Train a model.',
      epilog=textwrap.dedent("""\
          Example usage:

          %%ml train [--cloud]
          analysis: path/to/analysis_output
          output: path/to/dir
          data: $mydataset
          model_args:
            model: linear_regression
          cloud_config:
            region: us-central1"""))
  train_parser.add_argument(
      '--analysis', required=True,
      help='path of analysis output directory.')
  train_parser.add_argument(
      '--output', required=True,
      help='path of trained model directory.')
  train_parser.add_argument(
      '--cloud', action='store_true', default=False,
      help='whether to run training in cloud or local.')
  train_parser.add_argument(
      '--notb', action='store_true', default=False,
      help='If set, tensorboard is not automatically started.')
  train_parser.add_argument(
      '--package', required=False,
      help='A local or GCS tarball path to use as the source. '
           'If not set, the default source package will be used.')
  train_parser.add_cell_argument(
      'data', required=True,
      help="""Training data. A dataset defined by "%%ml dataset".""")

  package_model_help = subprocess.Popen(
      ['python', '-m', 'trainer.task', '--datalab-help'],
      cwd=DEFAULT_PACKAGE_PATH,
      stdout=subprocess.PIPE).communicate()[0]
  package_model_help = ('model_args: a dictionary of model specific args, including:\n\n' +
                        package_model_help.decode())
  train_parser.add_cell_argument('model_args', help=package_model_help)

  train_parser.add_cell_argument(
      'cloud_config',
      help=textwrap.dedent("""\
          A dictionary of cloud training config, including:

          job_id: the name of the job. If not provided, a default job name is created.
          region: see {url}
          runtime_version: see "region". Must be a string like '1.2'.
          scale_tier: see "region".""".format(
          url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/training')))
  train_parser.set_defaults(func=_train)

  predict_parser = parser.subcommand(
      'predict',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Predict with local or deployed models. (Good for small datasets).',
      epilog=textwrap.dedent("""\
          Example usage:

          %%ml predict
          headers: key,num
          model: path/to/model
          data:
            - key1,value1
            - key2,value2

          Or, in another cell, define a list of dict:

          my_data = [{'key': 1, 'num': 1.2}, {'key': 2, 'num': 2.8}]

          Then:

          %%ml predict
          headers: key,num
          model: path/to/model
          data: $my_data"""))
  predict_parser.add_argument(
      '--model', required=True,
      help='The model path.')
  predict_parser.add_argument(
      '--no_show_image', action='store_true', default=False,
      help='If not set, add a column of images in output.')
  predict_parser.add_cell_argument(
      'data', required=True,
      help=textwrap.dedent("""\
          Prediction data can be
          1) CSV lines in the input cell in yaml format or
          2) a local variable which is one of
            a) list of dict
            b) list of strings of csv lines
            c) a Pandas DataFrame"""))
  predict_parser.set_defaults(func=_predict)

  batch_predict_parser = parser.subcommand(
      'batch_predict',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Batch prediction with local or deployed models. (Good for large datasets)',
      epilog=textwrap.dedent("""\
          Example usage:

          %%ml batch_predict [--cloud]
          model: path/to/model
          output: path/to/output
          format: csv
          data:
            csv: path/to/file_pattern"""))
  batch_predict_parser.add_argument(
      '--model', required=True,
      help='The model path if not --cloud, or the id in '
           'the form of model.version if --cloud.')
  batch_predict_parser.add_argument(
      '--output', required=True,
      help='The path of output directory with prediction results. '
           'If --cloud, it has to be GCS path.')
  batch_predict_parser.add_argument(
      '--format',
      help='csv or json. For cloud run, '
           'the only supported format is json.')
  batch_predict_parser.add_argument(
      '--batch_size', type=int, default=100,
      help='number of instances in a batch to process once. '
           'Larger batch is more efficient but may consume '
           'more memory. Only used in local run.')
  batch_predict_parser.add_argument(
      '--cloud', action='store_true', default=False,
      help='whether to run prediction in cloud or local.')
  batch_predict_parser.add_cell_argument(
      'data', required=True,
      help='Data to predict with. Only csv is supported.')
  batch_predict_parser.add_cell_argument(
      'cloud_config',
      help=textwrap.dedent("""\
          A dictionary of cloud batch prediction config.

          job_id: the name of the job. If not provided, a default job name is created.
          region: see {url}
          max_worker_count: see reference in "region".""".format(
          url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/prediction')))  # noqa
  batch_predict_parser.set_defaults(func=_batch_predict)

  explain_parser = parser.subcommand(
      'explain',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Explain a prediction with LIME tool.')
  explain_parser.add_argument(
      '--type', default='all', choices=['text', 'image', 'tabular', 'all'],
      help='the type of column to explain.')
  explain_parser.add_argument(
      '--algorithm', choices=['lime', 'ig'], default='lime',
      help='"lime" is the open sourced project for prediction explainer.' +
           '"ig" means integrated gradients and currently only applies ' + 'to image.')
  explain_parser.add_argument(
      '--model', required=True,
      help='path of the model directory used for prediction.')
  explain_parser.add_argument(
      '--labels', required=True,
      help='comma separated labels to explain.')
  explain_parser.add_argument(
      '--column_name',
      help='the name of the column to explain. Optional if text type ' +
           'and there is only one text column, or image type and ' +
           'there is only one image column.')
  explain_parser.add_cell_argument(
      'data', required=True,
      help='Prediction Data. Can be a csv line, or a dict.')
  explain_parser.add_cell_argument(
      'training_data',
      help='A csv or bigquery dataset defined by %%ml dataset. ' +
           'Used by tabular explainer only to determine the ' +
           'distribution of numeric and categorical values. ' +
           'Suggest using original training dataset.')

  # options specific for lime
  explain_parser.add_argument(
      '--num_features', type=int,
      help='number of features to analyze. In text, it is number of ' +
           'words. In image, it is number of areas. For lime only.')
  explain_parser.add_argument(
      '--num_samples', type=int,
      help='size of the neighborhood to learn the linear model. ' + 'For lime only.')
  explain_parser.add_argument(
      '--hide_color', type=int, default=0,
      help='the color to use for perturbed area. If -1, average of ' +
           'each channel is used for each channel. For image only.')
  explain_parser.add_argument(
      '--include_negative', action='store_true', default=False,
      help='whether to show only positive areas. For lime image only.')
  explain_parser.add_argument(
      '--overview', action='store_true', default=False,
      help='whether to show overview instead of details view.' +
           'For lime text and tabular only.')
  explain_parser.add_argument(
      '--batch_size', type=int, default=100,
      help='size of batches passed to prediction. For lime only.')

  # options specific for integrated gradients
  explain_parser.add_argument(
      '--num_gradients', type=int, default=50,
      help='the number of scaled images to get gradients from. Larger ' +
           'number usually produces better results but slower.')
  explain_parser.add_argument(
      '--percent_show', type=int, default=10,
      help='the percentage of top impactful pixels to show.')
  explain_parser.set_defaults(func=_explain)

  tensorboard_parser = parser.subcommand(
      'tensorboard',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Start/stop/list TensorBoard instances.')
  tensorboard_sub_commands = tensorboard_parser.add_subparsers(dest='command')

  tensorboard_start_parser = tensorboard_sub_commands.add_parser(
      'start', help='Start a tensorboard instance.')
  tensorboard_start_parser.add_argument(
      '--logdir', required=True,
      help='The local or GCS logdir path.')
  tensorboard_start_parser.set_defaults(func=_tensorboard_start)

  tensorboard_stop_parser = tensorboard_sub_commands.add_parser(
      'stop', help='Stop a tensorboard instance.')
  tensorboard_stop_parser.add_argument(
      '--pid', required=True, type=int,
      help='The pid of the tensorboard instance.')
  tensorboard_stop_parser.set_defaults(func=_tensorboard_stop)

  tensorboard_list_parser = tensorboard_sub_commands.add_parser(
      'list', help='List tensorboard instances.')
  tensorboard_list_parser.set_defaults(func=_tensorboard_list)

  evaluate_parser = parser.subcommand(
      'evaluate',
      formatter_class=argparse.RawTextHelpFormatter,
      help='Analyze model evaluation results, such as confusion matrix, ROC, RMSE.')
  evaluate_sub_commands = evaluate_parser.add_subparsers(dest='command')

  def _add_data_params_for_evaluate(parser):
    parser.add_argument('--csv', help='csv file path patterns.')
    parser.add_argument(
        '--headers',
        help='csv file headers. Required if csv is specified and ' +
             'predict_results_schema.json does not exist in the same directory.')
    parser.add_argument(
        '--bigquery',
        help='can be bigquery table, query as a string, or ' +
             'a pre-defined query (%%bq query --name).')

  evaluate_cm_parser = evaluate_sub_commands.add_parser(
      'confusion_matrix', help='Get confusion matrix from evaluation results.')
  _add_data_params_for_evaluate(evaluate_cm_parser)
  evaluate_cm_parser.add_argument(
      '--plot', action='store_true', default=False,
      help='Whether to plot confusion matrix as graph.')
  evaluate_cm_parser.add_argument(
      '--size', type=int, default=10,
      help='The size of the confusion matrix.')
  evaluate_cm_parser.set_defaults(func=_evaluate_cm)

  evaluate_accuracy_parser = evaluate_sub_commands.add_parser(
      'accuracy', help='Get accuracy results from classification evaluation results.')
  _add_data_params_for_evaluate(evaluate_accuracy_parser)
  evaluate_accuracy_parser.set_defaults(func=_evaluate_accuracy)

  evaluate_pr_parser = evaluate_sub_commands.add_parser(
      'precision_recall', help='Get precision recall metrics from evaluation results.')
  _add_data_params_for_evaluate(evaluate_pr_parser)
  evaluate_pr_parser.add_argument(
      '--plot', action='store_true', default=False,
      help='Whether to plot precision recall as graph.')
  evaluate_pr_parser.add_argument(
      '--num_thresholds', type=int, default=20,
      help='Number of thresholds which determines how many ' + 'points in the graph.')
  evaluate_pr_parser.add_argument(
      '--target_class', required=True,
      help='The target class to determine correctness of ' + 'a prediction.')
  evaluate_pr_parser.add_argument(
      '--probability_column',
      help='The name of the column holding the probability ' +
           'value of the target class. If absent, the value ' +
           'of target class is used.')
  evaluate_pr_parser.set_defaults(func=_evaluate_pr)

  evaluate_roc_parser = evaluate_sub_commands.add_parser(
      'roc', help='Get ROC metrics from evaluation results.')
  _add_data_params_for_evaluate(evaluate_roc_parser)
  evaluate_roc_parser.add_argument(
      '--plot', action='store_true', default=False,
      help='Whether to plot ROC as graph.')
  evaluate_roc_parser.add_argument(
      '--num_thresholds', type=int, default=20,
      help='Number of thresholds which determines how many ' + 'points in the graph.')
  evaluate_roc_parser.add_argument(
      '--target_class', required=True,
      help='The target class to determine correctness of ' + 'a prediction.')
  evaluate_roc_parser.add_argument(
      '--probability_column',
      help='The name of the column holding the probability ' +
           'value of the target class. If absent, the value ' +
           'of target class is used.')
  evaluate_roc_parser.set_defaults(func=_evaluate_roc)

  evaluate_regression_parser = evaluate_sub_commands.add_parser(
      'regression', help='Get regression metrics from evaluation results.')
  _add_data_params_for_evaluate(evaluate_regression_parser)
  evaluate_regression_parser.set_defaults(func=_evaluate_regression)

  model_parser = parser.subcommand(
      'model', help='Models and versions management such as deployment, deletion, listing.')
  model_sub_commands = model_parser.add_subparsers(dest='command')

  model_list_parser = model_sub_commands.add_parser(
      'list', help='List models and versions.')
  model_list_parser.add_argument(
      '--name',
      help='If absent, list all models of specified or current ' +
           'project. If provided, list all versions of the ' + 'model.')
  model_list_parser.add_argument(
      '--project',
      help='The project to list model(s) or version(s). If absent, ' +
           'use Datalab\'s default project.')
  model_list_parser.set_defaults(func=_model_list)

  model_delete_parser = model_sub_commands.add_parser(
      'delete', help='Delete models or versions.')
  model_delete_parser.add_argument(
      '--name', required=True,
      help='If no "." in the name, try deleting the specified ' +
           'model. If "model.version" is provided, try deleting ' +
           'the specified version.')
  model_delete_parser.add_argument(
      '--project',
      help='The project to delete model or version. If absent, ' +
           'use Datalab\'s default project.')
  model_delete_parser.set_defaults(func=_model_delete)

  model_deploy_parser = model_sub_commands.add_parser(
      'deploy', help='Deploy a model version.')
  model_deploy_parser.add_argument(
      '--name', required=True,
      help='Must be model.version to indicate the model ' +
           'and version name to deploy.')
  model_deploy_parser.add_argument(
      '--path', required=True,
      help='The GCS path of the model to be deployed.')
  model_deploy_parser.add_argument(
      '--runtime_version',
      help='The TensorFlow version to use for this model. ' +
           'For example, "1.2.1". If absent, the current ' +
           'TensorFlow version installed in Datalab will be used.')
  model_deploy_parser.add_argument(
      '--project',
      help='The project to deploy a model version. If absent, ' +
           'use Datalab\'s default project.')
  model_deploy_parser.set_defaults(func=_model_deploy)

  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
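CommandParser layers cell-argument handling on top of argparse; a plain-argparse skeleton of the same subcommand layout, for orientation (names here are illustrative, not the Datalab API):

import argparse

parser = argparse.ArgumentParser(prog='%ml')
subs = parser.add_subparsers(dest='subcommand')
analyze = subs.add_parser('analyze', help='Analyze training data.')
analyze.add_argument('--output', required=True)
analyze.add_argument('--cloud', action='store_true', default=False)
print(parser.parse_args(['analyze', '--output', 'path/to/dir']))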
def function[ml, parameter[line, cell]]: constant[Implements the datalab cell magic for MLWorkbench operations. Args: line: the contents of the ml command line. Returns: The results of executing the cell. ] variable[parser] assign[=] call[name[google].datalab.utils.commands.CommandParser, parameter[]] variable[dataset_parser] assign[=] call[name[parser].subcommand, parameter[constant[dataset]]] variable[dataset_sub_commands] assign[=] call[name[dataset_parser].add_subparsers, parameter[]] variable[dataset_create_parser] assign[=] call[name[dataset_sub_commands].add_parser, parameter[constant[create]]] call[name[dataset_create_parser].add_argument, parameter[constant[--name]]] call[name[dataset_create_parser].add_argument, parameter[constant[--format]]] call[name[dataset_create_parser].add_argument, parameter[constant[--train]]] call[name[dataset_create_parser].add_argument, parameter[constant[--eval]]] call[name[dataset_create_parser].add_cell_argument, parameter[constant[schema]]] call[name[dataset_create_parser].set_defaults, parameter[]] variable[dataset_explore_parser] assign[=] call[name[dataset_sub_commands].add_parser, parameter[constant[explore]]] call[name[dataset_explore_parser].add_argument, parameter[constant[--name]]] call[name[dataset_explore_parser].add_argument, parameter[constant[--overview]]] call[name[dataset_explore_parser].add_argument, parameter[constant[--facets]]] call[name[dataset_explore_parser].add_argument, parameter[constant[--sample_size]]] call[name[dataset_explore_parser].set_defaults, parameter[]] variable[analyze_parser] assign[=] call[name[parser].subcommand, parameter[constant[analyze]]] call[name[analyze_parser].add_argument, parameter[constant[--output]]] call[name[analyze_parser].add_argument, parameter[constant[--cloud]]] call[name[analyze_parser].add_argument, parameter[constant[--package]]] call[name[analyze_parser].add_cell_argument, parameter[constant[data]]] call[name[analyze_parser].add_cell_argument, parameter[constant[features]]] call[name[analyze_parser].set_defaults, parameter[]] variable[transform_parser] assign[=] call[name[parser].subcommand, parameter[constant[transform]]] call[name[transform_parser].add_argument, parameter[constant[--analysis]]] call[name[transform_parser].add_argument, parameter[constant[--output]]] call[name[transform_parser].add_argument, parameter[constant[--cloud]]] call[name[transform_parser].add_argument, parameter[constant[--shuffle]]] call[name[transform_parser].add_argument, parameter[constant[--batch_size]]] call[name[transform_parser].add_argument, parameter[constant[--package]]] call[name[transform_parser].add_cell_argument, parameter[constant[data]]] call[name[transform_parser].add_cell_argument, parameter[constant[cloud_config]]] call[name[transform_parser].set_defaults, parameter[]] variable[train_parser] assign[=] call[name[parser].subcommand, parameter[constant[train]]] call[name[train_parser].add_argument, parameter[constant[--analysis]]] call[name[train_parser].add_argument, parameter[constant[--output]]] call[name[train_parser].add_argument, parameter[constant[--cloud]]] call[name[train_parser].add_argument, parameter[constant[--notb]]] call[name[train_parser].add_argument, parameter[constant[--package]]] call[name[train_parser].add_cell_argument, parameter[constant[data]]] variable[package_model_help] assign[=] call[call[call[name[subprocess].Popen, parameter[list[[<ast.Constant object at 0x7da18ede7310>, <ast.Constant object at 0x7da18ede4040>, <ast.Constant object at 0x7da18ede7880>, 
<ast.Constant object at 0x7da18ede5c30>]]]].communicate, parameter[]]][constant[0]] variable[package_model_help] assign[=] binary_operation[constant[model_args: a dictionary of model specific args, including: ] + call[name[package_model_help].decode, parameter[]]] call[name[train_parser].add_cell_argument, parameter[constant[model_args]]] call[name[train_parser].add_cell_argument, parameter[constant[cloud_config]]] call[name[train_parser].set_defaults, parameter[]] variable[predict_parser] assign[=] call[name[parser].subcommand, parameter[constant[predict]]] call[name[predict_parser].add_argument, parameter[constant[--model]]] call[name[predict_parser].add_argument, parameter[constant[--no_show_image]]] call[name[predict_parser].add_cell_argument, parameter[constant[data]]] call[name[predict_parser].set_defaults, parameter[]] variable[batch_predict_parser] assign[=] call[name[parser].subcommand, parameter[constant[batch_predict]]] call[name[batch_predict_parser].add_argument, parameter[constant[--model]]] call[name[batch_predict_parser].add_argument, parameter[constant[--output]]] call[name[batch_predict_parser].add_argument, parameter[constant[--format]]] call[name[batch_predict_parser].add_argument, parameter[constant[--batch_size]]] call[name[batch_predict_parser].add_argument, parameter[constant[--cloud]]] call[name[batch_predict_parser].add_cell_argument, parameter[constant[data]]] call[name[batch_predict_parser].add_cell_argument, parameter[constant[cloud_config]]] call[name[batch_predict_parser].set_defaults, parameter[]] variable[explain_parser] assign[=] call[name[parser].subcommand, parameter[constant[explain]]] call[name[explain_parser].add_argument, parameter[constant[--type]]] call[name[explain_parser].add_argument, parameter[constant[--algorithm]]] call[name[explain_parser].add_argument, parameter[constant[--model]]] call[name[explain_parser].add_argument, parameter[constant[--labels]]] call[name[explain_parser].add_argument, parameter[constant[--column_name]]] call[name[explain_parser].add_cell_argument, parameter[constant[data]]] call[name[explain_parser].add_cell_argument, parameter[constant[training_data]]] call[name[explain_parser].add_argument, parameter[constant[--num_features]]] call[name[explain_parser].add_argument, parameter[constant[--num_samples]]] call[name[explain_parser].add_argument, parameter[constant[--hide_color]]] call[name[explain_parser].add_argument, parameter[constant[--include_negative]]] call[name[explain_parser].add_argument, parameter[constant[--overview]]] call[name[explain_parser].add_argument, parameter[constant[--batch_size]]] call[name[explain_parser].add_argument, parameter[constant[--num_gradients]]] call[name[explain_parser].add_argument, parameter[constant[--percent_show]]] call[name[explain_parser].set_defaults, parameter[]] variable[tensorboard_parser] assign[=] call[name[parser].subcommand, parameter[constant[tensorboard]]] variable[tensorboard_sub_commands] assign[=] call[name[tensorboard_parser].add_subparsers, parameter[]] variable[tensorboard_start_parser] assign[=] call[name[tensorboard_sub_commands].add_parser, parameter[constant[start]]] call[name[tensorboard_start_parser].add_argument, parameter[constant[--logdir]]] call[name[tensorboard_start_parser].set_defaults, parameter[]] variable[tensorboard_stop_parser] assign[=] call[name[tensorboard_sub_commands].add_parser, parameter[constant[stop]]] call[name[tensorboard_stop_parser].add_argument, parameter[constant[--pid]]] call[name[tensorboard_stop_parser].set_defaults, 
parameter[]] variable[tensorboard_list_parser] assign[=] call[name[tensorboard_sub_commands].add_parser, parameter[constant[list]]] call[name[tensorboard_list_parser].set_defaults, parameter[]] variable[evaluate_parser] assign[=] call[name[parser].subcommand, parameter[constant[evaluate]]] variable[evaluate_sub_commands] assign[=] call[name[evaluate_parser].add_subparsers, parameter[]] def function[_add_data_params_for_evaluate, parameter[parser]]: call[name[parser].add_argument, parameter[constant[--csv]]] call[name[parser].add_argument, parameter[constant[--headers]]] call[name[parser].add_argument, parameter[constant[--bigquery]]] variable[evaluate_cm_parser] assign[=] call[name[evaluate_sub_commands].add_parser, parameter[constant[confusion_matrix]]] call[name[_add_data_params_for_evaluate], parameter[name[evaluate_cm_parser]]] call[name[evaluate_cm_parser].add_argument, parameter[constant[--plot]]] call[name[evaluate_cm_parser].add_argument, parameter[constant[--size]]] call[name[evaluate_cm_parser].set_defaults, parameter[]] variable[evaluate_accuracy_parser] assign[=] call[name[evaluate_sub_commands].add_parser, parameter[constant[accuracy]]] call[name[_add_data_params_for_evaluate], parameter[name[evaluate_accuracy_parser]]] call[name[evaluate_accuracy_parser].set_defaults, parameter[]] variable[evaluate_pr_parser] assign[=] call[name[evaluate_sub_commands].add_parser, parameter[constant[precision_recall]]] call[name[_add_data_params_for_evaluate], parameter[name[evaluate_pr_parser]]] call[name[evaluate_pr_parser].add_argument, parameter[constant[--plot]]] call[name[evaluate_pr_parser].add_argument, parameter[constant[--num_thresholds]]] call[name[evaluate_pr_parser].add_argument, parameter[constant[--target_class]]] call[name[evaluate_pr_parser].add_argument, parameter[constant[--probability_column]]] call[name[evaluate_pr_parser].set_defaults, parameter[]] variable[evaluate_roc_parser] assign[=] call[name[evaluate_sub_commands].add_parser, parameter[constant[roc]]] call[name[_add_data_params_for_evaluate], parameter[name[evaluate_roc_parser]]] call[name[evaluate_roc_parser].add_argument, parameter[constant[--plot]]] call[name[evaluate_roc_parser].add_argument, parameter[constant[--num_thresholds]]] call[name[evaluate_roc_parser].add_argument, parameter[constant[--target_class]]] call[name[evaluate_roc_parser].add_argument, parameter[constant[--probability_column]]] call[name[evaluate_roc_parser].set_defaults, parameter[]] variable[evaluate_regression_parser] assign[=] call[name[evaluate_sub_commands].add_parser, parameter[constant[regression]]] call[name[_add_data_params_for_evaluate], parameter[name[evaluate_regression_parser]]] call[name[evaluate_regression_parser].set_defaults, parameter[]] variable[model_parser] assign[=] call[name[parser].subcommand, parameter[constant[model]]] variable[model_sub_commands] assign[=] call[name[model_parser].add_subparsers, parameter[]] variable[model_list_parser] assign[=] call[name[model_sub_commands].add_parser, parameter[constant[list]]] call[name[model_list_parser].add_argument, parameter[constant[--name]]] call[name[model_list_parser].add_argument, parameter[constant[--project]]] call[name[model_list_parser].set_defaults, parameter[]] variable[model_delete_parser] assign[=] call[name[model_sub_commands].add_parser, parameter[constant[delete]]] call[name[model_delete_parser].add_argument, parameter[constant[--name]]] call[name[model_delete_parser].add_argument, parameter[constant[--project]]] call[name[model_delete_parser].set_defaults, 
parameter[]] variable[model_deploy_parser] assign[=] call[name[model_sub_commands].add_parser, parameter[constant[deploy]]] call[name[model_deploy_parser].add_argument, parameter[constant[--name]]] call[name[model_deploy_parser].add_argument, parameter[constant[--path]]] call[name[model_deploy_parser].add_argument, parameter[constant[--runtime_version]]] call[name[model_deploy_parser].add_argument, parameter[constant[--project]]] call[name[model_deploy_parser].set_defaults, parameter[]] return[call[name[google].datalab.utils.commands.handle_magic_line, parameter[name[line], name[cell], name[parser]]]]
keyword[def] identifier[ml] ( identifier[line] , identifier[cell] = keyword[None] ): literal[string] identifier[parser] = identifier[google] . identifier[datalab] . identifier[utils] . identifier[commands] . identifier[CommandParser] ( identifier[prog] = literal[string] , identifier[description] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[dataset_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] ) identifier[dataset_sub_commands] = identifier[dataset_parser] . identifier[add_subparsers] ( identifier[dest] = literal[string] ) identifier[dataset_create_parser] = identifier[dataset_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[epilog] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[dataset_create_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[dataset_create_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[choices] =[ literal[string] , literal[string] , literal[string] ], identifier[help] = literal[string] ) identifier[dataset_create_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[dataset_create_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[dataset_create_parser] . identifier[add_cell_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[dataset_create_parser] . identifier[set_defaults] ( identifier[func] = identifier[_dataset_create] ) identifier[dataset_explore_parser] = identifier[dataset_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[dataset_explore_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[dataset_explore_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] + literal[string] ) identifier[dataset_explore_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] + literal[string] ) identifier[dataset_explore_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] + literal[string] ) identifier[dataset_explore_parser] . identifier[set_defaults] ( identifier[func] = identifier[_dataset_explore] ) identifier[analyze_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] literal[string] , identifier[epilog] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[analyze_parser] . 
identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[analyze_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[analyze_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[False] , identifier[help] = literal[string] literal[string] ) identifier[analyze_parser] . identifier[add_cell_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[analyze_parser] . identifier[add_cell_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[analyze_parser] . identifier[set_defaults] ( identifier[func] = identifier[_analyze] ) identifier[transform_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] , identifier[epilog] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[transform_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[transform_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[transform_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[transform_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[transform_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] literal[string] ) identifier[transform_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[False] , identifier[help] = literal[string] literal[string] ) identifier[transform_parser] . identifier[add_cell_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[transform_parser] . identifier[add_cell_argument] ( literal[string] , identifier[help] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[transform_parser] . identifier[set_defaults] ( identifier[func] = identifier[_transform] ) identifier[train_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] , identifier[epilog] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[train_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[train_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[train_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[train_parser] . 
identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[train_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[False] , identifier[help] = literal[string] literal[string] ) identifier[train_parser] . identifier[add_cell_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[package_model_help] = identifier[subprocess] . identifier[Popen] ( [ literal[string] , literal[string] , literal[string] , literal[string] ], identifier[cwd] = identifier[DEFAULT_PACKAGE_PATH] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ). identifier[communicate] ()[ literal[int] ] identifier[package_model_help] =( literal[string] + identifier[package_model_help] . identifier[decode] ()) identifier[train_parser] . identifier[add_cell_argument] ( literal[string] , identifier[help] = identifier[package_model_help] ) identifier[train_parser] . identifier[add_cell_argument] ( literal[string] , identifier[help] = identifier[textwrap] . identifier[dedent] ( literal[string] . identifier[format] ( identifier[url] = literal[string] ))) identifier[train_parser] . identifier[set_defaults] ( identifier[func] = identifier[_train] ) identifier[predict_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] , identifier[epilog] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[predict_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[predict_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[predict_parser] . identifier[add_cell_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[predict_parser] . identifier[set_defaults] ( identifier[func] = identifier[_predict] ) identifier[batch_predict_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] , identifier[epilog] = identifier[textwrap] . identifier[dedent] ( literal[string] )) identifier[batch_predict_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] literal[string] ) identifier[batch_predict_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] literal[string] ) identifier[batch_predict_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] literal[string] ) identifier[batch_predict_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] literal[string] literal[string] ) identifier[batch_predict_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[batch_predict_parser] . 
identifier[add_cell_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[batch_predict_parser] . identifier[add_cell_argument] ( literal[string] , identifier[help] = identifier[textwrap] . identifier[dedent] ( literal[string] . identifier[format] ( identifier[url] = literal[string] ))) identifier[batch_predict_parser] . identifier[set_defaults] ( identifier[func] = identifier[_batch_predict] ) identifier[explain_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[default] = literal[string] , identifier[choices] =[ literal[string] , literal[string] , literal[string] , literal[string] ], identifier[help] = literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[choices] =[ literal[string] , literal[string] ], identifier[default] = literal[string] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_cell_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[explain_parser] . identifier[add_cell_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] + literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[help] = literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[help] = literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] + literal[string] ) identifier[explain_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[explain_parser] . 
identifier[set_defaults] ( identifier[func] = identifier[_explain] ) identifier[tensorboard_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] ) identifier[tensorboard_sub_commands] = identifier[tensorboard_parser] . identifier[add_subparsers] ( identifier[dest] = literal[string] ) identifier[tensorboard_start_parser] = identifier[tensorboard_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[tensorboard_start_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[tensorboard_start_parser] . identifier[set_defaults] ( identifier[func] = identifier[_tensorboard_start] ) identifier[tensorboard_stop_parser] = identifier[tensorboard_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[tensorboard_stop_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[type] = identifier[int] , identifier[help] = literal[string] ) identifier[tensorboard_stop_parser] . identifier[set_defaults] ( identifier[func] = identifier[_tensorboard_stop] ) identifier[tensorboard_list_parser] = identifier[tensorboard_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[tensorboard_list_parser] . identifier[set_defaults] ( identifier[func] = identifier[_tensorboard_list] ) identifier[evaluate_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[formatter_class] = identifier[argparse] . identifier[RawTextHelpFormatter] , identifier[help] = literal[string] ) identifier[evaluate_sub_commands] = identifier[evaluate_parser] . identifier[add_subparsers] ( identifier[dest] = literal[string] ) keyword[def] identifier[_add_data_params_for_evaluate] ( identifier[parser] ): identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[evaluate_cm_parser] = identifier[evaluate_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[_add_data_params_for_evaluate] ( identifier[evaluate_cm_parser] ) identifier[evaluate_cm_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[evaluate_cm_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[evaluate_cm_parser] . identifier[set_defaults] ( identifier[func] = identifier[_evaluate_cm] ) identifier[evaluate_accuracy_parser] = identifier[evaluate_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[_add_data_params_for_evaluate] ( identifier[evaluate_accuracy_parser] ) identifier[evaluate_accuracy_parser] . identifier[set_defaults] ( identifier[func] = identifier[_evaluate_accuracy] ) identifier[evaluate_pr_parser] = identifier[evaluate_sub_commands] . 
identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[_add_data_params_for_evaluate] ( identifier[evaluate_pr_parser] ) identifier[evaluate_pr_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[evaluate_pr_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] + literal[string] ) identifier[evaluate_pr_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] + literal[string] ) identifier[evaluate_pr_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[evaluate_pr_parser] . identifier[set_defaults] ( identifier[func] = identifier[_evaluate_pr] ) identifier[evaluate_roc_parser] = identifier[evaluate_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[_add_data_params_for_evaluate] ( identifier[evaluate_roc_parser] ) identifier[evaluate_roc_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] ) identifier[evaluate_roc_parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] + literal[string] ) identifier[evaluate_roc_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] + literal[string] ) identifier[evaluate_roc_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[evaluate_roc_parser] . identifier[set_defaults] ( identifier[func] = identifier[_evaluate_roc] ) identifier[evaluate_regression_parser] = identifier[evaluate_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[_add_data_params_for_evaluate] ( identifier[evaluate_regression_parser] ) identifier[evaluate_regression_parser] . identifier[set_defaults] ( identifier[func] = identifier[_evaluate_regression] ) identifier[model_parser] = identifier[parser] . identifier[subcommand] ( literal[string] , identifier[help] = literal[string] ) identifier[model_sub_commands] = identifier[model_parser] . identifier[add_subparsers] ( identifier[dest] = literal[string] ) identifier[model_list_parser] = identifier[model_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[model_list_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[model_list_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[model_list_parser] . identifier[set_defaults] ( identifier[func] = identifier[_model_list] ) identifier[model_delete_parser] = identifier[model_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[model_delete_parser] . 
identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[model_delete_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[model_delete_parser] . identifier[set_defaults] ( identifier[func] = identifier[_model_delete] ) identifier[model_deploy_parser] = identifier[model_sub_commands] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[model_deploy_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] + literal[string] ) identifier[model_deploy_parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[model_deploy_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] + literal[string] ) identifier[model_deploy_parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[model_deploy_parser] . identifier[set_defaults] ( identifier[func] = identifier[_model_deploy] ) keyword[return] identifier[google] . identifier[datalab] . identifier[utils] . identifier[commands] . identifier[handle_magic_line] ( identifier[line] , identifier[cell] , identifier[parser] )
def ml(line, cell=None): """Implements the datalab cell magic for MLWorkbench operations. Args: line: the contents of the ml command line. Returns: The results of executing the cell. """ parser = google.datalab.utils.commands.CommandParser(prog='%ml', description=textwrap.dedent(' Execute MLWorkbench operations\n\n Use "%ml <command> -h" for help on a specific command.\n ')) dataset_parser = parser.subcommand('dataset', formatter_class=argparse.RawTextHelpFormatter, help='Create or explore datasets.') dataset_sub_commands = dataset_parser.add_subparsers(dest='command') dataset_create_parser = dataset_sub_commands.add_parser('create', help='Create datasets', formatter_class=argparse.RawTextHelpFormatter, epilog=textwrap.dedent(' Example usage:\n\n %%ml dataset\n name: mydata\n format: csv\n train: path/to/train.csv\n eval: path/to/eval.csv\n schema:\n - name: news_label\n type: STRING\n - name: text\n type: STRING')) dataset_create_parser.add_argument('--name', required=True, help='the name of the dataset to define. ') dataset_create_parser.add_argument('--format', required=True, choices=['csv', 'bigquery', 'transformed'], help='The format of the data.') dataset_create_parser.add_argument('--train', required=True, help='The path of the training file pattern if format ' + 'is csv or transformed, or table name if format ' + 'is bigquery.') dataset_create_parser.add_argument('--eval', required=True, help='The path of the eval file pattern if format ' + 'is csv or transformed, or table name if format ' + 'is bigquery.') dataset_create_parser.add_cell_argument('schema', help='yaml representation of CSV schema, or path to ' + 'schema file. Only needed if format is csv.') dataset_create_parser.set_defaults(func=_dataset_create) dataset_explore_parser = dataset_sub_commands.add_parser('explore', help='Explore training data.') dataset_explore_parser.add_argument('--name', required=True, help='The name of the dataset to explore.') dataset_explore_parser.add_argument('--overview', action='store_true', default=False, help='Plot overview of sampled data. Set "sample_size" ' + 'to change the default sample size.') dataset_explore_parser.add_argument('--facets', action='store_true', default=False, help='Plot facets view of sampled data. Set ' + '"sample_size" to change the default sample size.') dataset_explore_parser.add_argument('--sample_size', type=int, default=1000, help='sample size for overview or facets view. Only ' + 'used if either --overview or --facets is set.') dataset_explore_parser.set_defaults(func=_dataset_explore) analyze_parser = parser.subcommand('analyze', formatter_class=argparse.RawTextHelpFormatter, help='Analyze training data and generate stats, such as min/max/mean for numeric values, vocabulary for text columns.', epilog=textwrap.dedent(' Example usage:\n\n %%ml analyze [--cloud]\n output: path/to/dir\n data: $mydataset\n features:\n serialId:\n transform: key\n num1:\n transform: scale\n value: 1\n num2:\n transform: identity\n text1:\n transform: bag_of_words\n\n Also supports in-notebook variables, such as:\n %%ml analyze --output path/to/dir\n training_data: $my_csv_dataset\n features: $features_def')) analyze_parser.add_argument('--output', required=True, help='path of output directory.') analyze_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run analysis in cloud or local.') analyze_parser.add_argument('--package', required=False, help='A local or GCS tarball path to use as the source. 
If not set, the default source package will be used.') analyze_parser.add_cell_argument('data', required=True, help='Training data. A dataset defined by "%%ml dataset".') analyze_parser.add_cell_argument('features', required=True, help=textwrap.dedent(' features config indicating how to transform data into features. The\n list of supported transforms:\n "transform: identity"\n does nothing (for numerical columns).\n "transform: scale\n value: x"\n scale a numerical column to [-a, a]. If value is missing, x\n defaults to 1.\n "transform: one_hot"\n treats the string column as categorical and makes one-hot\n encoding of it.\n "transform: embedding\n embedding_dim: d"\n treats the string column as categorical and makes embeddings of\n it with specified dimension size.\n "transform: bag_of_words"\n treats the string column as text and make bag of words\n transform of it.\n "transform: tfidf"\n treats the string column as text and make TFIDF transform of it.\n "transform: image_to_vec\n checkpoint: gs://b/o"\n from image gs url to embeddings. "checkpoint" is a inception v3\n checkpoint. If absent, a default checkpoint is used.\n "transform: target"\n denotes the column is the target. If the schema type of this\n column is string, a one_hot encoding is automatically applied.\n If numerical, an identity transform is automatically applied.\n "transform: key"\n column contains metadata-like information and will be output\n as-is in prediction.')) analyze_parser.set_defaults(func=_analyze) transform_parser = parser.subcommand('transform', formatter_class=argparse.RawTextHelpFormatter, help='Transform the data into tf.example which is more efficient in training.', epilog=textwrap.dedent(' Example usage:\n\n %%ml transform [--cloud] [--shuffle]\n analysis: path/to/analysis_output_folder\n output: path/to/dir\n batch_size: 100\n data: $mydataset\n cloud:\n num_workers: 3\n worker_machine_type: n1-standard-1\n project_id: my_project_id')) transform_parser.add_argument('--analysis', required=True, help='path of analysis output directory.') transform_parser.add_argument('--output', required=True, help='path of output directory.') transform_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run transform in cloud or local.') transform_parser.add_argument('--shuffle', action='store_true', default=False, help='whether to shuffle the training data in output.') transform_parser.add_argument('--batch_size', type=int, default=100, help='number of instances in a batch to process once. Larger batch is more efficient but may consume more memory.') transform_parser.add_argument('--package', required=False, help='A local or GCS tarball path to use as the source. If not set, the default source package will be used.') transform_parser.add_cell_argument('data', required=True, help='Training data. A dataset defined by "%%ml dataset".') transform_parser.add_cell_argument('cloud_config', help=textwrap.dedent(" A dictionary of cloud config. All of them are optional.\n num_workers: Dataflow number of workers. If not set, DataFlow\n service will determine the number.\n worker_machine_type: a machine name from\n https://cloud.google.com/compute/docs/machine-types\n If not given, the service uses the default machine type.\n project_id: id of the project to use for DataFlow service. If not set,\n Datalab's default project (set by %%datalab project set) is used.\n job_name: Unique name for a Dataflow job to use. 
If not set, a\n random name will be used.")) transform_parser.set_defaults(func=_transform) train_parser = parser.subcommand('train', formatter_class=argparse.RawTextHelpFormatter, help='Train a model.', epilog=textwrap.dedent(' Example usage:\n\n %%ml train [--cloud]\n analysis: path/to/analysis_output\n output: path/to/dir\n data: $mydataset\n model_args:\n model: linear_regression\n cloud_config:\n region: us-central1')) train_parser.add_argument('--analysis', required=True, help='path of analysis output directory.') train_parser.add_argument('--output', required=True, help='path of trained model directory.') train_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run training in cloud or local.') train_parser.add_argument('--notb', action='store_true', default=False, help='If set, tensorboard is not automatically started.') train_parser.add_argument('--package', required=False, help='A local or GCS tarball path to use as the source. If not set, the default source package will be used.') train_parser.add_cell_argument('data', required=True, help='Training data. A dataset defined by "%%ml dataset".') package_model_help = subprocess.Popen(['python', '-m', 'trainer.task', '--datalab-help'], cwd=DEFAULT_PACKAGE_PATH, stdout=subprocess.PIPE).communicate()[0] package_model_help = 'model_args: a dictionary of model specific args, including:\n\n' + package_model_help.decode() train_parser.add_cell_argument('model_args', help=package_model_help) train_parser.add_cell_argument('cloud_config', help=textwrap.dedent(' A dictionary of cloud training config, including:\n job_id: the name of the job. If not provided, a default job name is created.\n region: see {url}\n runtime_version: see "region". Must be a string like \'1.2\'.\n scale_tier: see "region".'.format(url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/training'))) train_parser.set_defaults(func=_train) predict_parser = parser.subcommand('predict', formatter_class=argparse.RawTextHelpFormatter, help='Predict with local or deployed models. (Good for small datasets).', epilog=textwrap.dedent(" Example usage:\n\n %%ml predict\n headers: key,num\n model: path/to/model\n data:\n - key1,value1\n - key2,value2\n\n Or, in another cell, define a list of dict:\n\n my_data = [{'key': 1, 'num': 1.2}, {'key': 2, 'num': 2.8}]\n\n Then:\n\n %%ml predict\n headers: key,num\n model: path/to/model\n data: $my_data")) predict_parser.add_argument('--model', required=True, help='The model path.') predict_parser.add_argument('--no_show_image', action='store_true', default=False, help='If not set, add a column of images in output.') predict_parser.add_cell_argument('data', required=True, help=textwrap.dedent(' Prediction data can be\n 1) CSV lines in the input cell in yaml format or\n 2) a local variable which is one of\n a) list of dict\n b) list of strings of csv lines\n c) a Pandas DataFrame')) predict_parser.set_defaults(func=_predict) batch_predict_parser = parser.subcommand('batch_predict', formatter_class=argparse.RawTextHelpFormatter, help='Batch prediction with local or deployed models. 
(Good for large datasets)', epilog=textwrap.dedent('\n Example usage:\n\n %%ml batch_predict [--cloud]\n model: path/to/model\n output: path/to/output\n format: csv\n data:\n csv: path/to/file_pattern')) batch_predict_parser.add_argument('--model', required=True, help='The model path if not --cloud, or the id in the form of model.version if --cloud.') batch_predict_parser.add_argument('--output', required=True, help='The path of output directory with prediction results. If --cloud, it has to be GCS path.') batch_predict_parser.add_argument('--format', help='csv or json. For cloud run, the only supported format is json.') batch_predict_parser.add_argument('--batch_size', type=int, default=100, help='number of instances in a batch to process once. Larger batch is more efficient but may consume more memory. Only used in local run.') batch_predict_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run prediction in cloud or local.') batch_predict_parser.add_cell_argument('data', required=True, help='Data to predict with. Only csv is supported.') batch_predict_parser.add_cell_argument('cloud_config', help=textwrap.dedent(' A dictionary of cloud batch prediction config.\n job_id: the name of the job. If not provided, a default job name is created.\n region: see {url}\n max_worker_count: see reference in "region".'.format(url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/prediction'))) # noqa batch_predict_parser.set_defaults(func=_batch_predict) explain_parser = parser.subcommand('explain', formatter_class=argparse.RawTextHelpFormatter, help='Explain a prediction with LIME tool.') explain_parser.add_argument('--type', default='all', choices=['text', 'image', 'tabular', 'all'], help='the type of column to explain.') explain_parser.add_argument('--algorithm', choices=['lime', 'ig'], default='lime', help='"lime" is the open sourced project for prediction explainer.' + '"ig" means integrated gradients and currently only applies ' + 'to image.') explain_parser.add_argument('--model', required=True, help='path of the model directory used for prediction.') explain_parser.add_argument('--labels', required=True, help='comma separated labels to explain.') explain_parser.add_argument('--column_name', help='the name of the column to explain. Optional if text type ' + 'and there is only one text column, or image type and ' + 'there is only one image column.') explain_parser.add_cell_argument('data', required=True, help='Prediction Data. Can be a csv line, or a dict.') explain_parser.add_cell_argument('training_data', help='A csv or bigquery dataset defined by %%ml dataset. ' + 'Used by tabular explainer only to determine the ' + 'distribution of numeric and categorical values. ' + 'Suggest using original training dataset.') # options specific for lime explain_parser.add_argument('--num_features', type=int, help='number of features to analyze. In text, it is number of ' + 'words. In image, it is number of areas. For lime only.') explain_parser.add_argument('--num_samples', type=int, help='size of the neighborhood to learn the linear model. ' + 'For lime only.') explain_parser.add_argument('--hide_color', type=int, default=0, help='the color to use for perturbed area. If -1, average of ' + 'each channel is used for each channel. For image only.') explain_parser.add_argument('--include_negative', action='store_true', default=False, help='whether to show only positive areas. 
For lime image only.') explain_parser.add_argument('--overview', action='store_true', default=False, help='whether to show overview instead of details view.' + 'For lime text and tabular only.') explain_parser.add_argument('--batch_size', type=int, default=100, help='size of batches passed to prediction. For lime only.') # options specific for integrated gradients explain_parser.add_argument('--num_gradients', type=int, default=50, help='the number of scaled images to get gradients from. Larger ' + 'number usually produces better results but slower.') explain_parser.add_argument('--percent_show', type=int, default=10, help='the percentage of top impactful pixels to show.') explain_parser.set_defaults(func=_explain) tensorboard_parser = parser.subcommand('tensorboard', formatter_class=argparse.RawTextHelpFormatter, help='Start/stop/list TensorBoard instances.') tensorboard_sub_commands = tensorboard_parser.add_subparsers(dest='command') tensorboard_start_parser = tensorboard_sub_commands.add_parser('start', help='Start a tensorboard instance.') tensorboard_start_parser.add_argument('--logdir', required=True, help='The local or GCS logdir path.') tensorboard_start_parser.set_defaults(func=_tensorboard_start) tensorboard_stop_parser = tensorboard_sub_commands.add_parser('stop', help='Stop a tensorboard instance.') tensorboard_stop_parser.add_argument('--pid', required=True, type=int, help='The pid of the tensorboard instance.') tensorboard_stop_parser.set_defaults(func=_tensorboard_stop) tensorboard_list_parser = tensorboard_sub_commands.add_parser('list', help='List tensorboard instances.') tensorboard_list_parser.set_defaults(func=_tensorboard_list) evaluate_parser = parser.subcommand('evaluate', formatter_class=argparse.RawTextHelpFormatter, help='Analyze model evaluation results, such as confusion matrix, ROC, RMSE.') evaluate_sub_commands = evaluate_parser.add_subparsers(dest='command') def _add_data_params_for_evaluate(parser): parser.add_argument('--csv', help='csv file path patterns.') parser.add_argument('--headers', help='csv file headers. 
Required if csv is specified and ' + 'predict_results_schema.json does not exist in the same directory.') parser.add_argument('--bigquery', help='can be bigquery table, query as a string, or ' + 'a pre-defined query (%%bq query --name).') evaluate_cm_parser = evaluate_sub_commands.add_parser('confusion_matrix', help='Get confusion matrix from evaluation results.') _add_data_params_for_evaluate(evaluate_cm_parser) evaluate_cm_parser.add_argument('--plot', action='store_true', default=False, help='Whether to plot confusion matrix as graph.') evaluate_cm_parser.add_argument('--size', type=int, default=10, help='The size of the confusion matrix.') evaluate_cm_parser.set_defaults(func=_evaluate_cm) evaluate_accuracy_parser = evaluate_sub_commands.add_parser('accuracy', help='Get accuracy results from classification evaluation results.') _add_data_params_for_evaluate(evaluate_accuracy_parser) evaluate_accuracy_parser.set_defaults(func=_evaluate_accuracy) evaluate_pr_parser = evaluate_sub_commands.add_parser('precision_recall', help='Get precision recall metrics from evaluation results.') _add_data_params_for_evaluate(evaluate_pr_parser) evaluate_pr_parser.add_argument('--plot', action='store_true', default=False, help='Whether to plot precision recall as graph.') evaluate_pr_parser.add_argument('--num_thresholds', type=int, default=20, help='Number of thresholds which determines how many ' + 'points in the graph.') evaluate_pr_parser.add_argument('--target_class', required=True, help='The target class to determine correctness of ' + 'a prediction.') evaluate_pr_parser.add_argument('--probability_column', help='The name of the column holding the probability ' + 'value of the target class. If absent, the value ' + 'of target class is used.') evaluate_pr_parser.set_defaults(func=_evaluate_pr) evaluate_roc_parser = evaluate_sub_commands.add_parser('roc', help='Get ROC metrics from evaluation results.') _add_data_params_for_evaluate(evaluate_roc_parser) evaluate_roc_parser.add_argument('--plot', action='store_true', default=False, help='Whether to plot ROC as graph.') evaluate_roc_parser.add_argument('--num_thresholds', type=int, default=20, help='Number of thresholds which determines how many ' + 'points in the graph.') evaluate_roc_parser.add_argument('--target_class', required=True, help='The target class to determine correctness of ' + 'a prediction.') evaluate_roc_parser.add_argument('--probability_column', help='The name of the column holding the probability ' + 'value of the target class. If absent, the value ' + 'of target class is used.') evaluate_roc_parser.set_defaults(func=_evaluate_roc) evaluate_regression_parser = evaluate_sub_commands.add_parser('regression', help='Get regression metrics from evaluation results.') _add_data_params_for_evaluate(evaluate_regression_parser) evaluate_regression_parser.set_defaults(func=_evaluate_regression) model_parser = parser.subcommand('model', help='Models and versions management such as deployment, deletion, listing.') model_sub_commands = model_parser.add_subparsers(dest='command') model_list_parser = model_sub_commands.add_parser('list', help='List models and versions.') model_list_parser.add_argument('--name', help='If absent, list all models of specified or current ' + 'project. If provided, list all versions of the ' + 'model.') model_list_parser.add_argument('--project', help='The project to list model(s) or version(s). 
If absent, ' + "use Datalab's default project.") model_list_parser.set_defaults(func=_model_list) model_delete_parser = model_sub_commands.add_parser('delete', help='Delete models or versions.') model_delete_parser.add_argument('--name', required=True, help='If no "." in the name, try deleting the specified ' + 'model. If "model.version" is provided, try deleting ' + 'the specified version.') model_delete_parser.add_argument('--project', help='The project to delete model or version. If absent, ' + "use Datalab's default project.") model_delete_parser.set_defaults(func=_model_delete) model_deploy_parser = model_sub_commands.add_parser('deploy', help='Deploy a model version.') model_deploy_parser.add_argument('--name', required=True, help='Must be model.version to indicate the model ' + 'and version name to deploy.') model_deploy_parser.add_argument('--path', required=True, help='The GCS path of the model to be deployed.') model_deploy_parser.add_argument('--runtime_version', help='The TensorFlow version to use for this model. ' + 'For example, "1.2.1". If absent, the current ' + 'TensorFlow version installed in Datalab will be used.') model_deploy_parser.add_argument('--project', help='The project to deploy a model version. If absent, ' + "use Datalab's default project.") model_deploy_parser.set_defaults(func=_model_deploy) return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
def invalid_type_error(method_name, arg_name, got_value, expected_type, version='0.13.0'): """Raise a CompilationException when an adapter method available to macros has changed. """ got_type = type(got_value) msg = ("As of {version}, 'adapter.{method_name}' expects argument " "'{arg_name}' to be of type '{expected_type}', instead got " "{got_value} ({got_type})") raise_compiler_error(msg.format(version=version, method_name=method_name, arg_name=arg_name, expected_type=expected_type, got_value=got_value, got_type=got_type))
def function[invalid_type_error, parameter[method_name, arg_name, got_value, expected_type, version]]: constant[Raise a CompilationException when an adapter method available to macros has changed. ] variable[got_type] assign[=] call[name[type], parameter[name[got_value]]] variable[msg] assign[=] constant[As of {version}, 'adapter.{method_name}' expects argument '{arg_name}' to be of type '{expected_type}', instead got {got_value} ({got_type})] call[name[raise_compiler_error], parameter[call[name[msg].format, parameter[]]]]
keyword[def] identifier[invalid_type_error] ( identifier[method_name] , identifier[arg_name] , identifier[got_value] , identifier[expected_type] , identifier[version] = literal[string] ): literal[string] identifier[got_type] = identifier[type] ( identifier[got_value] ) identifier[msg] =( literal[string] literal[string] literal[string] ) identifier[raise_compiler_error] ( identifier[msg] . identifier[format] ( identifier[version] = identifier[version] , identifier[method_name] = identifier[method_name] , identifier[arg_name] = identifier[arg_name] , identifier[expected_type] = identifier[expected_type] , identifier[got_value] = identifier[got_value] , identifier[got_type] = identifier[got_type] ))
def invalid_type_error(method_name, arg_name, got_value, expected_type, version='0.13.0'): """Raise a CompilationException when an adapter method available to macros has changed. """ got_type = type(got_value) msg = "As of {version}, 'adapter.{method_name}' expects argument '{arg_name}' to be of type '{expected_type}', instead got {got_value} ({got_type})" raise_compiler_error(msg.format(version=version, method_name=method_name, arg_name=arg_name, expected_type=expected_type, got_value=got_value, got_type=got_type))
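To preview the message this helper produces, the template can be formatted directly; a minimal sketch with hypothetical argument values, using a plain `print` in place of dbt's `raise_compiler_error`:

msg = ("As of {version}, 'adapter.{method_name}' expects argument "
       "'{arg_name}' to be of type '{expected_type}', instead got "
       "{got_value} ({got_type})")

# hypothetical call site: a macro passed an int where a str was expected
print(msg.format(version='0.13.0', method_name='get_relation',
                 arg_name='schema', expected_type='str',
                 got_value=123, got_type=type(123)))
# As of 0.13.0, 'adapter.get_relation' expects argument 'schema' to be of
# type 'str', instead got 123 (<class 'int'>)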
def pow(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent': '''Returns a TensorFluent for the pow function. Args: x: The first operand. y: The second operand. Returns: A TensorFluent wrapping the pow function. ''' return cls._binary_op(x, y, tf.pow, tf.float32)
def function[pow, parameter[cls, x, y]]: constant[Returns a TensorFluent for the pow function. Args: x: The first operand. y: The second operand. Returns: A TensorFluent wrapping the pow function. ] return[call[name[cls]._binary_op, parameter[name[x], name[y], name[tf].pow, name[tf].float32]]]
keyword[def] identifier[pow] ( identifier[cls] , identifier[x] : literal[string] , identifier[y] : literal[string] )-> literal[string] : literal[string] keyword[return] identifier[cls] . identifier[_binary_op] ( identifier[x] , identifier[y] , identifier[tf] . identifier[pow] , identifier[tf] . identifier[float32] )
def pow(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent': """Returns a TensorFluent for the pow function. Args: x: The first operand. y: The second operand. Returns: A TensorFluent wrapping the pow function. """ return cls._binary_op(x, y, tf.pow, tf.float32)
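`pow` defers to `cls._binary_op`, passing `tf.pow` as the operation and `tf.float32` as the dtype, so the underlying computation is an elementwise power over float32 operands. A minimal TensorFlow sketch of that operation outside the TensorFluent wrapper (TF 2.x eager mode assumed):

import tensorflow as tf

x = tf.constant([2.0, 3.0])
y = tf.constant([4.0, 2.0])

# elementwise power with operands cast to float32, as the dtype argument suggests
result = tf.pow(tf.cast(x, tf.float32), tf.cast(y, tf.float32))
print(result.numpy())  # [16.  9.]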
def get_ntp_peers(self): """Return the NTP peers configured on the device.""" ntp_table = junos_views.junos_ntp_peers_config_table(self.device) ntp_table.get() ntp_peers = ntp_table.items() if not ntp_peers: return {} return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
def function[get_ntp_peers, parameter[self]]: constant[Return the NTP peers configured on the device.] variable[ntp_table] assign[=] call[name[junos_views].junos_ntp_peers_config_table, parameter[name[self].device]] call[name[ntp_table].get, parameter[]] variable[ntp_peers] assign[=] call[name[ntp_table].items, parameter[]] if <ast.UnaryOp object at 0x7da1b0f13ee0> begin[:] return[dictionary[[], []]] return[<ast.DictComp object at 0x7da1b0f13100>]
keyword[def] identifier[get_ntp_peers] ( identifier[self] ): literal[string] identifier[ntp_table] = identifier[junos_views] . identifier[junos_ntp_peers_config_table] ( identifier[self] . identifier[device] ) identifier[ntp_table] . identifier[get] () identifier[ntp_peers] = identifier[ntp_table] . identifier[items] () keyword[if] keyword[not] identifier[ntp_peers] : keyword[return] {} keyword[return] { identifier[napalm_base] . identifier[helpers] . identifier[ip] ( identifier[peer] [ literal[int] ]):{} keyword[for] identifier[peer] keyword[in] identifier[ntp_peers] }
def get_ntp_peers(self): """Return the NTP peers configured on the device.""" ntp_table = junos_views.junos_ntp_peers_config_table(self.device) ntp_table.get() ntp_peers = ntp_table.items() if not ntp_peers: return {} # depends on [control=['if'], data=[]] return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
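The return value maps each normalized peer address to an empty dict. A minimal sketch of the final comprehension over hypothetical table items; an identity function stands in for `napalm_base.helpers.ip`, which canonicalizes the address string:

def normalize_ip(addr):
    # stand-in for napalm_base.helpers.ip
    return addr

# each table item is a (key, fields) pair; addresses are hypothetical
ntp_peers = [('10.0.0.1', []), ('10.0.0.2', [])]
print({normalize_ip(peer[0]): {} for peer in ntp_peers})
# {'10.0.0.1': {}, '10.0.0.2': {}}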
def get_insert_fields_and_values_from_dict(dictionary, datetime_format='%Y-%m-%d %H:%M:%S', db_escape=True): """Formats a dictionary to strings of fields and values for insert statements @param dictionary: The dictionary whose keys and values are to be inserted @param datetime_format: Format string used to render datetime values during escaping @param db_escape: If true, will db escape values @return: Tuple of strings containing string fields and values, e.g. ('user_id, username', '5, "pandaman"') """ if db_escape: CoyoteDb.escape_dictionary(dictionary, datetime_format=datetime_format) fields = get_delimited_string_from_list(dictionary.keys(), delimiter=',') # keys have no quotes vals = get_delimited_string_from_list(dictionary.values(), delimiter=',') # strings get quotes return fields, vals
def function[get_insert_fields_and_values_from_dict, parameter[dictionary, datetime_format, db_escape]]: constant[Formats a dictionary to strings of fields and values for insert statements @param dictionary: The dictionary whose keys and values are to be inserted @param datetime_format: Format string used to render datetime values during escaping @param db_escape: If true, will db escape values @return: Tuple of strings containing string fields and values, e.g. ('user_id, username', '5, "pandaman"') ] if name[db_escape] begin[:] call[name[CoyoteDb].escape_dictionary, parameter[name[dictionary]]] variable[fields] assign[=] call[name[get_delimited_string_from_list], parameter[call[name[dictionary].keys, parameter[]]]] variable[vals] assign[=] call[name[get_delimited_string_from_list], parameter[call[name[dictionary].values, parameter[]]]] return[tuple[[<ast.Name object at 0x7da1b10c1d50>, <ast.Name object at 0x7da1b10c24a0>]]]
keyword[def] identifier[get_insert_fields_and_values_from_dict] ( identifier[dictionary] , identifier[datetime_format] = literal[string] , identifier[db_escape] = keyword[True] ): literal[string] keyword[if] identifier[db_escape] : identifier[CoyoteDb] . identifier[escape_dictionary] ( identifier[dictionary] , identifier[datetime_format] = identifier[datetime_format] ) identifier[fields] = identifier[get_delimited_string_from_list] ( identifier[dictionary] . identifier[keys] (), identifier[delimiter] = literal[string] ) identifier[vals] = identifier[get_delimited_string_from_list] ( identifier[dictionary] . identifier[values] (), identifier[delimiter] = literal[string] ) keyword[return] identifier[fields] , identifier[vals]
def get_insert_fields_and_values_from_dict(dictionary, datetime_format='%Y-%m-%d %H:%M:%S', db_escape=True): """Formats a dictionary to strings of fields and values for insert statements @param dictionary: The dictionary whose keys and values are to be inserted @param datetime_format: Format string used to render datetime values during escaping @param db_escape: If true, will db escape values @return: Tuple of strings containing string fields and values, e.g. ('user_id, username', '5, "pandaman"') """ if db_escape: CoyoteDb.escape_dictionary(dictionary, datetime_format=datetime_format) # depends on [control=['if'], data=[]] fields = get_delimited_string_from_list(dictionary.keys(), delimiter=',') # keys have no quotes vals = get_delimited_string_from_list(dictionary.values(), delimiter=',') # strings get quotes return (fields, vals)
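A sketch of the intended output with a simplified stand-in for `get_delimited_string_from_list` (the real helper and `CoyoteDb.escape_dictionary` are project-internal); the `users` table name and row values are hypothetical, chosen to match the docstring's example:

def get_delimited_string_from_list(items, delimiter=','):
    # simplified stand-in: strings get quotes, other values render as-is
    return delimiter.join('"%s"' % i if isinstance(i, str) else str(i)
                          for i in items)

row = {'user_id': 5, 'username': 'pandaman'}
fields = ','.join(row.keys())  # keys have no quotes
vals = get_delimited_string_from_list(row.values())
print('INSERT INTO users (%s) VALUES (%s)' % (fields, vals))
# INSERT INTO users (user_id,username) VALUES (5,"pandaman")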
def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward, hparams): """Builds one branch of a two-branch shake-shake convnet.""" is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN x = tf.nn.relu(x) x = tf.layers.conv2d( x, output_filters, (3, 3), strides=(stride, stride), padding="SAME", name="conv1") x = tf.layers.batch_normalization(x, training=is_training, name="bn1") x = tf.nn.relu(x) x = tf.layers.conv2d(x, output_filters, (3, 3), padding="SAME", name="conv2") x = tf.layers.batch_normalization(x, training=is_training, name="bn2") if is_training: x = x * rand_backward + tf.stop_gradient(x * rand_forward - x * rand_backward) else: x *= 1.0 / hparams.shake_shake_num_branches return x
def function[shake_shake_branch, parameter[x, output_filters, stride, rand_forward, rand_backward, hparams]]: constant[Builds one branch of a two-branch shake-shake convnet.] variable[is_training] assign[=] compare[name[hparams].mode equal[==] name[tf].estimator.ModeKeys.TRAIN] variable[x] assign[=] call[name[tf].nn.relu, parameter[name[x]]] variable[x] assign[=] call[name[tf].layers.conv2d, parameter[name[x], name[output_filters], tuple[[<ast.Constant object at 0x7da18ede4880>, <ast.Constant object at 0x7da18ede6320>]]]] variable[x] assign[=] call[name[tf].layers.batch_normalization, parameter[name[x]]] variable[x] assign[=] call[name[tf].nn.relu, parameter[name[x]]] variable[x] assign[=] call[name[tf].layers.conv2d, parameter[name[x], name[output_filters], tuple[[<ast.Constant object at 0x7da20cabd480>, <ast.Constant object at 0x7da20cabc2b0>]]]] variable[x] assign[=] call[name[tf].layers.batch_normalization, parameter[name[x]]] if name[is_training] begin[:] variable[x] assign[=] binary_operation[binary_operation[name[x] * name[rand_backward]] + call[name[tf].stop_gradient, parameter[binary_operation[binary_operation[name[x] * name[rand_forward]] - binary_operation[name[x] * name[rand_backward]]]]]] return[name[x]]
keyword[def] identifier[shake_shake_branch] ( identifier[x] , identifier[output_filters] , identifier[stride] , identifier[rand_forward] , identifier[rand_backward] , identifier[hparams] ): literal[string] identifier[is_training] = identifier[hparams] . identifier[mode] == identifier[tf] . identifier[estimator] . identifier[ModeKeys] . identifier[TRAIN] identifier[x] = identifier[tf] . identifier[nn] . identifier[relu] ( identifier[x] ) identifier[x] = identifier[tf] . identifier[layers] . identifier[conv2d] ( identifier[x] , identifier[output_filters] ,( literal[int] , literal[int] ), identifier[strides] =( identifier[stride] , identifier[stride] ), identifier[padding] = literal[string] , identifier[name] = literal[string] ) identifier[x] = identifier[tf] . identifier[layers] . identifier[batch_normalization] ( identifier[x] , identifier[training] = identifier[is_training] , identifier[name] = literal[string] ) identifier[x] = identifier[tf] . identifier[nn] . identifier[relu] ( identifier[x] ) identifier[x] = identifier[tf] . identifier[layers] . identifier[conv2d] ( identifier[x] , identifier[output_filters] ,( literal[int] , literal[int] ), identifier[padding] = literal[string] , identifier[name] = literal[string] ) identifier[x] = identifier[tf] . identifier[layers] . identifier[batch_normalization] ( identifier[x] , identifier[training] = identifier[is_training] , identifier[name] = literal[string] ) keyword[if] identifier[is_training] : identifier[x] = identifier[x] * identifier[rand_backward] + identifier[tf] . identifier[stop_gradient] ( identifier[x] * identifier[rand_forward] - identifier[x] * identifier[rand_backward] ) keyword[else] : identifier[x] *= literal[int] / identifier[hparams] . identifier[shake_shake_num_branches] keyword[return] identifier[x]
def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward, hparams): """Builds one branch of a two-branch shake-shake convnet.""" is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN x = tf.nn.relu(x) x = tf.layers.conv2d(x, output_filters, (3, 3), strides=(stride, stride), padding='SAME', name='conv1') x = tf.layers.batch_normalization(x, training=is_training, name='bn1') x = tf.nn.relu(x) x = tf.layers.conv2d(x, output_filters, (3, 3), padding='SAME', name='conv2') x = tf.layers.batch_normalization(x, training=is_training, name='bn2') if is_training: x = x * rand_backward + tf.stop_gradient(x * rand_forward - x * rand_backward) # depends on [control=['if'], data=[]] else: x *= 1.0 / hparams.shake_shake_num_branches return x
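The `stop_gradient` line is the shake-shake trick: the forward value equals `x * rand_forward`, while the gradient flows as if the branch were scaled by `rand_backward`. A minimal sketch verifying both properties with hypothetical scalar weights (TF 2.x eager mode, `GradientTape` in place of the TF1-style graph used above):

import tensorflow as tf

x = tf.Variable(3.0)
rand_forward, rand_backward = 0.7, 0.2  # hypothetical branch weights

with tf.GradientTape() as tape:
    y = x * rand_backward + tf.stop_gradient(x * rand_forward - x * rand_backward)

print(y.numpy())                    # 2.1 == 3.0 * rand_forward
print(tape.gradient(y, x).numpy())  # 0.2 == rand_backward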