Dataset schema — each row pairs four string representations of the same Python function:

code: the raw source of the function (string lengths 75 to 104k characters)
code_sememe: an AST-walk rendering of the same function in a constant/variable/call "sememe" notation (lengths 47 to 309k)
token_type: the source with every token tagged as keyword, identifier, or literal (lengths 215 to 214k)
code_dependency: the source re-emitted with "# depends on [control=..., data=...]" annotations marking control and data dependencies (lengths 75 to 155k)

The samples below follow that column order, one function per row.
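A minimal sketch of loading and inspecting one row of a dataset with this schema via the Hugging Face datasets library; the repository id "user/code-representations" is a placeholder, not the dataset's real path:

    from datasets import load_dataset

    # Hypothetical repository id -- substitute the dataset's real one.
    ds = load_dataset("user/code-representations", split="train")

    row = ds[0]
    print(row["code"][:200])             # raw source of the function
    print(row["code_sememe"][:200])      # AST-walk sememe rendering
    print(row["token_type"][:200])       # keyword/identifier/literal tagging
    print(row["code_dependency"][:200])  # control/data dependency annotations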
def save(self):
    """Save/updates the user"""
    import json
    payload = {}
    payload.update(self._store)
    payload["user"] = payload["username"]
    payload["passwd"] = payload["password"]
    del(payload["username"])
    del(payload["password"])
    payload = json.dumps(payload, default=str)
    if not self.URL:
        if "username" not in self._store or "password" not in self._store:
            raise KeyError("You must define self['username'] and self['password'] to be able to create a new user")
        r = self.connection.session.post(self.users.URL, data=payload)
        data = r.json()
        if r.status_code == 201:
            self._set(data)
        else:
            raise CreationError("Unable to create new user", data)
    else:
        r = self.connection.session.put(self.URL, data=payload)
        data = r.json()
        if r.status_code == 200:
            self._set(data)
        else:
            raise UpdateError("Unable to update user, status: %s" % r.status_code, data)
def function[save, parameter[self]]: constant[Save/updates the user] import module[json] variable[payload] assign[=] dictionary[[], []] call[name[payload].update, parameter[name[self]._store]] call[name[payload]][constant[user]] assign[=] call[name[payload]][constant[username]] call[name[payload]][constant[passwd]] assign[=] call[name[payload]][constant[password]] <ast.Delete object at 0x7da1b0f0e980> <ast.Delete object at 0x7da1b0f0da80> variable[payload] assign[=] call[name[json].dumps, parameter[name[payload]]] if <ast.UnaryOp object at 0x7da1b0f0e350> begin[:] if <ast.BoolOp object at 0x7da1b0f0d660> begin[:] <ast.Raise object at 0x7da1b0dc06a0> variable[r] assign[=] call[name[self].connection.session.post, parameter[name[self].users.URL]] variable[data] assign[=] call[name[r].json, parameter[]] if compare[name[r].status_code equal[==] constant[201]] begin[:] call[name[self]._set, parameter[name[data]]]
keyword[def] identifier[save] ( identifier[self] ): literal[string] keyword[import] identifier[json] identifier[payload] ={} identifier[payload] . identifier[update] ( identifier[self] . identifier[_store] ) identifier[payload] [ literal[string] ]= identifier[payload] [ literal[string] ] identifier[payload] [ literal[string] ]= identifier[payload] [ literal[string] ] keyword[del] ( identifier[payload] [ literal[string] ]) keyword[del] ( identifier[payload] [ literal[string] ]) identifier[payload] = identifier[json] . identifier[dumps] ( identifier[payload] , identifier[default] = identifier[str] ) keyword[if] keyword[not] identifier[self] . identifier[URL] : keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_store] keyword[or] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_store] : keyword[raise] identifier[KeyError] ( literal[string] ) identifier[r] = identifier[self] . identifier[connection] . identifier[session] . identifier[post] ( identifier[self] . identifier[users] . identifier[URL] , identifier[data] = identifier[payload] ) identifier[data] = identifier[r] . identifier[json] () keyword[if] identifier[r] . identifier[status_code] == literal[int] : identifier[self] . identifier[_set] ( identifier[data] ) keyword[else] : keyword[raise] identifier[CreationError] ( literal[string] , identifier[data] ) keyword[else] : identifier[r] = identifier[self] . identifier[connection] . identifier[session] . identifier[put] ( identifier[self] . identifier[URL] , identifier[data] = identifier[payload] ) identifier[data] = identifier[r] . identifier[json] () keyword[if] identifier[r] . identifier[status_code] == literal[int] : identifier[self] . identifier[_set] ( identifier[data] ) keyword[else] : keyword[raise] identifier[UpdateError] ( literal[string] % identifier[r] . identifier[status_code] , identifier[data] )
def save(self): """Save/updates the user""" import json payload = {} payload.update(self._store) payload['user'] = payload['username'] payload['passwd'] = payload['password'] del payload['username'] del payload['password'] payload = json.dumps(payload, default=str) if not self.URL: if 'username' not in self._store or 'password' not in self._store: raise KeyError("You must define self['name'] and self['password'] to be able to create a new user") # depends on [control=['if'], data=[]] r = self.connection.session.post(self.users.URL, data=payload) data = r.json() if r.status_code == 201: self._set(data) # depends on [control=['if'], data=[]] else: raise CreationError('Unable to create new user', data) # depends on [control=['if'], data=[]] else: r = self.connection.session.put(self.URL, data=payload) data = r.json() if r.status_code == 200: self._set(data) # depends on [control=['if'], data=[]] else: raise UpdateError('Unable to update user, status: %s' % r.status_code, data)
def inc(self):
    """Get index for new entry."""
    self.lock.acquire()
    cur = self.counter
    self.counter += 1
    self.lock.release()
    return cur
def function[inc, parameter[self]]: constant[Get index for new entry.] call[name[self].lock.acquire, parameter[]] variable[cur] assign[=] name[self].counter <ast.AugAssign object at 0x7da2054a5d50> call[name[self].lock.release, parameter[]] return[name[cur]]
keyword[def] identifier[inc] ( identifier[self] ): literal[string] identifier[self] . identifier[lock] . identifier[acquire] () identifier[cur] = identifier[self] . identifier[counter] identifier[self] . identifier[counter] += literal[int] identifier[self] . identifier[lock] . identifier[release] () keyword[return] identifier[cur]
def inc(self): """Get index for new entry.""" self.lock.acquire() cur = self.counter self.counter += 1 self.lock.release() return cur
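The `inc` row above is the classic lock-guarded counter. A self-contained sketch of the same pattern under contention; the `Allocator` class is a hypothetical stand-in for the row's object:

    import threading

    class Allocator:
        def __init__(self):
            self.lock = threading.Lock()
            self.counter = 0

        def inc(self):
            """Get index for new entry."""
            self.lock.acquire()
            cur = self.counter
            self.counter += 1
            self.lock.release()
            return cur

    alloc = Allocator()
    threads = [threading.Thread(target=lambda: [alloc.inc() for _ in range(1000)])
               for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(alloc.counter)  # 4000: no increments are lost under contention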
def insert(self, index: int, string: str) -> None:
    """Insert the given string before the specified index.

    This method has the same effect as ``self[index:index] = string``;
    it only avoids some condition checks as it rules out the possibility
    of the key being a slice, or the need to shrink any of the sub-spans.

    If parse is False, don't parse the inserted string.
    """
    ss, se = self._span
    lststr = self._lststr
    lststr0 = lststr[0]
    if index < 0:
        index += se - ss
        if index < 0:
            index = 0
    elif index > se - ss:  # Note that it is not >=. Index can be new.
        index = se - ss
    index += ss
    # Update lststr
    lststr[0] = lststr0[:index] + string + lststr0[index:]
    string_len = len(string)
    # Update spans
    self._insert_update(index=index, length=string_len)
    # Remember newly added spans by the string.
    type_to_spans = self._type_to_spans
    for type_, spans in parse_to_spans(
        bytearray(string, 'ascii', 'replace')
    ).items():
        for s, e in spans:
            insort(type_to_spans[type_], [index + s, index + e])
def function[insert, parameter[self, index, string]]: constant[Insert the given string before the specified index. This method has the same effect as ``self[index:index] = string``; it only avoids some condition checks as it rules out the possibility of the key being an slice, or the need to shrink any of the sub-spans. If parse is False, don't parse the inserted string. ] <ast.Tuple object at 0x7da20c993a60> assign[=] name[self]._span variable[lststr] assign[=] name[self]._lststr variable[lststr0] assign[=] call[name[lststr]][constant[0]] if compare[name[index] less[<] constant[0]] begin[:] <ast.AugAssign object at 0x7da20c9916c0> if compare[name[index] less[<] constant[0]] begin[:] variable[index] assign[=] constant[0] <ast.AugAssign object at 0x7da1b025ff10> call[name[lststr]][constant[0]] assign[=] binary_operation[binary_operation[call[name[lststr0]][<ast.Slice object at 0x7da1b025fe50>] + name[string]] + call[name[lststr0]][<ast.Slice object at 0x7da1b025ef50>]] variable[string_len] assign[=] call[name[len], parameter[name[string]]] call[name[self]._insert_update, parameter[]] variable[type_to_spans] assign[=] name[self]._type_to_spans for taget[tuple[[<ast.Name object at 0x7da1b025d600>, <ast.Name object at 0x7da1b025e890>]]] in starred[call[call[name[parse_to_spans], parameter[call[name[bytearray], parameter[name[string], constant[ascii], constant[replace]]]]].items, parameter[]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b025fbe0>, <ast.Name object at 0x7da1b025c400>]]] in starred[name[spans]] begin[:] call[name[insort], parameter[call[name[type_to_spans]][name[type_]], list[[<ast.BinOp object at 0x7da1b025d510>, <ast.BinOp object at 0x7da1b025d8a0>]]]]
keyword[def] identifier[insert] ( identifier[self] , identifier[index] : identifier[int] , identifier[string] : identifier[str] )-> keyword[None] : literal[string] identifier[ss] , identifier[se] = identifier[self] . identifier[_span] identifier[lststr] = identifier[self] . identifier[_lststr] identifier[lststr0] = identifier[lststr] [ literal[int] ] keyword[if] identifier[index] < literal[int] : identifier[index] += identifier[se] - identifier[ss] keyword[if] identifier[index] < literal[int] : identifier[index] = literal[int] keyword[elif] identifier[index] > identifier[se] - identifier[ss] : identifier[index] = identifier[se] - identifier[ss] identifier[index] += identifier[ss] identifier[lststr] [ literal[int] ]= identifier[lststr0] [: identifier[index] ]+ identifier[string] + identifier[lststr0] [ identifier[index] :] identifier[string_len] = identifier[len] ( identifier[string] ) identifier[self] . identifier[_insert_update] ( identifier[index] = identifier[index] , identifier[length] = identifier[string_len] ) identifier[type_to_spans] = identifier[self] . identifier[_type_to_spans] keyword[for] identifier[type_] , identifier[spans] keyword[in] identifier[parse_to_spans] ( identifier[bytearray] ( identifier[string] , literal[string] , literal[string] ) ). identifier[items] (): keyword[for] identifier[s] , identifier[e] keyword[in] identifier[spans] : identifier[insort] ( identifier[type_to_spans] [ identifier[type_] ],[ identifier[index] + identifier[s] , identifier[index] + identifier[e] ])
def insert(self, index: int, string: str) -> None: """Insert the given string before the specified index. This method has the same effect as ``self[index:index] = string``; it only avoids some condition checks as it rules out the possibility of the key being an slice, or the need to shrink any of the sub-spans. If parse is False, don't parse the inserted string. """ (ss, se) = self._span lststr = self._lststr lststr0 = lststr[0] if index < 0: index += se - ss if index < 0: index = 0 # depends on [control=['if'], data=['index']] # depends on [control=['if'], data=['index']] elif index > se - ss: # Note that it is not >=. Index can be new. index = se - ss # depends on [control=['if'], data=['index']] index += ss # Update lststr lststr[0] = lststr0[:index] + string + lststr0[index:] string_len = len(string) # Update spans self._insert_update(index=index, length=string_len) # Remember newly added spans by the string. type_to_spans = self._type_to_spans for (type_, spans) in parse_to_spans(bytearray(string, 'ascii', 'replace')).items(): for (s, e) in spans: insort(type_to_spans[type_], [index + s, index + e]) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
def start_group(self, scol, typ):
    """Start a new group"""
    return Group(parent=self, level=scol, typ=typ)
def function[start_group, parameter[self, scol, typ]]: constant[Start a new group] return[call[name[Group], parameter[]]]
keyword[def] identifier[start_group] ( identifier[self] , identifier[scol] , identifier[typ] ): literal[string] keyword[return] identifier[Group] ( identifier[parent] = identifier[self] , identifier[level] = identifier[scol] , identifier[typ] = identifier[typ] )
def start_group(self, scol, typ): """Start a new group""" return Group(parent=self, level=scol, typ=typ)
def ekacec(handle, segno, recno, column, nvals, cvals, isnull):
    """
    Add data to a character column in a specified EK record.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacec_c.html

    :param handle: EK file handle.
    :type handle: int
    :param segno: Index of segment containing record.
    :type segno: int
    :param recno: Record to which data is to be added.
    :type recno: int
    :param column: Column name.
    :type column: str
    :param nvals: Number of values to add to column.
    :type nvals: int
    :param cvals: Character values to add to column.
    :type cvals: list of str.
    :param isnull: Flag indicating whether column entry is null.
    :type isnull: bool
    """
    handle = ctypes.c_int(handle)
    segno = ctypes.c_int(segno)
    recno = ctypes.c_int(recno)
    column = stypes.stringToCharP(column)
    nvals = ctypes.c_int(nvals)
    vallen = ctypes.c_int(len(max(cvals, key=len)) + 1)
    cvals = stypes.listToCharArrayPtr(cvals)
    isnull = ctypes.c_int(isnull)
    libspice.ekacec_c(handle, segno, recno, column, nvals, vallen, cvals, isnull)
def function[ekacec, parameter[handle, segno, recno, column, nvals, cvals, isnull]]: constant[ Add data to a character column in a specified EK record. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacec_c.html :param handle: EK file handle. :type handle: int :param segno: Index of segment containing record. :type segno: int :param recno: Record to which data is to be added. :type recno: int :param column: Column name. :type column: str :param nvals: Number of values to add to column. :type nvals: int :param cvals: Character values to add to column. :type cvals: list of str. :param isnull: Flag indicating whether column entry is null. :type isnull: bool ] variable[handle] assign[=] call[name[ctypes].c_int, parameter[name[handle]]] variable[segno] assign[=] call[name[ctypes].c_int, parameter[name[segno]]] variable[recno] assign[=] call[name[ctypes].c_int, parameter[name[recno]]] variable[column] assign[=] call[name[stypes].stringToCharP, parameter[name[column]]] variable[nvals] assign[=] call[name[ctypes].c_int, parameter[name[nvals]]] variable[vallen] assign[=] call[name[ctypes].c_int, parameter[binary_operation[call[name[len], parameter[call[name[max], parameter[name[cvals]]]]] + constant[1]]]] variable[cvals] assign[=] call[name[stypes].listToCharArrayPtr, parameter[name[cvals]]] variable[isnull] assign[=] call[name[ctypes].c_int, parameter[name[isnull]]] call[name[libspice].ekacec_c, parameter[name[handle], name[segno], name[recno], name[column], name[nvals], name[vallen], name[cvals], name[isnull]]]
keyword[def] identifier[ekacec] ( identifier[handle] , identifier[segno] , identifier[recno] , identifier[column] , identifier[nvals] , identifier[cvals] , identifier[isnull] ): literal[string] identifier[handle] = identifier[ctypes] . identifier[c_int] ( identifier[handle] ) identifier[segno] = identifier[ctypes] . identifier[c_int] ( identifier[segno] ) identifier[recno] = identifier[ctypes] . identifier[c_int] ( identifier[recno] ) identifier[column] = identifier[stypes] . identifier[stringToCharP] ( identifier[column] ) identifier[nvals] = identifier[ctypes] . identifier[c_int] ( identifier[nvals] ) identifier[vallen] = identifier[ctypes] . identifier[c_int] ( identifier[len] ( identifier[max] ( identifier[cvals] , identifier[key] = identifier[len] ))+ literal[int] ) identifier[cvals] = identifier[stypes] . identifier[listToCharArrayPtr] ( identifier[cvals] ) identifier[isnull] = identifier[ctypes] . identifier[c_int] ( identifier[isnull] ) identifier[libspice] . identifier[ekacec_c] ( identifier[handle] , identifier[segno] , identifier[recno] , identifier[column] , identifier[nvals] , identifier[vallen] , identifier[cvals] , identifier[isnull] )
def ekacec(handle, segno, recno, column, nvals, cvals, isnull): """ Add data to a character column in a specified EK record. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacec_c.html :param handle: EK file handle. :type handle: int :param segno: Index of segment containing record. :type segno: int :param recno: Record to which data is to be added. :type recno: int :param column: Column name. :type column: str :param nvals: Number of values to add to column. :type nvals: int :param cvals: Character values to add to column. :type cvals: list of str. :param isnull: Flag indicating whether column entry is null. :type isnull: bool """ handle = ctypes.c_int(handle) segno = ctypes.c_int(segno) recno = ctypes.c_int(recno) column = stypes.stringToCharP(column) nvals = ctypes.c_int(nvals) vallen = ctypes.c_int(len(max(cvals, key=len)) + 1) cvals = stypes.listToCharArrayPtr(cvals) isnull = ctypes.c_int(isnull) libspice.ekacec_c(handle, segno, recno, column, nvals, vallen, cvals, isnull)
def upload_fail(message=None):
    """Return an upload failed response, for CKEditor >= 4.5.

    For example::

        from flask import send_from_directory
        from flask_ckeditor import upload_success, upload_fail

        app.config['CKEDITOR_FILE_UPLOADER'] = 'upload'  # this value can be endpoint or url

        @app.route('/files/<path:filename>')
        def uploaded_files(filename):
            path = '/the/uploaded/directory'
            return send_from_directory(path, filename)

        @app.route('/upload', methods=['POST'])
        def upload():
            f = request.files.get('upload')
            if extension not in ['jpg', 'gif', 'png', 'jpeg']:
                return upload_fail(message='Image only!')  # <--
            f.save(os.path.join('/the/uploaded/directory', f.filename))
            url = url_for('uploaded_files', filename=f.filename)
            return upload_success(url=url)

    :param message: error message.

    .. versionadded:: 0.4.0
    """
    if message is None:
        message = current_app.config['CKEDITOR_UPLOAD_ERROR_MESSAGE']
    return jsonify(uploaded=0, error={'message': message})
def function[upload_fail, parameter[message]]: constant[Return a upload failed response, for CKEditor >= 4.5. For example:: from flask import send_from_directory from flask_ckeditor import upload_success, upload_fail app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url @app.route('/files/<path:filename>') def uploaded_files(filename): path = '/the/uploaded/directory' return send_from_directory(path, filename) @app.route('/upload', methods=['POST']) def upload(): f = request.files.get('upload') if extension not in ['jpg', 'gif', 'png', 'jpeg']: return upload_fail(message='Image only!') # <-- f.save(os.path.join('/the/uploaded/directory', f.filename)) url = url_for('uploaded_files', filename=f.filename) return upload_success(url=url) :param message: error message. .. versionadded:: 0.4.0 ] if compare[name[message] is constant[None]] begin[:] variable[message] assign[=] call[name[current_app].config][constant[CKEDITOR_UPLOAD_ERROR_MESSAGE]] return[call[name[jsonify], parameter[]]]
keyword[def] identifier[upload_fail] ( identifier[message] = keyword[None] ): literal[string] keyword[if] identifier[message] keyword[is] keyword[None] : identifier[message] = identifier[current_app] . identifier[config] [ literal[string] ] keyword[return] identifier[jsonify] ( identifier[uploaded] = literal[int] , identifier[error] ={ literal[string] : identifier[message] })
def upload_fail(message=None): """Return a upload failed response, for CKEditor >= 4.5. For example:: from flask import send_from_directory from flask_ckeditor import upload_success, upload_fail app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url @app.route('/files/<path:filename>') def uploaded_files(filename): path = '/the/uploaded/directory' return send_from_directory(path, filename) @app.route('/upload', methods=['POST']) def upload(): f = request.files.get('upload') if extension not in ['jpg', 'gif', 'png', 'jpeg']: return upload_fail(message='Image only!') # <-- f.save(os.path.join('/the/uploaded/directory', f.filename)) url = url_for('uploaded_files', filename=f.filename) return upload_success(url=url) :param message: error message. .. versionadded:: 0.4.0 """ if message is None: message = current_app.config['CKEDITOR_UPLOAD_ERROR_MESSAGE'] # depends on [control=['if'], data=['message']] return jsonify(uploaded=0, error={'message': message})
def remove(cls, id):
    """
    Deletes an index with id

    :param id string/document-handle
    """
    api = Client.instance().api
    api.index(id).delete()
def function[remove, parameter[cls, id]]: constant[ Deletes an index with id :param id string/document-handle ] variable[api] assign[=] call[name[Client].instance, parameter[]].api call[call[name[api].index, parameter[name[id]]].delete, parameter[]]
keyword[def] identifier[remove] ( identifier[cls] , identifier[id] ): literal[string] identifier[api] = identifier[Client] . identifier[instance] (). identifier[api] identifier[api] . identifier[index] ( identifier[id] ). identifier[delete] ()
def remove(cls, id): """ Deletes an index with id :param id string/document-handle """ api = Client.instance().api api.index(id).delete()
def run(self, data_cb):
    """Run the event loop."""
    if self._error:
        err = self._error
        if isinstance(self._error, KeyboardInterrupt):
            # KeyboardInterrupt is not destructive(it may be used in
            # the REPL).
            # After throwing KeyboardInterrupt, cleanup the _error field
            # so the loop may be started again
            self._error = None
        raise err
    self._on_data = data_cb
    if threading.current_thread() == main_thread:
        self._setup_signals([signal.SIGINT, signal.SIGTERM])
    debug('Entering event loop')
    self._run()
    debug('Exited event loop')
    if threading.current_thread() == main_thread:
        self._teardown_signals()
        signal.signal(signal.SIGINT, default_int_handler)
    self._on_data = None
def function[run, parameter[self, data_cb]]: constant[Run the event loop.] if name[self]._error begin[:] variable[err] assign[=] name[self]._error if call[name[isinstance], parameter[name[self]._error, name[KeyboardInterrupt]]] begin[:] name[self]._error assign[=] constant[None] <ast.Raise object at 0x7da1b1df9270> name[self]._on_data assign[=] name[data_cb] if compare[call[name[threading].current_thread, parameter[]] equal[==] name[main_thread]] begin[:] call[name[self]._setup_signals, parameter[list[[<ast.Attribute object at 0x7da1b1dfb820>, <ast.Attribute object at 0x7da1b1df9450>]]]] call[name[debug], parameter[constant[Entering event loop]]] call[name[self]._run, parameter[]] call[name[debug], parameter[constant[Exited event loop]]] if compare[call[name[threading].current_thread, parameter[]] equal[==] name[main_thread]] begin[:] call[name[self]._teardown_signals, parameter[]] call[name[signal].signal, parameter[name[signal].SIGINT, name[default_int_handler]]] name[self]._on_data assign[=] constant[None]
keyword[def] identifier[run] ( identifier[self] , identifier[data_cb] ): literal[string] keyword[if] identifier[self] . identifier[_error] : identifier[err] = identifier[self] . identifier[_error] keyword[if] identifier[isinstance] ( identifier[self] . identifier[_error] , identifier[KeyboardInterrupt] ): identifier[self] . identifier[_error] = keyword[None] keyword[raise] identifier[err] identifier[self] . identifier[_on_data] = identifier[data_cb] keyword[if] identifier[threading] . identifier[current_thread] ()== identifier[main_thread] : identifier[self] . identifier[_setup_signals] ([ identifier[signal] . identifier[SIGINT] , identifier[signal] . identifier[SIGTERM] ]) identifier[debug] ( literal[string] ) identifier[self] . identifier[_run] () identifier[debug] ( literal[string] ) keyword[if] identifier[threading] . identifier[current_thread] ()== identifier[main_thread] : identifier[self] . identifier[_teardown_signals] () identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[default_int_handler] ) identifier[self] . identifier[_on_data] = keyword[None]
def run(self, data_cb): """Run the event loop.""" if self._error: err = self._error if isinstance(self._error, KeyboardInterrupt): # KeyboardInterrupt is not destructive(it may be used in # the REPL). # After throwing KeyboardInterrupt, cleanup the _error field # so the loop may be started again self._error = None # depends on [control=['if'], data=[]] raise err # depends on [control=['if'], data=[]] self._on_data = data_cb if threading.current_thread() == main_thread: self._setup_signals([signal.SIGINT, signal.SIGTERM]) # depends on [control=['if'], data=[]] debug('Entering event loop') self._run() debug('Exited event loop') if threading.current_thread() == main_thread: self._teardown_signals() signal.signal(signal.SIGINT, default_int_handler) # depends on [control=['if'], data=[]] self._on_data = None
def print_all(msg):
    """Print all objects.

    Print a table of all active libvips objects. Handy for debugging.
    """
    gc.collect()
    logger.debug(msg)
    vips_lib.vips_object_print_all()
    logger.debug('')
def function[print_all, parameter[msg]]: constant[Print all objects. Print a table of all active libvips objects. Handy for debugging. ] call[name[gc].collect, parameter[]] call[name[logger].debug, parameter[name[msg]]] call[name[vips_lib].vips_object_print_all, parameter[]] call[name[logger].debug, parameter[]]
keyword[def] identifier[print_all] ( identifier[msg] ): literal[string] identifier[gc] . identifier[collect] () identifier[logger] . identifier[debug] ( identifier[msg] ) identifier[vips_lib] . identifier[vips_object_print_all] () identifier[logger] . identifier[debug] ()
def print_all(msg): """Print all objects. Print a table of all active libvips objects. Handy for debugging. """ gc.collect() logger.debug(msg) vips_lib.vips_object_print_all() logger.debug()
def index(self, item):
    """
    Return the 0-based position of `item` in this IpRange.

    >>> r = IpRange('127.0.0.1', '127.255.255.255')
    >>> r.index('127.0.0.1')
    0
    >>> r.index('127.255.255.255')
    16777214
    >>> r.index('10.0.0.1')
    Traceback (most recent call last):
        ...
    ValueError: 10.0.0.1 is not in range

    :param item: Dotted-quad ip address.
    :type item: str

    :returns: Index of ip address in range
    """
    item = self._cast(item)
    offset = item - self.startIp
    if offset >= 0 and offset < self._len:
        return offset
    raise ValueError('%s is not in range' % self._ipver.long2ip(item))
def function[index, parameter[self, item]]: constant[ Return the 0-based position of `item` in this IpRange. >>> r = IpRange('127.0.0.1', '127.255.255.255') >>> r.index('127.0.0.1') 0 >>> r.index('127.255.255.255') 16777214 >>> r.index('10.0.0.1') Traceback (most recent call last): ... ValueError: 10.0.0.1 is not in range :param item: Dotted-quad ip address. :type item: str :returns: Index of ip address in range ] variable[item] assign[=] call[name[self]._cast, parameter[name[item]]] variable[offset] assign[=] binary_operation[name[item] - name[self].startIp] if <ast.BoolOp object at 0x7da207f037f0> begin[:] return[name[offset]] <ast.Raise object at 0x7da207f03820>
keyword[def] identifier[index] ( identifier[self] , identifier[item] ): literal[string] identifier[item] = identifier[self] . identifier[_cast] ( identifier[item] ) identifier[offset] = identifier[item] - identifier[self] . identifier[startIp] keyword[if] identifier[offset] >= literal[int] keyword[and] identifier[offset] < identifier[self] . identifier[_len] : keyword[return] identifier[offset] keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[_ipver] . identifier[long2ip] ( identifier[item] ))
def index(self, item): """ Return the 0-based position of `item` in this IpRange. >>> r = IpRange('127.0.0.1', '127.255.255.255') >>> r.index('127.0.0.1') 0 >>> r.index('127.255.255.255') 16777214 >>> r.index('10.0.0.1') Traceback (most recent call last): ... ValueError: 10.0.0.1 is not in range :param item: Dotted-quad ip address. :type item: str :returns: Index of ip address in range """ item = self._cast(item) offset = item - self.startIp if offset >= 0 and offset < self._len: return offset # depends on [control=['if'], data=[]] raise ValueError('%s is not in range' % self._ipver.long2ip(item))
def get_workflows():
    """Returns a mapping of id->workflow
    """
    wftool = api.get_tool("portal_workflow")
    wfs = {}
    for wfid in wftool.objectIds():
        wf = wftool.getWorkflowById(wfid)
        if hasattr(aq_base(wf), "updateRoleMappingsFor"):
            wfs[wfid] = wf
    return wfs
def function[get_workflows, parameter[]]: constant[Returns a mapping of id->workflow ] variable[wftool] assign[=] call[name[api].get_tool, parameter[constant[portal_workflow]]] variable[wfs] assign[=] dictionary[[], []] for taget[name[wfid]] in starred[call[name[wftool].objectIds, parameter[]]] begin[:] variable[wf] assign[=] call[name[wftool].getWorkflowById, parameter[name[wfid]]] if call[name[hasattr], parameter[call[name[aq_base], parameter[name[wf]]], constant[updateRoleMappingsFor]]] begin[:] call[name[wfs]][name[wfid]] assign[=] name[wf] return[name[wfs]]
keyword[def] identifier[get_workflows] (): literal[string] identifier[wftool] = identifier[api] . identifier[get_tool] ( literal[string] ) identifier[wfs] ={} keyword[for] identifier[wfid] keyword[in] identifier[wftool] . identifier[objectIds] (): identifier[wf] = identifier[wftool] . identifier[getWorkflowById] ( identifier[wfid] ) keyword[if] identifier[hasattr] ( identifier[aq_base] ( identifier[wf] ), literal[string] ): identifier[wfs] [ identifier[wfid] ]= identifier[wf] keyword[return] identifier[wfs]
def get_workflows(): """Returns a mapping of id->workflow """ wftool = api.get_tool('portal_workflow') wfs = {} for wfid in wftool.objectIds(): wf = wftool.getWorkflowById(wfid) if hasattr(aq_base(wf), 'updateRoleMappingsFor'): wfs[wfid] = wf # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['wfid']] return wfs
def matches(self, other):
    """Do a loose equivalency test suitable for comparing page names.

    *other* can be any string-like object, including :class:`.Wikicode`,
    or an iterable of these. This operation is symmetric; both sides are
    adjusted. Specifically, whitespace and markup is stripped and the
    first letter's case is normalized. Typical usage is
    ``if template.name.matches("stub"): ...``.
    """
    cmp = lambda a, b: (a[0].upper() + a[1:] == b[0].upper() + b[1:]
                        if a and b else a == b)
    this = self.strip_code().strip()
    if isinstance(other, (str, bytes, Wikicode, Node)):
        that = parse_anything(other).strip_code().strip()
        return cmp(this, that)
    for obj in other:
        that = parse_anything(obj).strip_code().strip()
        if cmp(this, that):
            return True
    return False
def function[matches, parameter[self, other]]: constant[Do a loose equivalency test suitable for comparing page names. *other* can be any string-like object, including :class:`.Wikicode`, or an iterable of these. This operation is symmetric; both sides are adjusted. Specifically, whitespace and markup is stripped and the first letter's case is normalized. Typical usage is ``if template.name.matches("stub"): ...``. ] variable[cmp] assign[=] <ast.Lambda object at 0x7da18bcc8ca0> variable[this] assign[=] call[call[name[self].strip_code, parameter[]].strip, parameter[]] if call[name[isinstance], parameter[name[other], tuple[[<ast.Name object at 0x7da18bcc8b50>, <ast.Name object at 0x7da18bcca680>, <ast.Name object at 0x7da18bcc9120>, <ast.Name object at 0x7da18bcc8df0>]]]] begin[:] variable[that] assign[=] call[call[call[name[parse_anything], parameter[name[other]]].strip_code, parameter[]].strip, parameter[]] return[call[name[cmp], parameter[name[this], name[that]]]] for taget[name[obj]] in starred[name[other]] begin[:] variable[that] assign[=] call[call[call[name[parse_anything], parameter[name[obj]]].strip_code, parameter[]].strip, parameter[]] if call[name[cmp], parameter[name[this], name[that]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[matches] ( identifier[self] , identifier[other] ): literal[string] identifier[cmp] = keyword[lambda] identifier[a] , identifier[b] :( identifier[a] [ literal[int] ]. identifier[upper] ()+ identifier[a] [ literal[int] :]== identifier[b] [ literal[int] ]. identifier[upper] ()+ identifier[b] [ literal[int] :] keyword[if] identifier[a] keyword[and] identifier[b] keyword[else] identifier[a] == identifier[b] ) identifier[this] = identifier[self] . identifier[strip_code] (). identifier[strip] () keyword[if] identifier[isinstance] ( identifier[other] ,( identifier[str] , identifier[bytes] , identifier[Wikicode] , identifier[Node] )): identifier[that] = identifier[parse_anything] ( identifier[other] ). identifier[strip_code] (). identifier[strip] () keyword[return] identifier[cmp] ( identifier[this] , identifier[that] ) keyword[for] identifier[obj] keyword[in] identifier[other] : identifier[that] = identifier[parse_anything] ( identifier[obj] ). identifier[strip_code] (). identifier[strip] () keyword[if] identifier[cmp] ( identifier[this] , identifier[that] ): keyword[return] keyword[True] keyword[return] keyword[False]
def matches(self, other): """Do a loose equivalency test suitable for comparing page names. *other* can be any string-like object, including :class:`.Wikicode`, or an iterable of these. This operation is symmetric; both sides are adjusted. Specifically, whitespace and markup is stripped and the first letter's case is normalized. Typical usage is ``if template.name.matches("stub"): ...``. """ cmp = lambda a, b: a[0].upper() + a[1:] == b[0].upper() + b[1:] if a and b else a == b this = self.strip_code().strip() if isinstance(other, (str, bytes, Wikicode, Node)): that = parse_anything(other).strip_code().strip() return cmp(this, that) # depends on [control=['if'], data=[]] for obj in other: that = parse_anything(obj).strip_code().strip() if cmp(this, that): return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']] return False
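The `matches` row appears to be mwparserfromhell's loose page-name comparison; a short usage sketch against that library's public API:

    import mwparserfromhell

    code = mwparserfromhell.parse("{{ Stub | date=2024 }}")
    template = code.filter_templates()[0]
    print(template.name.matches("stub"))              # True: case and whitespace are normalized
    print(template.name.matches(["stub", "expand"]))  # True: an iterable of candidates also works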
def _save_payload(self, files, directories, links):
    '''
    Save payload (unmanaged files)

    :param files:
    :param directories:
    :param links:
    :return:
    '''
    idx = 0
    for p_type, p_list in (('f', files), ('d', directories), ('l', links,),):
        for p_obj in p_list:
            stats = os.stat(p_obj)
            payload = PayloadFile()
            payload.id = idx
            payload.path = p_obj
            payload.p_type = p_type
            payload.mode = stats.st_mode
            payload.uid = stats.st_uid
            payload.gid = stats.st_gid
            payload.p_size = stats.st_size
            payload.atime = stats.st_atime
            payload.mtime = stats.st_mtime
            payload.ctime = stats.st_ctime
            idx += 1
            self.db.store(payload)
def function[_save_payload, parameter[self, files, directories, links]]: constant[ Save payload (unmanaged files) :param files: :param directories: :param links: :return: ] variable[idx] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da20c9936a0>, <ast.Name object at 0x7da20c9922f0>]]] in starred[tuple[[<ast.Tuple object at 0x7da20c993910>, <ast.Tuple object at 0x7da20c991720>, <ast.Tuple object at 0x7da20c990a90>]]] begin[:] for taget[name[p_obj]] in starred[name[p_list]] begin[:] variable[stats] assign[=] call[name[os].stat, parameter[name[p_obj]]] variable[payload] assign[=] call[name[PayloadFile], parameter[]] name[payload].id assign[=] name[idx] name[payload].path assign[=] name[p_obj] name[payload].p_type assign[=] name[p_type] name[payload].mode assign[=] name[stats].st_mode name[payload].uid assign[=] name[stats].st_uid name[payload].gid assign[=] name[stats].st_gid name[payload].p_size assign[=] name[stats].st_size name[payload].atime assign[=] name[stats].st_atime name[payload].mtime assign[=] name[stats].st_mtime name[payload].ctime assign[=] name[stats].st_ctime <ast.AugAssign object at 0x7da20c76c8e0> call[name[self].db.store, parameter[name[payload]]]
keyword[def] identifier[_save_payload] ( identifier[self] , identifier[files] , identifier[directories] , identifier[links] ): literal[string] identifier[idx] = literal[int] keyword[for] identifier[p_type] , identifier[p_list] keyword[in] (( literal[string] , identifier[files] ),( literal[string] , identifier[directories] ),( literal[string] , identifier[links] ,),): keyword[for] identifier[p_obj] keyword[in] identifier[p_list] : identifier[stats] = identifier[os] . identifier[stat] ( identifier[p_obj] ) identifier[payload] = identifier[PayloadFile] () identifier[payload] . identifier[id] = identifier[idx] identifier[payload] . identifier[path] = identifier[p_obj] identifier[payload] . identifier[p_type] = identifier[p_type] identifier[payload] . identifier[mode] = identifier[stats] . identifier[st_mode] identifier[payload] . identifier[uid] = identifier[stats] . identifier[st_uid] identifier[payload] . identifier[gid] = identifier[stats] . identifier[st_gid] identifier[payload] . identifier[p_size] = identifier[stats] . identifier[st_size] identifier[payload] . identifier[atime] = identifier[stats] . identifier[st_atime] identifier[payload] . identifier[mtime] = identifier[stats] . identifier[st_mtime] identifier[payload] . identifier[ctime] = identifier[stats] . identifier[st_ctime] identifier[idx] += literal[int] identifier[self] . identifier[db] . identifier[store] ( identifier[payload] )
def _save_payload(self, files, directories, links): """ Save payload (unmanaged files) :param files: :param directories: :param links: :return: """ idx = 0 for (p_type, p_list) in (('f', files), ('d', directories), ('l', links)): for p_obj in p_list: stats = os.stat(p_obj) payload = PayloadFile() payload.id = idx payload.path = p_obj payload.p_type = p_type payload.mode = stats.st_mode payload.uid = stats.st_uid payload.gid = stats.st_gid payload.p_size = stats.st_size payload.atime = stats.st_atime payload.mtime = stats.st_mtime payload.ctime = stats.st_ctime idx += 1 self.db.store(payload) # depends on [control=['for'], data=['p_obj']] # depends on [control=['for'], data=[]]
def expand_filenames(self, filenames):
    """
    Expand a list of filenames using environment variables, followed
    by expansion of shell-style wildcards.
    """
    results = []
    for filename in filenames:
        result = filename
        if "$" in filename:
            template = Template(filename)
            result = template.substitute(**self.environment)
            logging.debug(
                "Expanding {} to {}.".format(filename, result))
        if any([pattern in result for pattern in "*[]?"]):
            expanded = glob.glob(result)
            if len(expanded) > 0:
                result = expanded
            else:
                result = "NONEXISTENT"
        if isinstance(result, list):
            results.extend(result)
        else:
            results.append(result)
    return sorted(list(set(results)))
def function[expand_filenames, parameter[self, filenames]]: constant[ Expand a list of filenames using environment variables, followed by expansion of shell-style wildcards. ] variable[results] assign[=] list[[]] for taget[name[filename]] in starred[name[filenames]] begin[:] variable[result] assign[=] name[filename] if compare[constant[$] in name[filename]] begin[:] variable[template] assign[=] call[name[Template], parameter[name[filename]]] variable[result] assign[=] call[name[template].substitute, parameter[]] call[name[logging].debug, parameter[call[constant[Expanding {} to {}.].format, parameter[name[filename], name[result]]]]] if call[name[any], parameter[<ast.ListComp object at 0x7da1b14c6350>]] begin[:] variable[expanded] assign[=] call[name[glob].glob, parameter[name[result]]] if compare[call[name[len], parameter[name[expanded]]] greater[>] constant[0]] begin[:] variable[result] assign[=] name[expanded] if call[name[isinstance], parameter[name[result], name[list]]] begin[:] call[name[results].extend, parameter[name[result]]] return[call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[name[results]]]]]]]]
keyword[def] identifier[expand_filenames] ( identifier[self] , identifier[filenames] ): literal[string] identifier[results] =[] keyword[for] identifier[filename] keyword[in] identifier[filenames] : identifier[result] = identifier[filename] keyword[if] literal[string] keyword[in] identifier[filename] : identifier[template] = identifier[Template] ( identifier[filename] ) identifier[result] = identifier[template] . identifier[substitute] (** identifier[self] . identifier[environment] ) identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[filename] , identifier[result] )) keyword[if] identifier[any] ([ identifier[pattern] keyword[in] identifier[result] keyword[for] identifier[pattern] keyword[in] literal[string] ]): identifier[expanded] = identifier[glob] . identifier[glob] ( identifier[result] ) keyword[if] identifier[len] ( identifier[expanded] )> literal[int] : identifier[result] = identifier[expanded] keyword[else] : identifier[result] = literal[string] keyword[if] identifier[isinstance] ( identifier[result] , identifier[list] ): identifier[results] . identifier[extend] ( identifier[result] ) keyword[else] : identifier[results] . identifier[append] ( identifier[result] ) keyword[return] identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[results] )))
def expand_filenames(self, filenames): """ Expand a list of filenames using environment variables, followed by expansion of shell-style wildcards. """ results = [] for filename in filenames: result = filename if '$' in filename: template = Template(filename) result = template.substitute(**self.environment) logging.debug('Expanding {} to {}.'.format(filename, result)) # depends on [control=['if'], data=['filename']] if any([pattern in result for pattern in '*[]?']): expanded = glob.glob(result) if len(expanded) > 0: result = expanded # depends on [control=['if'], data=[]] else: result = 'NONEXISTENT' # depends on [control=['if'], data=[]] if isinstance(result, list): results.extend(result) # depends on [control=['if'], data=[]] else: results.append(result) # depends on [control=['for'], data=['filename']] return sorted(list(set(results)))
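The `expand_filenames` row chains two stdlib pieces: `string.Template` for `$VAR` substitution, then `glob` for shell-style wildcards. A quick standalone demonstration; the `env` dict is a stand-in for the method's `self.environment`:

    import glob
    from string import Template

    env = {"DATA": "/tmp"}  # stand-in for self.environment
    path = Template("$DATA/*.fits").substitute(**env)
    print(path)             # /tmp/*.fits
    print(glob.glob(path))  # whatever matches on this machine, possibly []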
def require_prebuilt_dist(func):
    """Decorator for ToolchainCL methods. If present, the method will
    automatically make sure a dist has been built before continuing
    or, if no dists are present or can be obtained, will raise an
    error.
    """
    @wraps(func)
    def wrapper_func(self, args):
        ctx = self.ctx
        ctx.set_archs(self._archs)
        ctx.prepare_build_environment(user_sdk_dir=self.sdk_dir,
                                      user_ndk_dir=self.ndk_dir,
                                      user_android_api=self.android_api,
                                      user_ndk_api=self.ndk_api)
        dist = self._dist
        if dist.needs_build:
            if dist.folder_exists():  # possible if the dist is being replaced
                dist.delete()
            info_notify('No dist exists that meets your requirements, '
                        'so one will be built.')
            build_dist_from_args(ctx, dist, args)
        func(self, args)
    return wrapper_func
def function[require_prebuilt_dist, parameter[func]]: constant[Decorator for ToolchainCL methods. If present, the method will automatically make sure a dist has been built before continuing or, if no dists are present or can be obtained, will raise an error. ] def function[wrapper_func, parameter[self, args]]: variable[ctx] assign[=] name[self].ctx call[name[ctx].set_archs, parameter[name[self]._archs]] call[name[ctx].prepare_build_environment, parameter[]] variable[dist] assign[=] name[self]._dist if name[dist].needs_build begin[:] if call[name[dist].folder_exists, parameter[]] begin[:] call[name[dist].delete, parameter[]] call[name[info_notify], parameter[constant[No dist exists that meets your requirements, so one will be built.]]] call[name[build_dist_from_args], parameter[name[ctx], name[dist], name[args]]] call[name[func], parameter[name[self], name[args]]] return[name[wrapper_func]]
keyword[def] identifier[require_prebuilt_dist] ( identifier[func] ): literal[string] @ identifier[wraps] ( identifier[func] ) keyword[def] identifier[wrapper_func] ( identifier[self] , identifier[args] ): identifier[ctx] = identifier[self] . identifier[ctx] identifier[ctx] . identifier[set_archs] ( identifier[self] . identifier[_archs] ) identifier[ctx] . identifier[prepare_build_environment] ( identifier[user_sdk_dir] = identifier[self] . identifier[sdk_dir] , identifier[user_ndk_dir] = identifier[self] . identifier[ndk_dir] , identifier[user_android_api] = identifier[self] . identifier[android_api] , identifier[user_ndk_api] = identifier[self] . identifier[ndk_api] ) identifier[dist] = identifier[self] . identifier[_dist] keyword[if] identifier[dist] . identifier[needs_build] : keyword[if] identifier[dist] . identifier[folder_exists] (): identifier[dist] . identifier[delete] () identifier[info_notify] ( literal[string] literal[string] ) identifier[build_dist_from_args] ( identifier[ctx] , identifier[dist] , identifier[args] ) identifier[func] ( identifier[self] , identifier[args] ) keyword[return] identifier[wrapper_func]
def require_prebuilt_dist(func): """Decorator for ToolchainCL methods. If present, the method will automatically make sure a dist has been built before continuing or, if no dists are present or can be obtained, will raise an error. """ @wraps(func) def wrapper_func(self, args): ctx = self.ctx ctx.set_archs(self._archs) ctx.prepare_build_environment(user_sdk_dir=self.sdk_dir, user_ndk_dir=self.ndk_dir, user_android_api=self.android_api, user_ndk_api=self.ndk_api) dist = self._dist if dist.needs_build: if dist.folder_exists(): # possible if the dist is being replaced dist.delete() # depends on [control=['if'], data=[]] info_notify('No dist exists that meets your requirements, so one will be built.') build_dist_from_args(ctx, dist, args) # depends on [control=['if'], data=[]] func(self, args) return wrapper_func
def get_for_update(self, key):
    """
    Locks the key and then gets and returns the value to which the
    specified key is mapped. Lock will be released at the end of the
    transaction (either commit or rollback).

    :param key: (object), the specified key.
    :return: (object), the value for the specified key.

    .. seealso:: :func:`Map.get(key) <hazelcast.proxy.map.Map.get>`
    """
    check_not_none(key, "key can't be none")
    return self._encode_invoke(transactional_map_get_for_update_codec, key=self._to_data(key))
def function[get_for_update, parameter[self, key]]: constant[ Locks the key and then gets and returns the value to which the specified key is mapped. Lock will be released at the end of the transaction (either commit or rollback). :param key: (object), the specified key. :return: (object), the value for the specified key. .. seealso:: :func:`Map.get(key) <hazelcast.proxy.map.Map.get>` ] call[name[check_not_none], parameter[name[key], constant[key can't be none]]] return[call[name[self]._encode_invoke, parameter[name[transactional_map_get_for_update_codec]]]]
keyword[def] identifier[get_for_update] ( identifier[self] , identifier[key] ): literal[string] identifier[check_not_none] ( identifier[key] , literal[string] ) keyword[return] identifier[self] . identifier[_encode_invoke] ( identifier[transactional_map_get_for_update_codec] , identifier[key] = identifier[self] . identifier[_to_data] ( identifier[key] ))
def get_for_update(self, key): """ Locks the key and then gets and returns the value to which the specified key is mapped. Lock will be released at the end of the transaction (either commit or rollback). :param key: (object), the specified key. :return: (object), the value for the specified key. .. seealso:: :func:`Map.get(key) <hazelcast.proxy.map.Map.get>` """ check_not_none(key, "key can't be none") return self._encode_invoke(transactional_map_get_for_update_codec, key=self._to_data(key))
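The `get_for_update` row belongs to the Hazelcast Python client's transactional map. A sketch of how it is typically reached, assuming the client's usual transaction API; the map name, key, and client configuration are placeholders:

    import hazelcast

    client = hazelcast.HazelcastClient()
    transaction = client.new_transaction()
    transaction.begin()
    try:
        tx_map = transaction.get_map("users")
        value = tx_map.get_for_update("some-key")  # locks the key until commit/rollback
        tx_map.put("some-key", value)
        transaction.commit()
    except Exception:
        transaction.rollback()
        raise
    finally:
        client.shutdown()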
def DeleteCronJob(self, cronjob_id):
    """Deletes a cronjob along with all its runs."""
    if cronjob_id not in self.cronjobs:
        raise db.UnknownCronJobError("Cron job %s not known." % cronjob_id)
    del self.cronjobs[cronjob_id]
    try:
        del self.cronjob_leases[cronjob_id]
    except KeyError:
        pass
    for job_run in self.ReadCronJobRuns(cronjob_id):
        del self.cronjob_runs[(cronjob_id, job_run.run_id)]
def function[DeleteCronJob, parameter[self, cronjob_id]]: constant[Deletes a cronjob along with all its runs.] if compare[name[cronjob_id] <ast.NotIn object at 0x7da2590d7190> name[self].cronjobs] begin[:] <ast.Raise object at 0x7da1b1d92770> <ast.Delete object at 0x7da1b1d93f40> <ast.Try object at 0x7da1b1d90220> for taget[name[job_run]] in starred[call[name[self].ReadCronJobRuns, parameter[name[cronjob_id]]]] begin[:] <ast.Delete object at 0x7da1b1d92b30>
keyword[def] identifier[DeleteCronJob] ( identifier[self] , identifier[cronjob_id] ): literal[string] keyword[if] identifier[cronjob_id] keyword[not] keyword[in] identifier[self] . identifier[cronjobs] : keyword[raise] identifier[db] . identifier[UnknownCronJobError] ( literal[string] % identifier[cronjob_id] ) keyword[del] identifier[self] . identifier[cronjobs] [ identifier[cronjob_id] ] keyword[try] : keyword[del] identifier[self] . identifier[cronjob_leases] [ identifier[cronjob_id] ] keyword[except] identifier[KeyError] : keyword[pass] keyword[for] identifier[job_run] keyword[in] identifier[self] . identifier[ReadCronJobRuns] ( identifier[cronjob_id] ): keyword[del] identifier[self] . identifier[cronjob_runs] [( identifier[cronjob_id] , identifier[job_run] . identifier[run_id] )]
def DeleteCronJob(self, cronjob_id): """Deletes a cronjob along with all its runs.""" if cronjob_id not in self.cronjobs: raise db.UnknownCronJobError('Cron job %s not known.' % cronjob_id) # depends on [control=['if'], data=['cronjob_id']] del self.cronjobs[cronjob_id] try: del self.cronjob_leases[cronjob_id] # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] for job_run in self.ReadCronJobRuns(cronjob_id): del self.cronjob_runs[cronjob_id, job_run.run_id] # depends on [control=['for'], data=['job_run']]
def receive(self):
    '''
    Return the message received and the address.
    '''
    try:
        msg = next(self.consumer)
    except ValueError as error:
        log.error('Received kafka error: %s', error, exc_info=True)
        raise ListenerException(error)
    log_source = msg.key
    try:
        decoded = json.loads(msg.value.decode('utf-8'))
    except ValueError:
        log.error('Not in json format: %s', msg.value.decode('utf-8'))
        return '', ''
    log_message = decoded.get('message')
    log.debug('[%s] Received %s from %s', log_message, log_source, time.time())
    return log_message, log_source
def function[receive, parameter[self]]: constant[ Return the message received and the address. ] <ast.Try object at 0x7da1b15f1600> variable[log_source] assign[=] name[msg].key <ast.Try object at 0x7da1b15f2620> variable[log_message] assign[=] call[name[decoded].get, parameter[constant[message]]] call[name[log].debug, parameter[constant[[%s] Received %s from %s], name[log_message], name[log_source], call[name[time].time, parameter[]]]] return[tuple[[<ast.Name object at 0x7da1b15f2e90>, <ast.Name object at 0x7da1b15f2ef0>]]]
keyword[def] identifier[receive] ( identifier[self] ): literal[string] keyword[try] : identifier[msg] = identifier[next] ( identifier[self] . identifier[consumer] ) keyword[except] identifier[ValueError] keyword[as] identifier[error] : identifier[log] . identifier[error] ( literal[string] , identifier[error] , identifier[exc_info] = keyword[True] ) keyword[raise] identifier[ListenerException] ( identifier[error] ) identifier[log_source] = identifier[msg] . identifier[key] keyword[try] : identifier[decoded] = identifier[json] . identifier[loads] ( identifier[msg] . identifier[value] . identifier[decode] ( literal[string] )) keyword[except] identifier[ValueError] : identifier[log] . identifier[error] ( literal[string] , identifier[msg] . identifier[value] . identifier[decode] ( literal[string] )) keyword[return] literal[string] , literal[string] identifier[log_message] = identifier[decoded] . identifier[get] ( literal[string] ) identifier[log] . identifier[debug] ( literal[string] , identifier[log_message] , identifier[log_source] , identifier[time] . identifier[time] ()) keyword[return] identifier[log_message] , identifier[log_source]
def receive(self): """ Return the message received and the address. """ try: msg = next(self.consumer) # depends on [control=['try'], data=[]] except ValueError as error: log.error('Received kafka error: %s', error, exc_info=True) raise ListenerException(error) # depends on [control=['except'], data=['error']] log_source = msg.key try: decoded = json.loads(msg.value.decode('utf-8')) # depends on [control=['try'], data=[]] except ValueError: log.error('Not in json format: %s', msg.value.decode('utf-8')) return ('', '') # depends on [control=['except'], data=[]] log_message = decoded.get('message') log.debug('[%s] Received %s from %s', log_message, log_source, time.time()) return (log_message, log_source)
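The `receive` row iterates a Kafka consumer and JSON-decodes each message. A minimal sketch of the consumption loop it assumes, using kafka-python; the topic name and broker address are placeholders:

    import json
    from kafka import KafkaConsumer

    consumer = KafkaConsumer('napalm-logs', bootstrap_servers='127.0.0.1:9092')
    for msg in consumer:
        try:
            decoded = json.loads(msg.value.decode('utf-8'))
        except ValueError:
            continue  # skip non-JSON payloads, as the row does
        print(msg.key, decoded.get('message'))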
def inten(function):
    "Decorator. Attempts to convert return value to int"
    def wrapper(*args, **kwargs):
        return coerce_to_int(function(*args, **kwargs))
    return wrapper
def function[inten, parameter[function]]: constant[Decorator. Attempts to convert return value to int] def function[wrapper, parameter[]]: return[call[name[coerce_to_int], parameter[call[name[function], parameter[<ast.Starred object at 0x7da18eb572b0>]]]]] return[name[wrapper]]
keyword[def] identifier[inten] ( identifier[function] ): literal[string] keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ): keyword[return] identifier[coerce_to_int] ( identifier[function] (* identifier[args] ,** identifier[kwargs] )) keyword[return] identifier[wrapper]
def inten(function): """Decorator. Attempts to convert return value to int""" def wrapper(*args, **kwargs): return coerce_to_int(function(*args, **kwargs)) return wrapper
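The `inten` row relies on a `coerce_to_int` helper defined elsewhere in its module. A self-contained sketch with a hypothetical stand-in for that helper:

    def coerce_to_int(value):
        # Hypothetical stand-in for the module's real helper.
        try:
            return int(value)
        except (TypeError, ValueError):
            return value

    def inten(function):
        "Decorator. Attempts to convert return value to int"
        def wrapper(*args, **kwargs):
            return coerce_to_int(function(*args, **kwargs))
        return wrapper

    @inten
    def read_port():
        return "8080"

    print(read_port())  # 8080 as an int, not a str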
def lsof(name):
    '''
    Retrieve the lsof information of the given process name.

    CLI Example:

    .. code-block:: bash

        salt '*' ps.lsof apache2
    '''
    sanitize_name = six.text_type(name)
    lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name)
    ret = []
    ret.extend([sanitize_name, lsof_infos])
    return ret
def function[lsof, parameter[name]]: constant[ Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ] variable[sanitize_name] assign[=] call[name[six].text_type, parameter[name[name]]] variable[lsof_infos] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[binary_operation[constant[lsof -c ] + name[sanitize_name]]]] variable[ret] assign[=] list[[]] call[name[ret].extend, parameter[list[[<ast.Name object at 0x7da18dc065c0>, <ast.Name object at 0x7da18dc04340>]]]] return[name[ret]]
keyword[def] identifier[lsof] ( identifier[name] ): literal[string] identifier[sanitize_name] = identifier[six] . identifier[text_type] ( identifier[name] ) identifier[lsof_infos] = identifier[__salt__] [ literal[string] ]( literal[string] + identifier[sanitize_name] ) identifier[ret] =[] identifier[ret] . identifier[extend] ([ identifier[sanitize_name] , identifier[lsof_infos] ]) keyword[return] identifier[ret]
def lsof(name): """ Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 """ sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']('lsof -c ' + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret
def subsets_of_fileinfo_from_txt(filename):
    """Returns a dictionary with subsets of FileInfo instances from a TXT file.

    Each subset of files must be preceded by a line:
    @ <number> <label>
    where <number> indicates the number of files in that subset,
    and <label> is a label for that subset. Any additional text
    beyond <label> in the same line is ignored.

    Note that blank lines or lines starting with the hash symbol are
    also ignored. The name of the files comprising each subset will
    be obtained from the first contiguous character string in every
    line (thus, the rest of the line will be discarded).

    Parameters
    ----------
    filename : string
        Name of a TXT file containing a list of FITS files grouped
        in different subsets by the @ symbol.

    Returns
    -------
    dict_of_subsets_of_fileinfo : dictionary
        Dictionary containing as many entries as different subsets
        of files available. Each value of the dictionary is a
        dictionary with a label (sequential number starting at zero)
        and the list of FileInfo instances within the subset.

    """

    # check for input file
    if not os.path.isfile(filename):
        raise ValueError("File " + filename + " not found!")

    # read input file
    with open(filename) as f:
        file_content = f.read().splitlines()

    # obtain the different subsets of files
    dict_of_subsets_of_fileinfo = {}
    label = None
    sublist_of_fileinfo = []
    idict = 0
    ifiles = 0
    nfiles = 0
    sublist_finished = True
    for line in file_content:
        if len(line) > 0:
            if line[0] != '#':
                if label is None:
                    if line[0] == "@":
                        nfiles = int(line[1:].split()[0])
                        label = line[1:].split()[1]
                        sublist_of_fileinfo = []
                        ifiles = 0
                        sublist_finished = False
                    else:
                        raise ValueError("Expected @ symbol not found!")
                else:
                    if line[0] == "@":
                        raise ValueError("Unexpected @ symbol found!")
                    tmplist = line.split()
                    tmpfile = tmplist[0]
                    if len(tmplist) > 1:
                        tmpinfo = tmplist[1:]
                    else:
                        tmpinfo = None
                    if not os.path.isfile(tmpfile):
                        raise ValueError("File " + tmpfile + " not found!")
                    sublist_of_fileinfo.append(FileInfo(tmpfile, tmpinfo))
                    ifiles += 1
                    if ifiles == nfiles:
                        dict_of_subsets_of_fileinfo[idict] = {}
                        tmpdict = dict_of_subsets_of_fileinfo[idict]
                        tmpdict['label'] = label
                        tmpdict['list_of_fileinfo'] = sublist_of_fileinfo
                        idict += 1
                        label = None
                        sublist_of_fileinfo = []
                        ifiles = 0
                        sublist_finished = True

    if not sublist_finished:
        raise ValueError("Unexpected end of sublist of files.")

    return dict_of_subsets_of_fileinfo
def function[subsets_of_fileinfo_from_txt, parameter[filename]]: constant[Returns a dictionary with subsets of FileInfo instances from a TXT file. Each subset of files must be preceded by a line: @ <number> <label> where <number> indicates the number of files in that subset, and <label> is a label for that subset. Any additional text beyond <label> in the same line is ignored. Note that blank lines or lines starting by the hash symbol are also ignored. The name of the files comprising each subset will be obtained from the first contiguous character string in every line (thus, the rest of the line will be discarded). Parameters ---------- filename : string Name of a TXT file containing a list of FITS files grouped in different subsets by the @ symbol. Returns ------- dict_of_subsets_of_fileinfo : dictionary Dictionary containing as many entries as different subsets of files available. Each value of the dictionary is a dictionary with a label (sequential number starting at zero) and the list of FileInfo instances within subset. ] if <ast.UnaryOp object at 0x7da18dc04dc0> begin[:] <ast.Raise object at 0x7da18dc04a30> with call[name[open], parameter[name[filename]]] begin[:] variable[file_content] assign[=] call[call[name[f].read, parameter[]].splitlines, parameter[]] variable[dict_of_subsets_of_fileinfo] assign[=] dictionary[[], []] variable[label] assign[=] constant[None] variable[sublist_of_fileinfo] assign[=] list[[]] variable[idict] assign[=] constant[0] variable[ifiles] assign[=] constant[0] variable[nfiles] assign[=] constant[0] variable[sublist_finished] assign[=] constant[True] for taget[name[line]] in starred[name[file_content]] begin[:] if compare[call[name[len], parameter[name[line]]] greater[>] constant[0]] begin[:] if compare[call[name[line]][constant[0]] not_equal[!=] constant[#]] begin[:] if compare[name[label] is constant[None]] begin[:] if compare[call[name[line]][constant[0]] equal[==] constant[@]] begin[:] variable[nfiles] assign[=] call[name[int], parameter[call[call[call[name[line]][<ast.Slice object at 0x7da18dc054b0>].split, parameter[]]][constant[0]]]] variable[label] assign[=] call[call[call[name[line]][<ast.Slice object at 0x7da18dc047c0>].split, parameter[]]][constant[1]] variable[sublist_of_fileinfo] assign[=] list[[]] variable[ifiles] assign[=] constant[0] variable[sublist_finished] assign[=] constant[False] if <ast.UnaryOp object at 0x7da1b24e66e0> begin[:] <ast.Raise object at 0x7da1b24e6980> return[name[dict_of_subsets_of_fileinfo]]
keyword[def] identifier[subsets_of_fileinfo_from_txt] ( identifier[filename] ): literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ): keyword[raise] identifier[ValueError] ( literal[string] + identifier[filename] + literal[string] ) keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[f] : identifier[file_content] = identifier[f] . identifier[read] (). identifier[splitlines] () identifier[dict_of_subsets_of_fileinfo] ={} identifier[label] = keyword[None] identifier[sublist_of_fileinfo] =[] identifier[idict] = literal[int] identifier[ifiles] = literal[int] identifier[nfiles] = literal[int] identifier[sublist_finished] = keyword[True] keyword[for] identifier[line] keyword[in] identifier[file_content] : keyword[if] identifier[len] ( identifier[line] )> literal[int] : keyword[if] identifier[line] [ literal[int] ]!= literal[string] : keyword[if] identifier[label] keyword[is] keyword[None] : keyword[if] identifier[line] [ literal[int] ]== literal[string] : identifier[nfiles] = identifier[int] ( identifier[line] [ literal[int] :]. identifier[split] ()[ literal[int] ]) identifier[label] = identifier[line] [ literal[int] :]. identifier[split] ()[ literal[int] ] identifier[sublist_of_fileinfo] =[] identifier[ifiles] = literal[int] identifier[sublist_finished] = keyword[False] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : keyword[if] identifier[line] [ literal[int] ]== literal[string] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[tmplist] = identifier[line] . identifier[split] () identifier[tmpfile] = identifier[tmplist] [ literal[int] ] keyword[if] identifier[len] ( identifier[tmplist] )> literal[int] : identifier[tmpinfo] = identifier[tmplist] [ literal[int] :] keyword[else] : identifier[tmpinfo] = keyword[None] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[tmpfile] ): keyword[raise] identifier[ValueError] ( literal[string] + identifier[tmpfile] + literal[string] ) identifier[sublist_of_fileinfo] . identifier[append] ( identifier[FileInfo] ( identifier[tmpfile] , identifier[tmpinfo] )) identifier[ifiles] += literal[int] keyword[if] identifier[ifiles] == identifier[nfiles] : identifier[dict_of_subsets_of_fileinfo] [ identifier[idict] ]={} identifier[tmpdict] = identifier[dict_of_subsets_of_fileinfo] [ identifier[idict] ] identifier[tmpdict] [ literal[string] ]= identifier[label] identifier[tmpdict] [ literal[string] ]= identifier[sublist_of_fileinfo] identifier[idict] += literal[int] identifier[label] = keyword[None] identifier[sublist_of_fileinfo] =[] identifier[ifiles] = literal[int] identifier[sublist_finished] = keyword[True] keyword[if] keyword[not] identifier[sublist_finished] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[dict_of_subsets_of_fileinfo]
def subsets_of_fileinfo_from_txt(filename): """Returns a dictionary with subsets of FileInfo instances from a TXT file. Each subset of files must be preceded by a line: @ <number> <label> where <number> indicates the number of files in that subset, and <label> is a label for that subset. Any additional text beyond <label> in the same line is ignored. Note that blank lines or lines starting by the hash symbol are also ignored. The name of the files comprising each subset will be obtained from the first contiguous character string in every line (thus, the rest of the line will be discarded). Parameters ---------- filename : string Name of a TXT file containing a list of FITS files grouped in different subsets by the @ symbol. Returns ------- dict_of_subsets_of_fileinfo : dictionary Dictionary containing as many entries as different subsets of files available. Each value of the dictionary is a dictionary with a label (sequential number starting at zero) and the list of FileInfo instances within subset. """ # check for input file if not os.path.isfile(filename): raise ValueError('File ' + filename + ' not found!') # depends on [control=['if'], data=[]] # read input file with open(filename) as f: file_content = f.read().splitlines() # depends on [control=['with'], data=['f']] # obtain the different subsets of files dict_of_subsets_of_fileinfo = {} label = None sublist_of_fileinfo = [] idict = 0 ifiles = 0 nfiles = 0 sublist_finished = True for line in file_content: if len(line) > 0: if line[0] != '#': if label is None: if line[0] == '@': nfiles = int(line[1:].split()[0]) label = line[1:].split()[1] sublist_of_fileinfo = [] ifiles = 0 sublist_finished = False # depends on [control=['if'], data=[]] else: raise ValueError('Expected @ symbol not found!') # depends on [control=['if'], data=['label']] else: if line[0] == '@': raise ValueError('Unexpected @ symbol found!') # depends on [control=['if'], data=[]] tmplist = line.split() tmpfile = tmplist[0] if len(tmplist) > 1: tmpinfo = tmplist[1:] # depends on [control=['if'], data=[]] else: tmpinfo = None if not os.path.isfile(tmpfile): raise ValueError('File ' + tmpfile + ' not found!') # depends on [control=['if'], data=[]] sublist_of_fileinfo.append(FileInfo(tmpfile, tmpinfo)) ifiles += 1 if ifiles == nfiles: dict_of_subsets_of_fileinfo[idict] = {} tmpdict = dict_of_subsets_of_fileinfo[idict] tmpdict['label'] = label tmpdict['list_of_fileinfo'] = sublist_of_fileinfo idict += 1 label = None sublist_of_fileinfo = [] ifiles = 0 sublist_finished = True # depends on [control=['if'], data=['ifiles']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] if not sublist_finished: raise ValueError('Unexpected end of sublist of files.') # depends on [control=['if'], data=[]] return dict_of_subsets_of_fileinfo
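A minimal usage sketch for the parser above, assuming the function is in scope. The real FileInfo class lives in the same module as the function and is not shown here, so a namedtuple stands in for it.

import collections, os

# Stand-in for the FileInfo class the parser instantiates.
FileInfo = collections.namedtuple('FileInfo', ['filename', 'info'])

for name in ('a.fits', 'b.fits', 'c.fits'):
    open(name, 'w').close()        # the parser verifies each file exists

with open('filelist.txt', 'w') as f:
    f.write('# comments and blank lines are ignored\n'
            '@ 2 bias\n'
            'a.fits\n'
            'b.fits extra tokens become FileInfo info\n'
            '@ 1 flat\n'
            'c.fits\n')

subsets = subsets_of_fileinfo_from_txt('filelist.txt')
print(subsets[0]['label'], len(subsets[0]['list_of_fileinfo']))  # bias 2
print(subsets[1]['list_of_fileinfo'][0].filename)                # c.fits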
def mavlink_packet(self, m): '''handle mavlink packets''' if m.get_type() == 'GLOBAL_POSITION_INT': if self.settings.target_system == 0 or self.settings.target_system == m.get_srcSystem(): self.packets_mytarget += 1 else: self.packets_othertarget += 1
def function[mavlink_packet, parameter[self, m]]: constant[handle mavlink packets] if compare[call[name[m].get_type, parameter[]] equal[==] constant[GLOBAL_POSITION_INT]] begin[:] if <ast.BoolOp object at 0x7da2046238e0> begin[:] <ast.AugAssign object at 0x7da204623010>
keyword[def] identifier[mavlink_packet] ( identifier[self] , identifier[m] ): literal[string] keyword[if] identifier[m] . identifier[get_type] ()== literal[string] : keyword[if] identifier[self] . identifier[settings] . identifier[target_system] == literal[int] keyword[or] identifier[self] . identifier[settings] . identifier[target_system] == identifier[m] . identifier[get_srcSystem] (): identifier[self] . identifier[packets_mytarget] += literal[int] keyword[else] : identifier[self] . identifier[packets_othertarget] += literal[int]
def mavlink_packet(self, m): """handle mavlink packets""" if m.get_type() == 'GLOBAL_POSITION_INT': if self.settings.target_system == 0 or self.settings.target_system == m.get_srcSystem(): self.packets_mytarget += 1 # depends on [control=['if'], data=[]] else: self.packets_othertarget += 1 # depends on [control=['if'], data=[]]
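A sketch of how the handler tallies packets. Everything named Fake* is invented for illustration; the real objects come from MAVProxy and pymavlink, of which only the attributes the handler touches are mimicked.

# Stubs mimicking the attributes mavlink_packet reads and writes.
class FakeSettings:
    target_system = 1              # 0 would mean "count any source system"

class FakeMsg:
    def get_type(self):
        return 'GLOBAL_POSITION_INT'
    def get_srcSystem(self):
        return 1

class FakeModule:
    settings = FakeSettings()
    packets_mytarget = 0
    packets_othertarget = 0
    mavlink_packet = mavlink_packet   # the function defined above

mod = FakeModule()
mod.mavlink_packet(FakeMsg())
assert mod.packets_mytarget == 1 and mod.packets_othertarget == 0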
def switch(self, gen_mode:bool=None):
    "Put the model in generator mode if `gen_mode`, in critic mode otherwise; toggle the current mode if `gen_mode` is None."
    self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
def function[switch, parameter[self, gen_mode]]: constant[Put the model in generator mode if `gen_mode`, in critic mode otherwise.] name[self].gen_mode assign[=] <ast.IfExp object at 0x7da1b1e9b0a0>
keyword[def] identifier[switch] ( identifier[self] , identifier[gen_mode] : identifier[bool] = keyword[None] ): literal[string] identifier[self] . identifier[gen_mode] =( keyword[not] identifier[self] . identifier[gen_mode] ) keyword[if] identifier[gen_mode] keyword[is] keyword[None] keyword[else] identifier[gen_mode]
def switch(self, gen_mode: bool=None): """Put the model in generator mode if `gen_mode`, in critic mode otherwise.""" self.gen_mode = not self.gen_mode if gen_mode is None else gen_mode
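The toggle-or-set behavior is easiest to see on a toy object; the Toy class below is invented purely to exercise the two call patterns.

# Toy holder for gen_mode; switch() with no argument toggles,
# an explicit boolean sets the mode directly.
class Toy:
    gen_mode = False
    switch = switch                # the function defined above

t = Toy()
t.switch()                         # None -> toggle
assert t.gen_mode is True
t.switch(gen_mode=False)           # explicit -> set
assert t.gen_mode is False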
def normalize_parameters(params): """**Parameters Normalization** Per `section 3.4.1.3.2`_ of the spec. For example, the list of parameters from the previous section would be normalized as follows: Encoded:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | b5 | %3D%253D | | a3 | a | | c%40 | | | a2 | r%20b | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_token | kkk9d7dh3k39sjv7 | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_nonce | 7d8f3e4a | | c2 | | | a3 | 2%20q | +------------------------+------------------+ Sorted:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | a2 | r%20b | | a3 | 2%20q | | a3 | a | | b5 | %3D%253D | | c%40 | | | c2 | | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_nonce | 7d8f3e4a | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_token | kkk9d7dh3k39sjv7 | +------------------------+------------------+ Concatenated Pairs:: +-------------------------------------+ | Name=Value | +-------------------------------------+ | a2=r%20b | | a3=2%20q | | a3=a | | b5=%3D%253D | | c%40= | | c2= | | oauth_consumer_key=9djdj82h48djs9d2 | | oauth_nonce=7d8f3e4a | | oauth_signature_method=HMAC-SHA1 | | oauth_timestamp=137131201 | | oauth_token=kkk9d7dh3k39sjv7 | +-------------------------------------+ and concatenated together into a single string (line breaks are for display purposes only):: a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1 &oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7 .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2 """ # The parameters collected in `Section 3.4.1.3`_ are normalized into a # single string as follows: # # .. _`Section 3.4.1.3`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3 # 1. First, the name and value of each parameter are encoded # (`Section 3.6`_). # # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6 key_values = [(utils.escape(k), utils.escape(v)) for k, v in params] # 2. The parameters are sorted by name, using ascending byte value # ordering. If two or more parameters share the same name, they # are sorted by their value. key_values.sort() # 3. The name of each parameter is concatenated to its corresponding # value using an "=" character (ASCII code 61) as a separator, even # if the value is empty. parameter_parts = ['{0}={1}'.format(k, v) for k, v in key_values] # 4. The sorted name/value pairs are concatenated together into a # single string by using an "&" character (ASCII code 38) as # separator. return '&'.join(parameter_parts)
def function[normalize_parameters, parameter[params]]: constant[**Parameters Normalization** Per `section 3.4.1.3.2`_ of the spec. For example, the list of parameters from the previous section would be normalized as follows: Encoded:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | b5 | %3D%253D | | a3 | a | | c%40 | | | a2 | r%20b | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_token | kkk9d7dh3k39sjv7 | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_nonce | 7d8f3e4a | | c2 | | | a3 | 2%20q | +------------------------+------------------+ Sorted:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | a2 | r%20b | | a3 | 2%20q | | a3 | a | | b5 | %3D%253D | | c%40 | | | c2 | | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_nonce | 7d8f3e4a | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_token | kkk9d7dh3k39sjv7 | +------------------------+------------------+ Concatenated Pairs:: +-------------------------------------+ | Name=Value | +-------------------------------------+ | a2=r%20b | | a3=2%20q | | a3=a | | b5=%3D%253D | | c%40= | | c2= | | oauth_consumer_key=9djdj82h48djs9d2 | | oauth_nonce=7d8f3e4a | | oauth_signature_method=HMAC-SHA1 | | oauth_timestamp=137131201 | | oauth_token=kkk9d7dh3k39sjv7 | +-------------------------------------+ and concatenated together into a single string (line breaks are for display purposes only):: a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1 &oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7 .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2 ] variable[key_values] assign[=] <ast.ListComp object at 0x7da1b175d6f0> call[name[key_values].sort, parameter[]] variable[parameter_parts] assign[=] <ast.ListComp object at 0x7da1b175cb50> return[call[constant[&].join, parameter[name[parameter_parts]]]]
keyword[def] identifier[normalize_parameters] ( identifier[params] ): literal[string] identifier[key_values] =[( identifier[utils] . identifier[escape] ( identifier[k] ), identifier[utils] . identifier[escape] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[params] ] identifier[key_values] . identifier[sort] () identifier[parameter_parts] =[ literal[string] . identifier[format] ( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[key_values] ] keyword[return] literal[string] . identifier[join] ( identifier[parameter_parts] )
def normalize_parameters(params): """**Parameters Normalization** Per `section 3.4.1.3.2`_ of the spec. For example, the list of parameters from the previous section would be normalized as follows: Encoded:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | b5 | %3D%253D | | a3 | a | | c%40 | | | a2 | r%20b | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_token | kkk9d7dh3k39sjv7 | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_nonce | 7d8f3e4a | | c2 | | | a3 | 2%20q | +------------------------+------------------+ Sorted:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | a2 | r%20b | | a3 | 2%20q | | a3 | a | | b5 | %3D%253D | | c%40 | | | c2 | | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_nonce | 7d8f3e4a | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_token | kkk9d7dh3k39sjv7 | +------------------------+------------------+ Concatenated Pairs:: +-------------------------------------+ | Name=Value | +-------------------------------------+ | a2=r%20b | | a3=2%20q | | a3=a | | b5=%3D%253D | | c%40= | | c2= | | oauth_consumer_key=9djdj82h48djs9d2 | | oauth_nonce=7d8f3e4a | | oauth_signature_method=HMAC-SHA1 | | oauth_timestamp=137131201 | | oauth_token=kkk9d7dh3k39sjv7 | +-------------------------------------+ and concatenated together into a single string (line breaks are for display purposes only):: a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1 &oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7 .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2 """ # The parameters collected in `Section 3.4.1.3`_ are normalized into a # single string as follows: # # .. _`Section 3.4.1.3`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3 # 1. First, the name and value of each parameter are encoded # (`Section 3.6`_). # # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6 key_values = [(utils.escape(k), utils.escape(v)) for (k, v) in params] # 2. The parameters are sorted by name, using ascending byte value # ordering. If two or more parameters share the same name, they # are sorted by their value. key_values.sort() # 3. The name of each parameter is concatenated to its corresponding # value using an "=" character (ASCII code 61) as a separator, even # if the value is empty. parameter_parts = ['{0}={1}'.format(k, v) for (k, v) in key_values] # 4. The sorted name/value pairs are concatenated together into a # single string by using an "&" character (ASCII code 38) as # separator. return '&'.join(parameter_parts)
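Feeding the function the raw (unencoded) pairs from the docstring example reproduces the non-oauth part of the normalized string. normalize_parameters resolves utils.escape from its own module; a stand-in with the same RFC 5849 semantics is defined here so the snippet runs on its own.

from urllib.parse import quote

class utils:                       # stand-in for the module-level utils import
    @staticmethod
    def escape(s):                 # RFC 5849 percent-encoding: only unreserved
        return quote(s, safe='~')  # characters (incl. '~') stay unencoded

params = [('b5', '=%3D'), ('a3', 'a'), ('c@', ''),
          ('a2', 'r b'), ('c2', ''), ('a3', '2 q')]
print(normalize_parameters(params))
# a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=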
def _create_container(context, path, l_mtime, size): """ Creates container for segments of file with `path` """ new_context = context.copy() new_context.input_ = None new_context.headers = None new_context.query = None container = path.split('/', 1)[0] + '_segments' cli_put_container(new_context, container) prefix = container + '/' + path.split('/', 1)[1] prefix = '%s/%s/%s/' % (prefix, l_mtime, size) return prefix
def function[_create_container, parameter[context, path, l_mtime, size]]: constant[ Creates container for segments of file with `path` ] variable[new_context] assign[=] call[name[context].copy, parameter[]] name[new_context].input_ assign[=] constant[None] name[new_context].headers assign[=] constant[None] name[new_context].query assign[=] constant[None] variable[container] assign[=] binary_operation[call[call[name[path].split, parameter[constant[/], constant[1]]]][constant[0]] + constant[_segments]] call[name[cli_put_container], parameter[name[new_context], name[container]]] variable[prefix] assign[=] binary_operation[binary_operation[name[container] + constant[/]] + call[call[name[path].split, parameter[constant[/], constant[1]]]][constant[1]]] variable[prefix] assign[=] binary_operation[constant[%s/%s/%s/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b05c8520>, <ast.Name object at 0x7da1b05cae60>, <ast.Name object at 0x7da204564460>]]] return[name[prefix]]
keyword[def] identifier[_create_container] ( identifier[context] , identifier[path] , identifier[l_mtime] , identifier[size] ): literal[string] identifier[new_context] = identifier[context] . identifier[copy] () identifier[new_context] . identifier[input_] = keyword[None] identifier[new_context] . identifier[headers] = keyword[None] identifier[new_context] . identifier[query] = keyword[None] identifier[container] = identifier[path] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string] identifier[cli_put_container] ( identifier[new_context] , identifier[container] ) identifier[prefix] = identifier[container] + literal[string] + identifier[path] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ] identifier[prefix] = literal[string] %( identifier[prefix] , identifier[l_mtime] , identifier[size] ) keyword[return] identifier[prefix]
def _create_container(context, path, l_mtime, size): """ Creates container for segments of file with `path` """ new_context = context.copy() new_context.input_ = None new_context.headers = None new_context.query = None container = path.split('/', 1)[0] + '_segments' cli_put_container(new_context, container) prefix = container + '/' + path.split('/', 1)[1] prefix = '%s/%s/%s/' % (prefix, l_mtime, size) return prefix
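The container/prefix arithmetic is plain string work, so it can be traced without the client; cli_put_container and the context object belong to the surrounding CLI and are not exercised here. The mtime and size values are arbitrary.

# Trace of the prefix _create_container would return for a sample path.
path = 'mycontainer/dir/file.dat'
container = path.split('/', 1)[0] + '_segments'    # mycontainer_segments
prefix = container + '/' + path.split('/', 1)[1]   # .../dir/file.dat
prefix = '%s/%s/%s/' % (prefix, 1500000000, 1024)
print(prefix)  # mycontainer_segments/dir/file.dat/1500000000/1024/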
def backprop(self, **args):
    """
    Computes the error and the weight error derivatives (wed) for
    back-propagation of error.
    """
    retval = self.compute_error(**args)
    if self.learning:
        self.compute_wed()
    return retval
def function[backprop, parameter[self]]: constant[ Computes error and wed for back propagation of error. ] variable[retval] assign[=] call[name[self].compute_error, parameter[]] if name[self].learning begin[:] call[name[self].compute_wed, parameter[]] return[name[retval]]
keyword[def] identifier[backprop] ( identifier[self] ,** identifier[args] ): literal[string] identifier[retval] = identifier[self] . identifier[compute_error] (** identifier[args] ) keyword[if] identifier[self] . identifier[learning] : identifier[self] . identifier[compute_wed] () keyword[return] identifier[retval]
def backprop(self, **args): """ Computes error and wed for back propagation of error. """ retval = self.compute_error(**args) if self.learning: self.compute_wed() # depends on [control=['if'], data=[]] return retval
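A sketch of the calling pattern with stubbed network internals; compute_error and compute_wed are methods of the surrounding network class, so trivial stand-ins are used here.

# Stand-in network exposing just what backprop needs.
class FakeNet:
    learning = True
    wed_done = False
    def compute_error(self, **args):
        return 0.042               # pretend total error
    def compute_wed(self):
        self.wed_done = True
    backprop = backprop            # the function defined above

net = FakeNet()
err = net.backprop(input=[0, 1], output=[1])
assert err == 0.042 and net.wed_done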
def mutate_file(backup, context): """ :type backup: bool :type context: Context """ with open(context.filename) as f: code = f.read() context.source = code if backup: with open(context.filename + '.bak', 'w') as f: f.write(code) result, number_of_mutations_performed = mutate(context) with open(context.filename, 'w') as f: f.write(result) return number_of_mutations_performed
def function[mutate_file, parameter[backup, context]]: constant[ :type backup: bool :type context: Context ] with call[name[open], parameter[name[context].filename]] begin[:] variable[code] assign[=] call[name[f].read, parameter[]] name[context].source assign[=] name[code] if name[backup] begin[:] with call[name[open], parameter[binary_operation[name[context].filename + constant[.bak]], constant[w]]] begin[:] call[name[f].write, parameter[name[code]]] <ast.Tuple object at 0x7da204960610> assign[=] call[name[mutate], parameter[name[context]]] with call[name[open], parameter[name[context].filename, constant[w]]] begin[:] call[name[f].write, parameter[name[result]]] return[name[number_of_mutations_performed]]
keyword[def] identifier[mutate_file] ( identifier[backup] , identifier[context] ): literal[string] keyword[with] identifier[open] ( identifier[context] . identifier[filename] ) keyword[as] identifier[f] : identifier[code] = identifier[f] . identifier[read] () identifier[context] . identifier[source] = identifier[code] keyword[if] identifier[backup] : keyword[with] identifier[open] ( identifier[context] . identifier[filename] + literal[string] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[code] ) identifier[result] , identifier[number_of_mutations_performed] = identifier[mutate] ( identifier[context] ) keyword[with] identifier[open] ( identifier[context] . identifier[filename] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[result] ) keyword[return] identifier[number_of_mutations_performed]
def mutate_file(backup, context): """ :type backup: bool :type context: Context """ with open(context.filename) as f: code = f.read() # depends on [control=['with'], data=['f']] context.source = code if backup: with open(context.filename + '.bak', 'w') as f: f.write(code) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] (result, number_of_mutations_performed) = mutate(context) with open(context.filename, 'w') as f: f.write(result) # depends on [control=['with'], data=['f']] return number_of_mutations_performed
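A self-contained round trip; Context and mutate are internals of the mutation-testing tool this row comes from, so minimal stand-ins are defined (in the same namespace as the function, so the lookup of mutate resolves to them) to show only the file handling and backup behavior.

# Stand-ins for the tool's Context and mutate; this mutate just flips
# one operator and reports a single mutation.
class Context:
    def __init__(self, filename):
        self.filename = filename
        self.source = None

def mutate(context):
    return context.source.replace(' + ', ' - '), 1

with open('demo.py', 'w') as f:
    f.write('x = 1 + 2\n')

n = mutate_file(backup=True, context=Context('demo.py'))
print(n)                           # 1
print(open('demo.py').read())      # x = 1 - 2
print(open('demo.py.bak').read())  # x = 1 + 2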
def makePlot(args):
    """
    Make the plot with radial velocity accuracy horizons. The plot shows V-band magnitude vs distance
    for a number of spectral types over the G_RVS range 5.7 to 16.1. In addition a set of crudely
    drawn contours shows the points where radial velocity accuracies of 2, 5, and 10 km/s are reached.

    Parameters
    ----------

    args - Command line arguments.
    """
    distances = 10.0**np.linspace(1,6,10001)
    spts=['B0V', 'A0V', 'F0V', 'G0V', 'K0V', 'K4V', 'K1III']
    twokmsRV = []
    twokmsV = []
    vabsTwokms = []
    fivekmsRV = []
    fivekmsV = []
    vabsFivekms = []
    tenkmsRV = []
    tenkmsV = []
    vabsTenkms = []
    fig=plt.figure(figsize=(11,7.8))
    deltaHue = 240.0/(len(spts)-1)
    hues = (240.0-np.arange(len(spts))*deltaHue)/360.0
    hsv=np.zeros((1,1,3))
    hsv[0,0,1]=1.0
    hsv[0,0,2]=0.9
    for hue,spt in zip(hues, spts):
        hsv[0,0,0]=hue
        vmags = vabsFromSpt(spt)+5.0*np.log10(distances)-5.0
        vmini=vminiFromSpt(spt)
        grvsmags = vmags - vminGrvsFromVmini(vmini)
        rvError = vradErrorSkyAvg(vmags, spt)
        observed = (grvsmags>=5.7) & (grvsmags<=16.1)
        rvError = rvError[observed]
        # Identify the points where the radial velocity accuracy reaches 2, 5, and 10 km/s.
        if (rvError.min()<=2.0):
            index = len(rvError[rvError<=2.0])-1
            twokmsRV.append(distances[observed][index])
            twokmsV.append(vmags[observed][index])
            vabsTwokms.append(vabsFromSpt(spt))
        if (rvError.min()<=5.0):
            index = len(rvError[rvError<=5.0])-1
            fivekmsRV.append(distances[observed][index])
            fivekmsV.append(vmags[observed][index])
            vabsFivekms.append(vabsFromSpt(spt))
        if (rvError.min()<=10.0):
            index = len(rvError[rvError<=10.0])-1
            tenkmsRV.append(distances[observed][index])
            tenkmsV.append(vmags[observed][index])
            vabsTenkms.append(vabsFromSpt(spt))
        plt.semilogx(distances[observed], vmags[observed], '-', label=spt, color=hsv_to_rgb(hsv)[0,0,:])
        plt.text(distances[observed][-1], vmags[observed][-1], spt, horizontalalignment='center',
                verticalalignment='bottom', fontsize=14)

    # Draw the "contours" of constant radial velocity accuracy.
    twokmsRV = np.array(twokmsRV)
    twokmsV = np.array(twokmsV)
    indices = np.argsort(vabsTwokms)
    plt.semilogx(twokmsRV[indices],twokmsV[indices],'k--')
    plt.text(twokmsRV[indices][-1]*0.8,twokmsV[indices][-1],"$2$ km s$^{-1}$", ha='right', size=16,
            bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))

    fivekmsRV = np.array(fivekmsRV)
    fivekmsV = np.array(fivekmsV)
    indices = np.argsort(vabsFivekms)
    plt.semilogx(fivekmsRV[indices],fivekmsV[indices],'k--')
    plt.text(fivekmsRV[indices][-1]*0.8,fivekmsV[indices][-1],"$5$ km s$^{-1}$", ha='right', size=16,
            bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))

    tenkmsRV = np.array(tenkmsRV)
    tenkmsV = np.array(tenkmsV)
    indices = np.argsort(vabsTenkms)
    plt.semilogx(tenkmsRV[indices],tenkmsV[indices],'k--')
    plt.text(tenkmsRV[indices][-1]*0.8,tenkmsV[indices][-1]+0.5,"$10$ km s$^{-1}$", ha='right', size=16,
            bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))

    plt.title('Radial velocity accuracy horizons ($A_V=0$)')
    plt.xlabel('Distance [pc]')
    plt.ylabel('V')
    plt.grid()
    #leg=plt.legend(loc=4, fontsize=14, labelspacing=0.5)
    plt.ylim(5,20)

    basename='RadialVelocityHorizons'
    if (args['pdfOutput']):
        plt.savefig(basename+'.pdf')
    elif (args['pngOutput']):
        plt.savefig(basename+'.png')
    else:
        plt.show()
def function[makePlot, parameter[args]]: constant[ Make the plot with parallax horizons. The plot shows V-band magnitude vs distance for a number of spectral types and over the range 5.7<G<20. In addition a set of crudely drawn contours show the points where 0.1, 1, and 10 per cent relative parallax accracy are reached. Parameters ---------- args - Command line arguments. ] variable[distances] assign[=] binary_operation[constant[10.0] ** call[name[np].linspace, parameter[constant[1], constant[6], constant[10001]]]] variable[spts] assign[=] list[[<ast.Constant object at 0x7da20c794d30>, <ast.Constant object at 0x7da20c795210>, <ast.Constant object at 0x7da20c794790>, <ast.Constant object at 0x7da20c795900>, <ast.Constant object at 0x7da20c796a40>, <ast.Constant object at 0x7da20c7957b0>, <ast.Constant object at 0x7da20c795fc0>]] variable[twokmsRV] assign[=] list[[]] variable[twokmsV] assign[=] list[[]] variable[vabsTwokms] assign[=] list[[]] variable[fivekmsRV] assign[=] list[[]] variable[fivekmsV] assign[=] list[[]] variable[vabsFivekms] assign[=] list[[]] variable[tenkmsRV] assign[=] list[[]] variable[tenkmsV] assign[=] list[[]] variable[vabsTenkms] assign[=] list[[]] variable[fig] assign[=] call[name[plt].figure, parameter[]] variable[deltaHue] assign[=] binary_operation[constant[240.0] / binary_operation[call[name[len], parameter[name[spts]]] - constant[1]]] variable[hues] assign[=] binary_operation[binary_operation[constant[240.0] - binary_operation[call[name[np].arange, parameter[call[name[len], parameter[name[spts]]]]] * name[deltaHue]]] / constant[360.0]] variable[hsv] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da2047e90f0>, <ast.Constant object at 0x7da2047e96f0>, <ast.Constant object at 0x7da2047ea530>]]]] call[name[hsv]][tuple[[<ast.Constant object at 0x7da2047e9990>, <ast.Constant object at 0x7da2047e8e80>, <ast.Constant object at 0x7da2047ea440>]]] assign[=] constant[1.0] call[name[hsv]][tuple[[<ast.Constant object at 0x7da2047e92a0>, <ast.Constant object at 0x7da2047e9690>, <ast.Constant object at 0x7da2047ea140>]]] assign[=] constant[0.9] for taget[tuple[[<ast.Name object at 0x7da2047e95a0>, <ast.Name object at 0x7da2047e9ba0>]]] in starred[call[name[zip], parameter[name[hues], name[spts]]]] begin[:] call[name[hsv]][tuple[[<ast.Constant object at 0x7da2047eb5e0>, <ast.Constant object at 0x7da2047ebac0>, <ast.Constant object at 0x7da2047ea8c0>]]] assign[=] name[hue] variable[vmags] assign[=] binary_operation[binary_operation[call[name[vabsFromSpt], parameter[name[spt]]] + binary_operation[constant[5.0] * call[name[np].log10, parameter[name[distances]]]]] - constant[5.0]] variable[vmini] assign[=] call[name[vminiFromSpt], parameter[name[spt]]] variable[grvsmags] assign[=] binary_operation[name[vmags] - call[name[vminGrvsFromVmini], parameter[name[vmini]]]] variable[rvError] assign[=] call[name[vradErrorSkyAvg], parameter[name[vmags], name[spt]]] variable[observed] assign[=] binary_operation[compare[name[grvsmags] greater_or_equal[>=] constant[5.7]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[grvsmags] less_or_equal[<=] constant[16.1]]] variable[rvError] assign[=] call[name[rvError]][name[observed]] if compare[call[name[rvError].min, parameter[]] less_or_equal[<=] constant[2.0]] begin[:] variable[index] assign[=] binary_operation[call[name[len], parameter[call[name[rvError]][compare[name[rvError] less_or_equal[<=] constant[2.0]]]]] - constant[1]] call[name[twokmsRV].append, 
parameter[call[call[name[distances]][name[observed]]][name[index]]]] call[name[twokmsV].append, parameter[call[call[name[vmags]][name[observed]]][name[index]]]] call[name[vabsTwokms].append, parameter[call[name[vabsFromSpt], parameter[name[spt]]]]] if compare[call[name[rvError].min, parameter[]] less_or_equal[<=] constant[5.0]] begin[:] variable[index] assign[=] binary_operation[call[name[len], parameter[call[name[rvError]][compare[name[rvError] less_or_equal[<=] constant[5.0]]]]] - constant[1]] call[name[fivekmsRV].append, parameter[call[call[name[distances]][name[observed]]][name[index]]]] call[name[fivekmsV].append, parameter[call[call[name[vmags]][name[observed]]][name[index]]]] call[name[vabsFivekms].append, parameter[call[name[vabsFromSpt], parameter[name[spt]]]]] if compare[call[name[rvError].min, parameter[]] less_or_equal[<=] constant[10.0]] begin[:] variable[index] assign[=] binary_operation[call[name[len], parameter[call[name[rvError]][compare[name[rvError] less_or_equal[<=] constant[10.0]]]]] - constant[1]] call[name[tenkmsRV].append, parameter[call[call[name[distances]][name[observed]]][name[index]]]] call[name[tenkmsV].append, parameter[call[call[name[vmags]][name[observed]]][name[index]]]] call[name[vabsTenkms].append, parameter[call[name[vabsFromSpt], parameter[name[spt]]]]] call[name[plt].semilogx, parameter[call[name[distances]][name[observed]], call[name[vmags]][name[observed]], constant[-]]] call[name[plt].text, parameter[call[call[name[distances]][name[observed]]][<ast.UnaryOp object at 0x7da2044c36d0>], call[call[name[vmags]][name[observed]]][<ast.UnaryOp object at 0x7da2044c2e00>], name[spt]]] variable[twokmsRV] assign[=] call[name[np].array, parameter[name[twokmsRV]]] variable[twokmsV] assign[=] call[name[np].array, parameter[name[twokmsV]]] variable[indices] assign[=] call[name[np].argsort, parameter[name[vabsTwokms]]] call[name[plt].semilogx, parameter[call[name[twokmsRV]][name[indices]], call[name[twokmsV]][name[indices]], constant[k--]]] call[name[plt].text, parameter[binary_operation[call[call[name[twokmsRV]][name[indices]]][<ast.UnaryOp object at 0x7da2044c3640>] * constant[0.8]], call[call[name[twokmsV]][name[indices]]][<ast.UnaryOp object at 0x7da20e9b3d00>], constant[$2$ km s$^{-1}$]]] variable[fivekmsRV] assign[=] call[name[np].array, parameter[name[fivekmsRV]]] variable[fivekmsV] assign[=] call[name[np].array, parameter[name[fivekmsV]]] variable[indices] assign[=] call[name[np].argsort, parameter[name[vabsFivekms]]] call[name[plt].semilogx, parameter[call[name[fivekmsRV]][name[indices]], call[name[fivekmsV]][name[indices]], constant[k--]]] call[name[plt].text, parameter[binary_operation[call[call[name[fivekmsRV]][name[indices]]][<ast.UnaryOp object at 0x7da18bccbc40>] * constant[0.8]], call[call[name[fivekmsV]][name[indices]]][<ast.UnaryOp object at 0x7da18bcc8a90>], constant[$5$ km s$^{-1}$]]] variable[tenkmsRV] assign[=] call[name[np].array, parameter[name[tenkmsRV]]] variable[tenkmsV] assign[=] call[name[np].array, parameter[name[tenkmsV]]] variable[indices] assign[=] call[name[np].argsort, parameter[name[vabsTenkms]]] call[name[plt].semilogx, parameter[call[name[tenkmsRV]][name[indices]], call[name[tenkmsV]][name[indices]], constant[k--]]] call[name[plt].text, parameter[binary_operation[call[call[name[tenkmsRV]][name[indices]]][<ast.UnaryOp object at 0x7da204620070>] * constant[0.8]], binary_operation[call[call[name[tenkmsV]][name[indices]]][<ast.UnaryOp object at 0x7da2046218a0>] + constant[0.5]], constant[$10$ km s$^{-1}$]]] call[name[plt].title, 
parameter[constant[Radial velocity accuracy horizons ($A_V=0$)]]] call[name[plt].xlabel, parameter[constant[Distance [pc]]]] call[name[plt].ylabel, parameter[constant[V]]] call[name[plt].grid, parameter[]] call[name[plt].ylim, parameter[constant[5], constant[20]]] variable[basename] assign[=] constant[RadialVelocityHorizons] if call[name[args]][constant[pdfOutput]] begin[:] call[name[plt].savefig, parameter[binary_operation[name[basename] + constant[.pdf]]]]
keyword[def] identifier[makePlot] ( identifier[args] ): literal[string] identifier[distances] = literal[int] ** identifier[np] . identifier[linspace] ( literal[int] , literal[int] , literal[int] ) identifier[spts] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[twokmsRV] =[] identifier[twokmsV] =[] identifier[vabsTwokms] =[] identifier[fivekmsRV] =[] identifier[fivekmsV] =[] identifier[vabsFivekms] =[] identifier[tenkmsRV] =[] identifier[tenkmsV] =[] identifier[vabsTenkms] =[] identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] )) identifier[deltaHue] = literal[int] /( identifier[len] ( identifier[spts] )- literal[int] ) identifier[hues] =( literal[int] - identifier[np] . identifier[arange] ( identifier[len] ( identifier[spts] ))* identifier[deltaHue] )/ literal[int] identifier[hsv] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] , literal[int] )) identifier[hsv] [ literal[int] , literal[int] , literal[int] ]= literal[int] identifier[hsv] [ literal[int] , literal[int] , literal[int] ]= literal[int] keyword[for] identifier[hue] , identifier[spt] keyword[in] identifier[zip] ( identifier[hues] , identifier[spts] ): identifier[hsv] [ literal[int] , literal[int] , literal[int] ]= identifier[hue] identifier[vmags] = identifier[vabsFromSpt] ( identifier[spt] )+ literal[int] * identifier[np] . identifier[log10] ( identifier[distances] )- literal[int] identifier[vmini] = identifier[vminiFromSpt] ( identifier[spt] ) identifier[grvsmags] = identifier[vmags] - identifier[vminGrvsFromVmini] ( identifier[vmini] ) identifier[rvError] = identifier[vradErrorSkyAvg] ( identifier[vmags] , identifier[spt] ) identifier[observed] =( identifier[grvsmags] >= literal[int] )&( identifier[grvsmags] <= literal[int] ) identifier[rvError] = identifier[rvError] [ identifier[observed] ] keyword[if] ( identifier[rvError] . identifier[min] ()<= literal[int] ): identifier[index] = identifier[len] ( identifier[rvError] [ identifier[rvError] <= literal[int] ])- literal[int] identifier[twokmsRV] . identifier[append] ( identifier[distances] [ identifier[observed] ][ identifier[index] ]) identifier[twokmsV] . identifier[append] ( identifier[vmags] [ identifier[observed] ][ identifier[index] ]) identifier[vabsTwokms] . identifier[append] ( identifier[vabsFromSpt] ( identifier[spt] )) keyword[if] ( identifier[rvError] . identifier[min] ()<= literal[int] ): identifier[index] = identifier[len] ( identifier[rvError] [ identifier[rvError] <= literal[int] ])- literal[int] identifier[fivekmsRV] . identifier[append] ( identifier[distances] [ identifier[observed] ][ identifier[index] ]) identifier[fivekmsV] . identifier[append] ( identifier[vmags] [ identifier[observed] ][ identifier[index] ]) identifier[vabsFivekms] . identifier[append] ( identifier[vabsFromSpt] ( identifier[spt] )) keyword[if] ( identifier[rvError] . identifier[min] ()<= literal[int] ): identifier[index] = identifier[len] ( identifier[rvError] [ identifier[rvError] <= literal[int] ])- literal[int] identifier[tenkmsRV] . identifier[append] ( identifier[distances] [ identifier[observed] ][ identifier[index] ]) identifier[tenkmsV] . identifier[append] ( identifier[vmags] [ identifier[observed] ][ identifier[index] ]) identifier[vabsTenkms] . identifier[append] ( identifier[vabsFromSpt] ( identifier[spt] )) identifier[plt] . 
identifier[semilogx] ( identifier[distances] [ identifier[observed] ], identifier[vmags] [ identifier[observed] ], literal[string] , identifier[label] = identifier[spt] , identifier[color] = identifier[hsv_to_rgb] ( identifier[hsv] )[ literal[int] , literal[int] ,:]) identifier[plt] . identifier[text] ( identifier[distances] [ identifier[observed] ][- literal[int] ], identifier[vmags] [ identifier[observed] ][- literal[int] ], identifier[spt] , identifier[horizontalalignment] = literal[string] , identifier[verticalalignment] = literal[string] , identifier[fontsize] = literal[int] ) identifier[twokmsRV] = identifier[np] . identifier[array] ( identifier[twokmsRV] ) identifier[twokmsV] = identifier[np] . identifier[array] ( identifier[twokmsV] ) identifier[indices] = identifier[np] . identifier[argsort] ( identifier[vabsTwokms] ) identifier[plt] . identifier[semilogx] ( identifier[twokmsRV] [ identifier[indices] ], identifier[twokmsV] [ identifier[indices] ], literal[string] ) identifier[plt] . identifier[text] ( identifier[twokmsRV] [ identifier[indices] ][- literal[int] ]* literal[int] , identifier[twokmsV] [ identifier[indices] ][- literal[int] ], literal[string] , identifier[ha] = literal[string] , identifier[size] = literal[int] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[ec] =( literal[int] , literal[int] , literal[int] ), identifier[fc] =( literal[int] , literal[int] , literal[int] ),)) identifier[fivekmsRV] = identifier[np] . identifier[array] ( identifier[fivekmsRV] ) identifier[fivekmsV] = identifier[np] . identifier[array] ( identifier[fivekmsV] ) identifier[indices] = identifier[np] . identifier[argsort] ( identifier[vabsFivekms] ) identifier[plt] . identifier[semilogx] ( identifier[fivekmsRV] [ identifier[indices] ], identifier[fivekmsV] [ identifier[indices] ], literal[string] ) identifier[plt] . identifier[text] ( identifier[fivekmsRV] [ identifier[indices] ][- literal[int] ]* literal[int] , identifier[fivekmsV] [ identifier[indices] ][- literal[int] ], literal[string] , identifier[ha] = literal[string] , identifier[size] = literal[int] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[ec] =( literal[int] , literal[int] , literal[int] ), identifier[fc] =( literal[int] , literal[int] , literal[int] ),)) identifier[tenkmsRV] = identifier[np] . identifier[array] ( identifier[tenkmsRV] ) identifier[tenkmsV] = identifier[np] . identifier[array] ( identifier[tenkmsV] ) identifier[indices] = identifier[np] . identifier[argsort] ( identifier[vabsTenkms] ) identifier[plt] . identifier[semilogx] ( identifier[tenkmsRV] [ identifier[indices] ], identifier[tenkmsV] [ identifier[indices] ], literal[string] ) identifier[plt] . identifier[text] ( identifier[tenkmsRV] [ identifier[indices] ][- literal[int] ]* literal[int] , identifier[tenkmsV] [ identifier[indices] ][- literal[int] ]+ literal[int] , literal[string] , identifier[ha] = literal[string] , identifier[size] = literal[int] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[ec] =( literal[int] , literal[int] , literal[int] ), identifier[fc] =( literal[int] , literal[int] , literal[int] ),)) identifier[plt] . identifier[title] ( literal[string] ) identifier[plt] . identifier[xlabel] ( literal[string] ) identifier[plt] . identifier[ylabel] ( literal[string] ) identifier[plt] . identifier[grid] () identifier[plt] . 
identifier[ylim] ( literal[int] , literal[int] ) identifier[basename] = literal[string] keyword[if] ( identifier[args] [ literal[string] ]): identifier[plt] . identifier[savefig] ( identifier[basename] + literal[string] ) keyword[elif] ( identifier[args] [ literal[string] ]): identifier[plt] . identifier[savefig] ( identifier[basename] + literal[string] ) keyword[else] : identifier[plt] . identifier[show] ()
def makePlot(args): """ Make the plot with parallax horizons. The plot shows V-band magnitude vs distance for a number of spectral types and over the range 5.7<G<20. In addition a set of crudely drawn contours show the points where 0.1, 1, and 10 per cent relative parallax accracy are reached. Parameters ---------- args - Command line arguments. """ distances = 10.0 ** np.linspace(1, 6, 10001) spts = ['B0V', 'A0V', 'F0V', 'G0V', 'K0V', 'K4V', 'K1III'] twokmsRV = [] twokmsV = [] vabsTwokms = [] fivekmsRV = [] fivekmsV = [] vabsFivekms = [] tenkmsRV = [] tenkmsV = [] vabsTenkms = [] fig = plt.figure(figsize=(11, 7.8)) deltaHue = 240.0 / (len(spts) - 1) hues = (240.0 - np.arange(len(spts)) * deltaHue) / 360.0 hsv = np.zeros((1, 1, 3)) hsv[0, 0, 1] = 1.0 hsv[0, 0, 2] = 0.9 for (hue, spt) in zip(hues, spts): hsv[0, 0, 0] = hue vmags = vabsFromSpt(spt) + 5.0 * np.log10(distances) - 5.0 vmini = vminiFromSpt(spt) grvsmags = vmags - vminGrvsFromVmini(vmini) rvError = vradErrorSkyAvg(vmags, spt) observed = (grvsmags >= 5.7) & (grvsmags <= 16.1) rvError = rvError[observed] # Identify the points where the relative parallax accuracy is 0.1, 1, or 10 per cent. if rvError.min() <= 2.0: index = len(rvError[rvError <= 2.0]) - 1 twokmsRV.append(distances[observed][index]) twokmsV.append(vmags[observed][index]) vabsTwokms.append(vabsFromSpt(spt)) # depends on [control=['if'], data=[]] if rvError.min() <= 5.0: index = len(rvError[rvError <= 5.0]) - 1 fivekmsRV.append(distances[observed][index]) fivekmsV.append(vmags[observed][index]) vabsFivekms.append(vabsFromSpt(spt)) # depends on [control=['if'], data=[]] if rvError.min() <= 10.0: index = len(rvError[rvError <= 10.0]) - 1 tenkmsRV.append(distances[observed][index]) tenkmsV.append(vmags[observed][index]) vabsTenkms.append(vabsFromSpt(spt)) # depends on [control=['if'], data=[]] plt.semilogx(distances[observed], vmags[observed], '-', label=spt, color=hsv_to_rgb(hsv)[0, 0, :]) plt.text(distances[observed][-1], vmags[observed][-1], spt, horizontalalignment='center', verticalalignment='bottom', fontsize=14) # depends on [control=['for'], data=[]] # Draw the "contours" of constant radial velocity accuracy. 
twokmsRV = np.array(twokmsRV) twokmsV = np.array(twokmsV) indices = np.argsort(vabsTwokms) plt.semilogx(twokmsRV[indices], twokmsV[indices], 'k--') plt.text(twokmsRV[indices][-1] * 0.8, twokmsV[indices][-1], '$2$ km s$^{-1}$', ha='right', size=16, bbox=dict(boxstyle='round, pad=0.3', ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0))) fivekmsRV = np.array(fivekmsRV) fivekmsV = np.array(fivekmsV) indices = np.argsort(vabsFivekms) plt.semilogx(fivekmsRV[indices], fivekmsV[indices], 'k--') plt.text(fivekmsRV[indices][-1] * 0.8, fivekmsV[indices][-1], '$5$ km s$^{-1}$', ha='right', size=16, bbox=dict(boxstyle='round, pad=0.3', ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0))) tenkmsRV = np.array(tenkmsRV) tenkmsV = np.array(tenkmsV) indices = np.argsort(vabsTenkms) plt.semilogx(tenkmsRV[indices], tenkmsV[indices], 'k--') plt.text(tenkmsRV[indices][-1] * 0.8, tenkmsV[indices][-1] + 0.5, '$10$ km s$^{-1}$', ha='right', size=16, bbox=dict(boxstyle='round, pad=0.3', ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0))) plt.title('Radial velocity accuracy horizons ($A_V=0$)') plt.xlabel('Distance [pc]') plt.ylabel('V') plt.grid() #leg=plt.legend(loc=4, fontsize=14, labelspacing=0.5) plt.ylim(5, 20) basename = 'RadialVelocityHorizons' if args['pdfOutput']: plt.savefig(basename + '.pdf') # depends on [control=['if'], data=[]] elif args['pngOutput']: plt.savefig(basename + '.png') # depends on [control=['if'], data=[]] else: plt.show()
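Running the script end to end needs the PyGaia-style helper functions it calls (vabsFromSpt, vminiFromSpt, vminGrvsFromVmini, vradErrorSkyAvg) plus the numpy/matplotlib imports that live elsewhere in the file; with those in place, the entry point only reads two flags.

# Hypothetical argument dict; makePlot only consumes these two keys.
args = {'pdfOutput': False, 'pngOutput': True}
makePlot(args)   # writes RadialVelocityHorizons.png next to the script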
def mv_voltage_deviation(network, voltage_levels='mv_lv'):
    """
    Checks for voltage stability issues in MV grid.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
    voltage_levels : :obj:`str`
        Specifies which allowed voltage deviations to use. Possible options
        are:

        * 'mv_lv'
          This is the default. The allowed voltage deviation for nodes in the
          MV grid is the same as for nodes in the LV grid. Further load and
          feed-in case are not distinguished.
        * 'mv'
          Use this to handle allowed voltage deviations in the MV and LV grid
          differently. Here, load and feed-in case are differentiated as
          well.

    Returns
    -------
    :obj:`dict`
        Dictionary with :class:`~.grid.grids.MVGrid` as key and a
        :pandas:`pandas.DataFrame<dataframe>` with its critical nodes, sorted
        descending by voltage deviation, as value. The index of the dataframe
        contains all nodes (of type :class:`~.grid.components.Generator`,
        :class:`~.grid.components.Load`, etc.) with voltage issues. Columns
        are 'v_mag_pu' containing the maximum voltage deviation as float and
        'time_index' containing the corresponding time step the voltage issue
        occurred in as :pandas:`pandas.Timestamp<timestamp>`.

    Notes
    -----
    Voltage issues are determined based on allowed voltage deviations defined
    in the config file 'config_grid_expansion' in section
    'grid_expansion_allowed_voltage_deviations'.

    """

    crit_nodes = {}

    v_dev_allowed_per_case = {}
    v_dev_allowed_per_case['feedin_case_lower'] = 0.9
    v_dev_allowed_per_case['load_case_upper'] = 1.1

    offset = network.config[
        'grid_expansion_allowed_voltage_deviations']['hv_mv_trafo_offset']
    control_deviation = network.config[
        'grid_expansion_allowed_voltage_deviations'][
        'hv_mv_trafo_control_deviation']

    if voltage_levels == 'mv_lv':
        v_dev_allowed_per_case['feedin_case_upper'] = \
            1 + offset + control_deviation + network.config[
                'grid_expansion_allowed_voltage_deviations'][
                'mv_lv_feedin_case_max_v_deviation']
        v_dev_allowed_per_case['load_case_lower'] = \
            1 + offset - control_deviation - network.config[
                'grid_expansion_allowed_voltage_deviations'][
                'mv_lv_load_case_max_v_deviation']
    elif voltage_levels == 'mv':
        v_dev_allowed_per_case['feedin_case_upper'] = \
            1 + offset + control_deviation + network.config[
                'grid_expansion_allowed_voltage_deviations'][
                'mv_feedin_case_max_v_deviation']
        v_dev_allowed_per_case['load_case_lower'] = \
            1 + offset - control_deviation - network.config[
                'grid_expansion_allowed_voltage_deviations'][
                'mv_load_case_max_v_deviation']
    else:
        raise ValueError(
            'Specified mode {} is not a valid option.'.format(voltage_levels))

    # upper and lower allowed voltage deviation in each time step
    v_dev_allowed_upper = \
        network.timeseries.timesteps_load_feedin_case.case.apply(
            lambda _: v_dev_allowed_per_case['{}_upper'.format(_)])
    v_dev_allowed_lower = \
        network.timeseries.timesteps_load_feedin_case.case.apply(
            lambda _: v_dev_allowed_per_case['{}_lower'.format(_)])

    nodes = network.mv_grid.graph.nodes()

    crit_nodes_grid = _voltage_deviation(
        network, nodes, v_dev_allowed_upper, v_dev_allowed_lower,
        voltage_level='mv')

    if not crit_nodes_grid.empty:
        crit_nodes[network.mv_grid] = crit_nodes_grid.sort_values(
            by=['v_mag_pu'], ascending=False)
        logger.debug(
            '==> {} node(s) in MV grid has/have voltage issues.'.format(
                crit_nodes[network.mv_grid].shape[0]))
    else:
        logger.debug('==> No voltage issues in MV grid.')

    return crit_nodes
def function[mv_voltage_deviation, parameter[network, voltage_levels]]: constant[ Checks for voltage stability issues in MV grid. Parameters ---------- network : :class:`~.grid.network.Network` voltage_levels : :obj:`str` Specifies which allowed voltage deviations to use. Possible options are: * 'mv_lv' This is the default. The allowed voltage deviation for nodes in the MV grid is the same as for nodes in the LV grid. Further load and feed-in case are not distinguished. * 'mv' Use this to handle allowed voltage deviations in the MV and LV grid differently. Here, load and feed-in case are differentiated as well. Returns ------- :obj:`dict` Dictionary with :class:`~.grid.grids.MVGrid` as key and a :pandas:`pandas.DataFrame<dataframe>` with its critical nodes, sorted descending by voltage deviation, as value. Index of the dataframe are all nodes (of type :class:`~.grid.components.Generator`, :class:`~.grid.components.Load`, etc.) with over-voltage issues. Columns are 'v_mag_pu' containing the maximum voltage deviation as float and 'time_index' containing the corresponding time step the over-voltage occured in as :pandas:`pandas.Timestamp<timestamp>`. Notes ----- Over-voltage is determined based on allowed voltage deviations defined in the config file 'config_grid_expansion' in section 'grid_expansion_allowed_voltage_deviations'. ] variable[crit_nodes] assign[=] dictionary[[], []] variable[v_dev_allowed_per_case] assign[=] dictionary[[], []] call[name[v_dev_allowed_per_case]][constant[feedin_case_lower]] assign[=] constant[0.9] call[name[v_dev_allowed_per_case]][constant[load_case_upper]] assign[=] constant[1.1] variable[offset] assign[=] call[call[name[network].config][constant[grid_expansion_allowed_voltage_deviations]]][constant[hv_mv_trafo_offset]] variable[control_deviation] assign[=] call[call[name[network].config][constant[grid_expansion_allowed_voltage_deviations]]][constant[hv_mv_trafo_control_deviation]] if compare[name[voltage_levels] equal[==] constant[mv_lv]] begin[:] call[name[v_dev_allowed_per_case]][constant[feedin_case_upper]] assign[=] binary_operation[binary_operation[binary_operation[constant[1] + name[offset]] + name[control_deviation]] + call[call[name[network].config][constant[grid_expansion_allowed_voltage_deviations]]][constant[mv_lv_feedin_case_max_v_deviation]]] call[name[v_dev_allowed_per_case]][constant[load_case_lower]] assign[=] binary_operation[binary_operation[binary_operation[constant[1] + name[offset]] - name[control_deviation]] - call[call[name[network].config][constant[grid_expansion_allowed_voltage_deviations]]][constant[mv_lv_load_case_max_v_deviation]]] variable[v_dev_allowed_upper] assign[=] call[name[network].timeseries.timesteps_load_feedin_case.case.apply, parameter[<ast.Lambda object at 0x7da18bc72440>]] variable[v_dev_allowed_lower] assign[=] call[name[network].timeseries.timesteps_load_feedin_case.case.apply, parameter[<ast.Lambda object at 0x7da18bc73820>]] variable[nodes] assign[=] call[name[network].mv_grid.graph.nodes, parameter[]] variable[crit_nodes_grid] assign[=] call[name[_voltage_deviation], parameter[name[network], name[nodes], name[v_dev_allowed_upper], name[v_dev_allowed_lower]]] if <ast.UnaryOp object at 0x7da18bc73d60> begin[:] call[name[crit_nodes]][name[network].mv_grid] assign[=] call[name[crit_nodes_grid].sort_values, parameter[]] call[name[logger].debug, parameter[call[constant[==> {} node(s) in MV grid has/have voltage issues.].format, parameter[call[call[name[crit_nodes]][name[network].mv_grid].shape][constant[0]]]]]] 
return[name[crit_nodes]]
keyword[def] identifier[mv_voltage_deviation] ( identifier[network] , identifier[voltage_levels] = literal[string] ): literal[string] identifier[crit_nodes] ={} identifier[v_dev_allowed_per_case] ={} identifier[v_dev_allowed_per_case] [ literal[string] ]= literal[int] identifier[v_dev_allowed_per_case] [ literal[string] ]= literal[int] identifier[offset] = identifier[network] . identifier[config] [ literal[string] ][ literal[string] ] identifier[control_deviation] = identifier[network] . identifier[config] [ literal[string] ][ literal[string] ] keyword[if] identifier[voltage_levels] == literal[string] : identifier[v_dev_allowed_per_case] [ literal[string] ]= literal[int] + identifier[offset] + identifier[control_deviation] + identifier[network] . identifier[config] [ literal[string] ][ literal[string] ] identifier[v_dev_allowed_per_case] [ literal[string] ]= literal[int] + identifier[offset] - identifier[control_deviation] - identifier[network] . identifier[config] [ literal[string] ][ literal[string] ] keyword[elif] identifier[voltage_levels] == literal[string] : identifier[v_dev_allowed_per_case] [ literal[string] ]= literal[int] + identifier[offset] + identifier[control_deviation] + identifier[network] . identifier[config] [ literal[string] ][ literal[string] ] identifier[v_dev_allowed_per_case] [ literal[string] ]= literal[int] + identifier[offset] - identifier[control_deviation] - identifier[network] . identifier[config] [ literal[string] ][ literal[string] ] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[voltage_levels] )) identifier[v_dev_allowed_upper] = identifier[network] . identifier[timeseries] . identifier[timesteps_load_feedin_case] . identifier[case] . identifier[apply] ( keyword[lambda] identifier[_] : identifier[v_dev_allowed_per_case] [ literal[string] . identifier[format] ( identifier[_] )]) identifier[v_dev_allowed_lower] = identifier[network] . identifier[timeseries] . identifier[timesteps_load_feedin_case] . identifier[case] . identifier[apply] ( keyword[lambda] identifier[_] : identifier[v_dev_allowed_per_case] [ literal[string] . identifier[format] ( identifier[_] )]) identifier[nodes] = identifier[network] . identifier[mv_grid] . identifier[graph] . identifier[nodes] () identifier[crit_nodes_grid] = identifier[_voltage_deviation] ( identifier[network] , identifier[nodes] , identifier[v_dev_allowed_upper] , identifier[v_dev_allowed_lower] , identifier[voltage_level] = literal[string] ) keyword[if] keyword[not] identifier[crit_nodes_grid] . identifier[empty] : identifier[crit_nodes] [ identifier[network] . identifier[mv_grid] ]= identifier[crit_nodes_grid] . identifier[sort_values] ( identifier[by] =[ literal[string] ], identifier[ascending] = keyword[False] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[crit_nodes] [ identifier[network] . identifier[mv_grid] ]. identifier[shape] [ literal[int] ])) keyword[else] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[crit_nodes]
def mv_voltage_deviation(network, voltage_levels='mv_lv'): """ Checks for voltage stability issues in MV grid. Parameters ---------- network : :class:`~.grid.network.Network` voltage_levels : :obj:`str` Specifies which allowed voltage deviations to use. Possible options are: * 'mv_lv' This is the default. The allowed voltage deviation for nodes in the MV grid is the same as for nodes in the LV grid. Further load and feed-in case are not distinguished. * 'mv' Use this to handle allowed voltage deviations in the MV and LV grid differently. Here, load and feed-in case are differentiated as well. Returns ------- :obj:`dict` Dictionary with :class:`~.grid.grids.MVGrid` as key and a :pandas:`pandas.DataFrame<dataframe>` with its critical nodes, sorted descending by voltage deviation, as value. Index of the dataframe are all nodes (of type :class:`~.grid.components.Generator`, :class:`~.grid.components.Load`, etc.) with over-voltage issues. Columns are 'v_mag_pu' containing the maximum voltage deviation as float and 'time_index' containing the corresponding time step the over-voltage occured in as :pandas:`pandas.Timestamp<timestamp>`. Notes ----- Over-voltage is determined based on allowed voltage deviations defined in the config file 'config_grid_expansion' in section 'grid_expansion_allowed_voltage_deviations'. """ crit_nodes = {} v_dev_allowed_per_case = {} v_dev_allowed_per_case['feedin_case_lower'] = 0.9 v_dev_allowed_per_case['load_case_upper'] = 1.1 offset = network.config['grid_expansion_allowed_voltage_deviations']['hv_mv_trafo_offset'] control_deviation = network.config['grid_expansion_allowed_voltage_deviations']['hv_mv_trafo_control_deviation'] if voltage_levels == 'mv_lv': v_dev_allowed_per_case['feedin_case_upper'] = 1 + offset + control_deviation + network.config['grid_expansion_allowed_voltage_deviations']['mv_lv_feedin_case_max_v_deviation'] v_dev_allowed_per_case['load_case_lower'] = 1 + offset - control_deviation - network.config['grid_expansion_allowed_voltage_deviations']['mv_lv_load_case_max_v_deviation'] # depends on [control=['if'], data=[]] elif voltage_levels == 'mv': v_dev_allowed_per_case['feedin_case_upper'] = 1 + offset + control_deviation + network.config['grid_expansion_allowed_voltage_deviations']['mv_feedin_case_max_v_deviation'] v_dev_allowed_per_case['load_case_lower'] = 1 + offset - control_deviation - network.config['grid_expansion_allowed_voltage_deviations']['mv_load_case_max_v_deviation'] # depends on [control=['if'], data=[]] else: raise ValueError('Specified mode {} is not a valid option.'.format(voltage_levels)) # maximum allowed apparent power of station in each time step v_dev_allowed_upper = network.timeseries.timesteps_load_feedin_case.case.apply(lambda _: v_dev_allowed_per_case['{}_upper'.format(_)]) v_dev_allowed_lower = network.timeseries.timesteps_load_feedin_case.case.apply(lambda _: v_dev_allowed_per_case['{}_lower'.format(_)]) nodes = network.mv_grid.graph.nodes() crit_nodes_grid = _voltage_deviation(network, nodes, v_dev_allowed_upper, v_dev_allowed_lower, voltage_level='mv') if not crit_nodes_grid.empty: crit_nodes[network.mv_grid] = crit_nodes_grid.sort_values(by=['v_mag_pu'], ascending=False) logger.debug('==> {} node(s) in MV grid has/have voltage issues.'.format(crit_nodes[network.mv_grid].shape[0])) # depends on [control=['if'], data=[]] else: logger.debug('==> No voltage issues in MV grid.') return crit_nodes
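The returned structure is easiest to understand with a mocked result; in the real output the keys are MVGrid objects and the index entries are grid components, but pandas stand-ins show the access pattern concretely.

import pandas as pd

# Mocked 'critical nodes' frame in the documented shape.
df = pd.DataFrame({'v_mag_pu': [1.11, 1.13],
                   'time_index': pd.to_datetime(['2011-01-01 12:00',
                                                 '2011-01-01 13:00'])},
                  index=['Generator_3', 'Load_7'])
crit_nodes = {'mv_grid': df.sort_values(by=['v_mag_pu'], ascending=False)}
worst = crit_nodes['mv_grid'].iloc[0]
print(worst.name, worst['v_mag_pu'], worst['time_index'])
# Load_7 1.13 2011-01-01 13:00:00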
def data_orientation(self): """return a tuple of my permutated axes, non_indexable at the front""" return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes]))
def function[data_orientation, parameter[self]]: constant[return a tuple of my permutated axes, non_indexable at the front] return[call[name[tuple], parameter[call[name[itertools].chain, parameter[<ast.ListComp object at 0x7da18bcca680>, <ast.ListComp object at 0x7da18bccbfa0>]]]]]
keyword[def] identifier[data_orientation] ( identifier[self] ): literal[string] keyword[return] identifier[tuple] ( identifier[itertools] . identifier[chain] ([ identifier[int] ( identifier[a] [ literal[int] ]) keyword[for] identifier[a] keyword[in] identifier[self] . identifier[non_index_axes] ], [ identifier[int] ( identifier[a] . identifier[axis] ) keyword[for] identifier[a] keyword[in] identifier[self] . identifier[index_axes] ]))
def data_orientation(self): """return a tuple of my permutated axes, non_indexable at the front""" return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes]))
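A self-contained sketch of the axis-flattening idiom in this record; the `_Axis` class and the tuples below are stand-ins for PyTables axis descriptors.

import itertools

non_index_axes = [(1, ['A', 'B'])]   # (axis number, labels) pairs

class _Axis(object):
    def __init__(self, axis):
        self.axis = axis

index_axes = [_Axis(0)]

# non-indexable axes first, then index axes
orientation = tuple(itertools.chain(
    [int(a[0]) for a in non_index_axes],
    [int(a.axis) for a in index_axes]))
print(orientation)  # (1, 0)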
def get_subnets():
        """
        :return: all known subnets
        """
        LOGGER.debug("SubnetService.get_subnets")
        args = {'http_operation': 'GET', 'operation_path': ''}
        response = SubnetService.requester.call(args)
        ret = None
        if response.rc == 0:
            ret = []
            for subnet in response.response_content['subnets']:
                ret.append(Subnet.json_2_subnet(subnet))
        elif response.rc != 404:
            err_msg = 'SubnetService.get_subnets - Problem while getting subnets. ' \
                      'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
                      " (" + str(response.rc) + ")"
            LOGGER.warning(err_msg)
        return ret
def function[get_subnets, parameter[]]: constant[ :return: all knows subnets ] call[name[LOGGER].debug, parameter[constant[SubnetService.get_subnets]]] variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b1379600>, <ast.Constant object at 0x7da1b1379480>], [<ast.Constant object at 0x7da1b137a200>, <ast.Constant object at 0x7da1b1379570>]] variable[response] assign[=] call[name[SubnetService].requester.call, parameter[name[args]]] variable[ret] assign[=] constant[None] if compare[name[response].rc equal[==] constant[0]] begin[:] variable[ret] assign[=] list[[]] for taget[name[subnet]] in starred[call[name[response].response_content][constant[subnets]]] begin[:] call[name[ret].append, parameter[call[name[Subnet].json_2_subnet, parameter[name[subnet]]]]] return[name[ret]]
keyword[def] identifier[get_subnets] (): literal[string] identifier[LOGGER] . identifier[debug] ( literal[string] ) identifier[args] ={ literal[string] : literal[string] , literal[string] : literal[string] } identifier[response] = identifier[SubnetService] . identifier[requester] . identifier[call] ( identifier[args] ) identifier[ret] = keyword[None] keyword[if] identifier[response] . identifier[rc] == literal[int] : identifier[ret] =[] keyword[for] identifier[subnet] keyword[in] identifier[response] . identifier[response_content] [ literal[string] ]: identifier[ret] . identifier[append] ( identifier[Subnet] . identifier[json_2_subnet] ( identifier[subnet] )) keyword[elif] identifier[response] . identifier[rc] != literal[int] : identifier[err_msg] = literal[string] literal[string] + identifier[str] ( identifier[response] . identifier[response_content] )+ literal[string] + identifier[str] ( identifier[response] . identifier[error_message] )+ literal[string] + identifier[str] ( identifier[response] . identifier[rc] )+ literal[string] identifier[LOGGER] . identifier[warning] ( identifier[err_msg] ) keyword[return] identifier[ret]
def get_subnets():
    """
    :return: all known subnets
    """
    LOGGER.debug('SubnetService.get_subnets')
    args = {'http_operation': 'GET', 'operation_path': ''}
    response = SubnetService.requester.call(args)
    ret = None
    if response.rc == 0:
        ret = []
        for subnet in response.response_content['subnets']:
            ret.append(Subnet.json_2_subnet(subnet)) # depends on [control=['for'], data=['subnet']] # depends on [control=['if'], data=[]]
    elif response.rc != 404:
        err_msg = 'SubnetService.get_subnets - Problem while getting subnets. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + ' (' + str(response.rc) + ')'
        LOGGER.warning(err_msg) # depends on [control=['if'], data=[]]
    return ret
def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True): '''Create a handler for query engine based on a URL. The following environment variables are used for default connection: TD_API_KEY API key TD_API_SERVER API server (default: api.treasuredata.com) HTTP_PROXY HTTP proxy (optional) Parameters ---------- url : string Engine descriptor in the form "type://apikey@host/database?params..." Use shorthand notation "type:database?params..." for the default connection. con : Connection, optional Handler returned by connect. If not given, default connection is used. header : string or boolean, default True Prepend comment strings, in the form "-- comment", as a header of queries. Set False to disable header. show_progress : double or boolean, default 5.0 Number of seconds to wait before printing progress. Set False to disable progress entirely. clear_progress : boolean, default True If True, clear progress when query completed. Returns ------- QueryEngine ''' url = urlparse(url) engine_type = url.scheme if url.scheme else 'presto' if con is None: if url.netloc: # create connection apikey, host = url.netloc.split('@') con = Connection(apikey=apikey, endpoint="https://{0}/".format(host)) else: # default connection con = Connection() database = url.path[1:] if url.path.startswith('/') else url.path params = { 'type': engine_type, } params.update(parse_qsl(url.query)) return QueryEngine(con, database, params, header=header, show_progress=show_progress, clear_progress=clear_progress)
def function[create_engine, parameter[url, con, header, show_progress, clear_progress]]: constant[Create a handler for query engine based on a URL. The following environment variables are used for default connection: TD_API_KEY API key TD_API_SERVER API server (default: api.treasuredata.com) HTTP_PROXY HTTP proxy (optional) Parameters ---------- url : string Engine descriptor in the form "type://apikey@host/database?params..." Use shorthand notation "type:database?params..." for the default connection. con : Connection, optional Handler returned by connect. If not given, default connection is used. header : string or boolean, default True Prepend comment strings, in the form "-- comment", as a header of queries. Set False to disable header. show_progress : double or boolean, default 5.0 Number of seconds to wait before printing progress. Set False to disable progress entirely. clear_progress : boolean, default True If True, clear progress when query completed. Returns ------- QueryEngine ] variable[url] assign[=] call[name[urlparse], parameter[name[url]]] variable[engine_type] assign[=] <ast.IfExp object at 0x7da20c6c5ba0> if compare[name[con] is constant[None]] begin[:] if name[url].netloc begin[:] <ast.Tuple object at 0x7da20c6c4f70> assign[=] call[name[url].netloc.split, parameter[constant[@]]] variable[con] assign[=] call[name[Connection], parameter[]] variable[database] assign[=] <ast.IfExp object at 0x7da20c6c6b30> variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c48e0>], [<ast.Name object at 0x7da20c6c75b0>]] call[name[params].update, parameter[call[name[parse_qsl], parameter[name[url].query]]]] return[call[name[QueryEngine], parameter[name[con], name[database], name[params]]]]
keyword[def] identifier[create_engine] ( identifier[url] , identifier[con] = keyword[None] , identifier[header] = keyword[True] , identifier[show_progress] = literal[int] , identifier[clear_progress] = keyword[True] ): literal[string] identifier[url] = identifier[urlparse] ( identifier[url] ) identifier[engine_type] = identifier[url] . identifier[scheme] keyword[if] identifier[url] . identifier[scheme] keyword[else] literal[string] keyword[if] identifier[con] keyword[is] keyword[None] : keyword[if] identifier[url] . identifier[netloc] : identifier[apikey] , identifier[host] = identifier[url] . identifier[netloc] . identifier[split] ( literal[string] ) identifier[con] = identifier[Connection] ( identifier[apikey] = identifier[apikey] , identifier[endpoint] = literal[string] . identifier[format] ( identifier[host] )) keyword[else] : identifier[con] = identifier[Connection] () identifier[database] = identifier[url] . identifier[path] [ literal[int] :] keyword[if] identifier[url] . identifier[path] . identifier[startswith] ( literal[string] ) keyword[else] identifier[url] . identifier[path] identifier[params] ={ literal[string] : identifier[engine_type] , } identifier[params] . identifier[update] ( identifier[parse_qsl] ( identifier[url] . identifier[query] )) keyword[return] identifier[QueryEngine] ( identifier[con] , identifier[database] , identifier[params] , identifier[header] = identifier[header] , identifier[show_progress] = identifier[show_progress] , identifier[clear_progress] = identifier[clear_progress] )
def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True): """Create a handler for query engine based on a URL. The following environment variables are used for default connection: TD_API_KEY API key TD_API_SERVER API server (default: api.treasuredata.com) HTTP_PROXY HTTP proxy (optional) Parameters ---------- url : string Engine descriptor in the form "type://apikey@host/database?params..." Use shorthand notation "type:database?params..." for the default connection. con : Connection, optional Handler returned by connect. If not given, default connection is used. header : string or boolean, default True Prepend comment strings, in the form "-- comment", as a header of queries. Set False to disable header. show_progress : double or boolean, default 5.0 Number of seconds to wait before printing progress. Set False to disable progress entirely. clear_progress : boolean, default True If True, clear progress when query completed. Returns ------- QueryEngine """ url = urlparse(url) engine_type = url.scheme if url.scheme else 'presto' if con is None: if url.netloc: # create connection (apikey, host) = url.netloc.split('@') con = Connection(apikey=apikey, endpoint='https://{0}/'.format(host)) # depends on [control=['if'], data=[]] else: # default connection con = Connection() # depends on [control=['if'], data=['con']] database = url.path[1:] if url.path.startswith('/') else url.path params = {'type': engine_type} params.update(parse_qsl(url.query)) return QueryEngine(con, database, params, header=header, show_progress=show_progress, clear_progress=clear_progress)
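How the descriptor URL from the docstring decomposes, traced with the stdlib parsers (Python 3 names; the apikey and database values are placeholders).

from urllib.parse import urlparse, parse_qsl

url = urlparse('presto://abc123@api.treasuredata.com/sample_db?priority=0')
engine_type = url.scheme or 'presto'      # 'presto'
apikey, host = url.netloc.split('@')      # 'abc123', 'api.treasuredata.com'
database = url.path.lstrip('/')           # 'sample_db'
params = dict(parse_qsl(url.query))       # {'priority': '0'}
print(engine_type, host, database, params)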
def name(self, src=None): """Return string representing the name of this type.""" res = [_get_type_name(tt, src) for tt in self._types] if len(res) == 2 and "None" in res: res.remove("None") return "?" + res[0] else: return " | ".join(res)
def function[name, parameter[self, src]]: constant[Return string representing the name of this type.] variable[res] assign[=] <ast.ListComp object at 0x7da1b03b9f30> if <ast.BoolOp object at 0x7da1b03ba6e0> begin[:] call[name[res].remove, parameter[constant[None]]] return[binary_operation[constant[?] + call[name[res]][constant[0]]]]
keyword[def] identifier[name] ( identifier[self] , identifier[src] = keyword[None] ): literal[string] identifier[res] =[ identifier[_get_type_name] ( identifier[tt] , identifier[src] ) keyword[for] identifier[tt] keyword[in] identifier[self] . identifier[_types] ] keyword[if] identifier[len] ( identifier[res] )== literal[int] keyword[and] literal[string] keyword[in] identifier[res] : identifier[res] . identifier[remove] ( literal[string] ) keyword[return] literal[string] + identifier[res] [ literal[int] ] keyword[else] : keyword[return] literal[string] . identifier[join] ( identifier[res] )
def name(self, src=None): """Return string representing the name of this type.""" res = [_get_type_name(tt, src) for tt in self._types] if len(res) == 2 and 'None' in res: res.remove('None') return '?' + res[0] # depends on [control=['if'], data=[]] else: return ' | '.join(res)
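A standalone illustration of the optional-type rendering rule above: a two-member union containing None collapses to a leading '?', everything else joins with ' | '.

def union_name(type_names):
    res = list(type_names)
    if len(res) == 2 and "None" in res:
        res.remove("None")
        return "?" + res[0]
    return " | ".join(res)

print(union_name(["int", "None"]))   # ?int
print(union_name(["int", "str"]))    # int | str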
def track_event(self, name: str, properties: Dict[str, object] = None, measurements: Dict[str, object] = None) -> None: """ Send information about a single event that has occurred in the context of the application. :param name: the data to associate to this event. :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None) """ pass
def function[track_event, parameter[self, name, properties, measurements]]: constant[ Send information about a single event that has occurred in the context of the application. :param name: the data to associate to this event. :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None) ] pass
keyword[def] identifier[track_event] ( identifier[self] , identifier[name] : identifier[str] , identifier[properties] : identifier[Dict] [ identifier[str] , identifier[object] ]= keyword[None] , identifier[measurements] : identifier[Dict] [ identifier[str] , identifier[object] ]= keyword[None] )-> keyword[None] : literal[string] keyword[pass]
def track_event(self, name: str, properties: Dict[str, object]=None, measurements: Dict[str, object]=None) -> None: """ Send information about a single event that has occurred in the context of the application. :param name: the data to associate to this event. :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None) """ pass
def add_proof(self, certificate_metadata, merkle_proof): """ :param certificate_metadata: :param merkle_proof: :return: """ certificate_json = self._get_certificate_to_issue(certificate_metadata) certificate_json['signature'] = merkle_proof with open(certificate_metadata.blockchain_cert_file_name, 'w') as out_file: out_file.write(json.dumps(certificate_json))
def function[add_proof, parameter[self, certificate_metadata, merkle_proof]]: constant[ :param certificate_metadata: :param merkle_proof: :return: ] variable[certificate_json] assign[=] call[name[self]._get_certificate_to_issue, parameter[name[certificate_metadata]]] call[name[certificate_json]][constant[signature]] assign[=] name[merkle_proof] with call[name[open], parameter[name[certificate_metadata].blockchain_cert_file_name, constant[w]]] begin[:] call[name[out_file].write, parameter[call[name[json].dumps, parameter[name[certificate_json]]]]]
keyword[def] identifier[add_proof] ( identifier[self] , identifier[certificate_metadata] , identifier[merkle_proof] ): literal[string] identifier[certificate_json] = identifier[self] . identifier[_get_certificate_to_issue] ( identifier[certificate_metadata] ) identifier[certificate_json] [ literal[string] ]= identifier[merkle_proof] keyword[with] identifier[open] ( identifier[certificate_metadata] . identifier[blockchain_cert_file_name] , literal[string] ) keyword[as] identifier[out_file] : identifier[out_file] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[certificate_json] ))
def add_proof(self, certificate_metadata, merkle_proof): """ :param certificate_metadata: :param merkle_proof: :return: """ certificate_json = self._get_certificate_to_issue(certificate_metadata) certificate_json['signature'] = merkle_proof with open(certificate_metadata.blockchain_cert_file_name, 'w') as out_file: out_file.write(json.dumps(certificate_json)) # depends on [control=['with'], data=['out_file']]
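A self-contained sketch of the proof-attachment step: load or build the certificate JSON, set its 'signature' to the merkle proof, and write it back. The file name and proof contents here are illustrative only.

import json

cert = {'recipient': 'alice', 'badge': 'demo'}                    # placeholder cert
merkle_proof = {'type': 'MerkleProof2017', 'merkleRoot': 'deadbeef'}

cert['signature'] = merkle_proof
with open('cert.json', 'w') as out_file:
    out_file.write(json.dumps(cert))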
def fetch(self, resource_class): """Construct a :class:`.Request` for the given resource type. Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly. Examples:: client.fetch(Asset) client.fetch(Entry) client.fetch(ContentType) client.fetch(CustomEntryClass) :param resource_class: The type of resource to be fetched. :return: :class:`.Request` instance. """ if issubclass(resource_class, Entry): params = None content_type = getattr(resource_class, '__content_type__', None) if content_type is not None: params = {'content_type': resource_class.__content_type__} return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links, params=params) else: remote_path = utils.path_for_class(resource_class) if remote_path is None: raise Exception('Invalid resource type \"{0}\".'.format(resource_class)) return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)
def function[fetch, parameter[self, resource_class]]: constant[Construct a :class:`.Request` for the given resource type. Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly. Examples:: client.fetch(Asset) client.fetch(Entry) client.fetch(ContentType) client.fetch(CustomEntryClass) :param resource_class: The type of resource to be fetched. :return: :class:`.Request` instance. ] if call[name[issubclass], parameter[name[resource_class], name[Entry]]] begin[:] variable[params] assign[=] constant[None] variable[content_type] assign[=] call[name[getattr], parameter[name[resource_class], constant[__content_type__], constant[None]]] if compare[name[content_type] is_not constant[None]] begin[:] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da207f98220>], [<ast.Attribute object at 0x7da207f986d0>]] return[call[name[RequestArray], parameter[name[self].dispatcher, call[name[utils].path_for_class, parameter[name[resource_class]]], name[self].config.resolve_links]]]
keyword[def] identifier[fetch] ( identifier[self] , identifier[resource_class] ): literal[string] keyword[if] identifier[issubclass] ( identifier[resource_class] , identifier[Entry] ): identifier[params] = keyword[None] identifier[content_type] = identifier[getattr] ( identifier[resource_class] , literal[string] , keyword[None] ) keyword[if] identifier[content_type] keyword[is] keyword[not] keyword[None] : identifier[params] ={ literal[string] : identifier[resource_class] . identifier[__content_type__] } keyword[return] identifier[RequestArray] ( identifier[self] . identifier[dispatcher] , identifier[utils] . identifier[path_for_class] ( identifier[resource_class] ), identifier[self] . identifier[config] . identifier[resolve_links] , identifier[params] = identifier[params] ) keyword[else] : identifier[remote_path] = identifier[utils] . identifier[path_for_class] ( identifier[resource_class] ) keyword[if] identifier[remote_path] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[resource_class] )) keyword[return] identifier[RequestArray] ( identifier[self] . identifier[dispatcher] , identifier[remote_path] , identifier[self] . identifier[config] . identifier[resolve_links] )
def fetch(self, resource_class): """Construct a :class:`.Request` for the given resource type. Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly. Examples:: client.fetch(Asset) client.fetch(Entry) client.fetch(ContentType) client.fetch(CustomEntryClass) :param resource_class: The type of resource to be fetched. :return: :class:`.Request` instance. """ if issubclass(resource_class, Entry): params = None content_type = getattr(resource_class, '__content_type__', None) if content_type is not None: params = {'content_type': resource_class.__content_type__} # depends on [control=['if'], data=[]] return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links, params=params) # depends on [control=['if'], data=[]] else: remote_path = utils.path_for_class(resource_class) if remote_path is None: raise Exception('Invalid resource type "{0}".'.format(resource_class)) # depends on [control=['if'], data=[]] return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)
def parse_byteranges(cls, environ): """ Outputs a list of tuples with ranges or the empty list According to the rfc, start or end values can be omitted """ r = [] s = environ.get(cls.header_range, '').replace(' ','').lower() if s: l = s.split('=') if len(l) == 2: unit, vals = tuple(l) if unit == 'bytes' and vals: gen_rng = ( tuple(rng.split('-')) for rng in vals.split(',') if '-' in rng ) for start, end in gen_rng: if start or end: r.append( (int(start) if start else None, int(end) if end else None) ) return r
def function[parse_byteranges, parameter[cls, environ]]: constant[ Outputs a list of tuples with ranges or the empty list According to the rfc, start or end values can be omitted ] variable[r] assign[=] list[[]] variable[s] assign[=] call[call[call[name[environ].get, parameter[name[cls].header_range, constant[]]].replace, parameter[constant[ ], constant[]]].lower, parameter[]] if name[s] begin[:] variable[l] assign[=] call[name[s].split, parameter[constant[=]]] if compare[call[name[len], parameter[name[l]]] equal[==] constant[2]] begin[:] <ast.Tuple object at 0x7da20c6a9b10> assign[=] call[name[tuple], parameter[name[l]]] if <ast.BoolOp object at 0x7da20c6ab6d0> begin[:] variable[gen_rng] assign[=] <ast.GeneratorExp object at 0x7da20c6aa260> for taget[tuple[[<ast.Name object at 0x7da20c6a8640>, <ast.Name object at 0x7da20c6aaa70>]]] in starred[name[gen_rng]] begin[:] if <ast.BoolOp object at 0x7da20c6a9000> begin[:] call[name[r].append, parameter[tuple[[<ast.IfExp object at 0x7da20c6ab1f0>, <ast.IfExp object at 0x7da20c6a8190>]]]] return[name[r]]
keyword[def] identifier[parse_byteranges] ( identifier[cls] , identifier[environ] ): literal[string] identifier[r] =[] identifier[s] = identifier[environ] . identifier[get] ( identifier[cls] . identifier[header_range] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[lower] () keyword[if] identifier[s] : identifier[l] = identifier[s] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[l] )== literal[int] : identifier[unit] , identifier[vals] = identifier[tuple] ( identifier[l] ) keyword[if] identifier[unit] == literal[string] keyword[and] identifier[vals] : identifier[gen_rng] =( identifier[tuple] ( identifier[rng] . identifier[split] ( literal[string] )) keyword[for] identifier[rng] keyword[in] identifier[vals] . identifier[split] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[rng] ) keyword[for] identifier[start] , identifier[end] keyword[in] identifier[gen_rng] : keyword[if] identifier[start] keyword[or] identifier[end] : identifier[r] . identifier[append] (( identifier[int] ( identifier[start] ) keyword[if] identifier[start] keyword[else] keyword[None] , identifier[int] ( identifier[end] ) keyword[if] identifier[end] keyword[else] keyword[None] )) keyword[return] identifier[r]
def parse_byteranges(cls, environ): """ Outputs a list of tuples with ranges or the empty list According to the rfc, start or end values can be omitted """ r = [] s = environ.get(cls.header_range, '').replace(' ', '').lower() if s: l = s.split('=') if len(l) == 2: (unit, vals) = tuple(l) if unit == 'bytes' and vals: gen_rng = (tuple(rng.split('-')) for rng in vals.split(',') if '-' in rng) for (start, end) in gen_rng: if start or end: r.append((int(start) if start else None, int(end) if end else None)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return r
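The same Range-header parsing, reduced to a standalone function to show how omitted start/end values become None.

def parse_byteranges(header_value):
    r = []
    s = header_value.replace(' ', '').lower()
    if s and s.count('=') == 1:
        unit, vals = s.split('=')
        if unit == 'bytes' and vals:
            for rng in vals.split(','):
                if '-' in rng:
                    start, end = rng.split('-', 1)
                    if start or end:
                        r.append((int(start) if start else None,
                                  int(end) if end else None))
    return r

print(parse_byteranges('bytes=0-499, 500-, -200'))
# [(0, 499), (500, None), (None, 200)]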
def borrow_optimizer(self, shared_module): """Borrows optimizer from a shared module. Used in bucketing, where exactly the same optimizer (esp. kvstore) is used. Parameters ---------- shared_module : Module """ assert shared_module.optimizer_initialized self._optimizer = shared_module._optimizer self._kvstore = shared_module._kvstore self._update_on_kvstore = shared_module._update_on_kvstore self._updater = shared_module._updater self.optimizer_initialized = True
def function[borrow_optimizer, parameter[self, shared_module]]: constant[Borrows optimizer from a shared module. Used in bucketing, where exactly the same optimizer (esp. kvstore) is used. Parameters ---------- shared_module : Module ] assert[name[shared_module].optimizer_initialized] name[self]._optimizer assign[=] name[shared_module]._optimizer name[self]._kvstore assign[=] name[shared_module]._kvstore name[self]._update_on_kvstore assign[=] name[shared_module]._update_on_kvstore name[self]._updater assign[=] name[shared_module]._updater name[self].optimizer_initialized assign[=] constant[True]
keyword[def] identifier[borrow_optimizer] ( identifier[self] , identifier[shared_module] ): literal[string] keyword[assert] identifier[shared_module] . identifier[optimizer_initialized] identifier[self] . identifier[_optimizer] = identifier[shared_module] . identifier[_optimizer] identifier[self] . identifier[_kvstore] = identifier[shared_module] . identifier[_kvstore] identifier[self] . identifier[_update_on_kvstore] = identifier[shared_module] . identifier[_update_on_kvstore] identifier[self] . identifier[_updater] = identifier[shared_module] . identifier[_updater] identifier[self] . identifier[optimizer_initialized] = keyword[True]
def borrow_optimizer(self, shared_module): """Borrows optimizer from a shared module. Used in bucketing, where exactly the same optimizer (esp. kvstore) is used. Parameters ---------- shared_module : Module """ assert shared_module.optimizer_initialized self._optimizer = shared_module._optimizer self._kvstore = shared_module._kvstore self._update_on_kvstore = shared_module._update_on_kvstore self._updater = shared_module._updater self.optimizer_initialized = True
def _put_overlay(self, overlay_name, overlay): """Store overlay so that it is accessible by the given name. :param overlay_name: name of the overlay :param overlay: overlay must be a dictionary where the keys are identifiers in the dataset :raises: TypeError if the overlay is not a dictionary, ValueError if identifiers in overlay and dataset do not match """ if not isinstance(overlay, dict): raise TypeError("Overlay must be dict") if set(self._identifiers()) != set(overlay.keys()): raise ValueError("Overlay keys must be dataset identifiers") self._storage_broker.put_overlay(overlay_name, overlay)
def function[_put_overlay, parameter[self, overlay_name, overlay]]: constant[Store overlay so that it is accessible by the given name. :param overlay_name: name of the overlay :param overlay: overlay must be a dictionary where the keys are identifiers in the dataset :raises: TypeError if the overlay is not a dictionary, ValueError if identifiers in overlay and dataset do not match ] if <ast.UnaryOp object at 0x7da2046226e0> begin[:] <ast.Raise object at 0x7da204622f80> if compare[call[name[set], parameter[call[name[self]._identifiers, parameter[]]]] not_equal[!=] call[name[set], parameter[call[name[overlay].keys, parameter[]]]]] begin[:] <ast.Raise object at 0x7da204623a90> call[name[self]._storage_broker.put_overlay, parameter[name[overlay_name], name[overlay]]]
keyword[def] identifier[_put_overlay] ( identifier[self] , identifier[overlay_name] , identifier[overlay] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[overlay] , identifier[dict] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[set] ( identifier[self] . identifier[_identifiers] ())!= identifier[set] ( identifier[overlay] . identifier[keys] ()): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[_storage_broker] . identifier[put_overlay] ( identifier[overlay_name] , identifier[overlay] )
def _put_overlay(self, overlay_name, overlay): """Store overlay so that it is accessible by the given name. :param overlay_name: name of the overlay :param overlay: overlay must be a dictionary where the keys are identifiers in the dataset :raises: TypeError if the overlay is not a dictionary, ValueError if identifiers in overlay and dataset do not match """ if not isinstance(overlay, dict): raise TypeError('Overlay must be dict') # depends on [control=['if'], data=[]] if set(self._identifiers()) != set(overlay.keys()): raise ValueError('Overlay keys must be dataset identifiers') # depends on [control=['if'], data=[]] self._storage_broker.put_overlay(overlay_name, overlay)
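The key-set validation from this record, reduced to plain dicts: overlay keys must exactly match the dataset's identifiers.

identifiers = ['id1', 'id2']
overlay = {'id1': True, 'id2': False}

if not isinstance(overlay, dict):
    raise TypeError("Overlay must be dict")
if set(identifiers) != set(overlay.keys()):
    raise ValueError("Overlay keys must be dataset identifiers")
print("overlay accepted")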
def bandwidth(self, subid, params=None): ''' /v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth ''' params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/bandwidth', params, 'GET')
def function[bandwidth, parameter[self, subid, params]]: constant[ /v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth ] variable[params] assign[=] call[name[update_params], parameter[name[params], dictionary[[<ast.Constant object at 0x7da1b1392a10>], [<ast.Name object at 0x7da1b13935b0>]]]] return[call[name[self].request, parameter[constant[/v1/server/bandwidth], name[params], constant[GET]]]]
keyword[def] identifier[bandwidth] ( identifier[self] , identifier[subid] , identifier[params] = keyword[None] ): literal[string] identifier[params] = identifier[update_params] ( identifier[params] ,{ literal[string] : identifier[subid] }) keyword[return] identifier[self] . identifier[request] ( literal[string] , identifier[params] , literal[string] )
def bandwidth(self, subid, params=None): """ /v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth """ params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/bandwidth', params, 'GET')
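update_params is not defined in this record; a plausible reading, sketched here purely as an assumption, is a None-safe dict merge.

def update_params(params, updates):
    merged = dict(params or {})   # tolerate params=None
    merged.update(updates)
    return merged

print(update_params(None, {'SUBID': 576965}))        # {'SUBID': 576965}
print(update_params({'foo': 1}, {'SUBID': 576965}))  # both keys present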
def _generate_main_scripts(self): """ Include the scripts used by solutions. """ head = self.parser.find('head').first_result() if head is not None: common_functions_script = self.parser.find( '#' + AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS ).first_result() if common_functions_script is None: common_functions_file = open( os.path.join( os.path.dirname(os.path.dirname(os.path.dirname( os.path.realpath(__file__) ))), 'js', 'common.js' ), 'r' ) common_functions_content = common_functions_file.read() common_functions_file.close() common_functions_script = self.parser.create_element('script') common_functions_script.set_attribute( 'id', AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS ) common_functions_script.set_attribute( 'type', 'text/javascript' ) common_functions_script.append_text(common_functions_content) head.prepend_element(common_functions_script) if ( self.parser.find( '#' + AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER ).first_result() is None ): event_listener_file = open( os.path.join( os.path.dirname(os.path.dirname(os.path.dirname( os.path.realpath(__file__) ))), 'js', 'eventlistener.js' ), 'r' ) event_listener_script_content = event_listener_file.read() event_listener_file.close() script = self.parser.create_element('script') script.set_attribute( 'id', AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER ) script.set_attribute('type', 'text/javascript') script.append_text(event_listener_script_content) common_functions_script.insert_after(script) local = self.parser.find('body').first_result() if local is not None: self.script_list = self.parser.find( '#' + AccessibleEventImplementation.ID_LIST_IDS_SCRIPT ).first_result() if self.script_list is None: self.script_list = self.parser.create_element('script') self.script_list.set_attribute( 'id', AccessibleEventImplementation.ID_LIST_IDS_SCRIPT ) self.script_list.set_attribute('type', 'text/javascript') self.script_list.append_text('var activeElements = [];') self.script_list.append_text('var hoverElements = [];') self.script_list.append_text('var dragElements = [];') self.script_list.append_text('var dropElements = [];') local.append_element(self.script_list) if self.parser.find( '#' + AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX ).first_result() is None: include_file = open( os.path.join( os.path.dirname(os.path.dirname(os.path.dirname( os.path.realpath(__file__) ))), 'js', 'include.js' ), 'r' ) local_include_script_content = include_file.read() include_file.close() script_function = self.parser.create_element('script') script_function.set_attribute( 'id', AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX ) script_function.set_attribute('type', 'text/javascript') script_function.append_text(local_include_script_content) local.append_element(script_function) self.main_script_added = True
def function[_generate_main_scripts, parameter[self]]: constant[ Include the scripts used by solutions. ] variable[head] assign[=] call[call[name[self].parser.find, parameter[constant[head]]].first_result, parameter[]] if compare[name[head] is_not constant[None]] begin[:] variable[common_functions_script] assign[=] call[call[name[self].parser.find, parameter[binary_operation[constant[#] + name[AccessibleEventImplementation].ID_SCRIPT_COMMON_FUNCTIONS]]].first_result, parameter[]] if compare[name[common_functions_script] is constant[None]] begin[:] variable[common_functions_file] assign[=] call[name[open], parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[name[__file__]]]]]]]]], constant[js], constant[common.js]]], constant[r]]] variable[common_functions_content] assign[=] call[name[common_functions_file].read, parameter[]] call[name[common_functions_file].close, parameter[]] variable[common_functions_script] assign[=] call[name[self].parser.create_element, parameter[constant[script]]] call[name[common_functions_script].set_attribute, parameter[constant[id], name[AccessibleEventImplementation].ID_SCRIPT_COMMON_FUNCTIONS]] call[name[common_functions_script].set_attribute, parameter[constant[type], constant[text/javascript]]] call[name[common_functions_script].append_text, parameter[name[common_functions_content]]] call[name[head].prepend_element, parameter[name[common_functions_script]]] if compare[call[call[name[self].parser.find, parameter[binary_operation[constant[#] + name[AccessibleEventImplementation].ID_SCRIPT_EVENT_LISTENER]]].first_result, parameter[]] is constant[None]] begin[:] variable[event_listener_file] assign[=] call[name[open], parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[name[__file__]]]]]]]]], constant[js], constant[eventlistener.js]]], constant[r]]] variable[event_listener_script_content] assign[=] call[name[event_listener_file].read, parameter[]] call[name[event_listener_file].close, parameter[]] variable[script] assign[=] call[name[self].parser.create_element, parameter[constant[script]]] call[name[script].set_attribute, parameter[constant[id], name[AccessibleEventImplementation].ID_SCRIPT_EVENT_LISTENER]] call[name[script].set_attribute, parameter[constant[type], constant[text/javascript]]] call[name[script].append_text, parameter[name[event_listener_script_content]]] call[name[common_functions_script].insert_after, parameter[name[script]]] variable[local] assign[=] call[call[name[self].parser.find, parameter[constant[body]]].first_result, parameter[]] if compare[name[local] is_not constant[None]] begin[:] name[self].script_list assign[=] call[call[name[self].parser.find, parameter[binary_operation[constant[#] + name[AccessibleEventImplementation].ID_LIST_IDS_SCRIPT]]].first_result, parameter[]] if compare[name[self].script_list is constant[None]] begin[:] name[self].script_list assign[=] call[name[self].parser.create_element, parameter[constant[script]]] call[name[self].script_list.set_attribute, parameter[constant[id], name[AccessibleEventImplementation].ID_LIST_IDS_SCRIPT]] call[name[self].script_list.set_attribute, parameter[constant[type], constant[text/javascript]]] call[name[self].script_list.append_text, parameter[constant[var activeElements = [];]]] 
call[name[self].script_list.append_text, parameter[constant[var hoverElements = [];]]] call[name[self].script_list.append_text, parameter[constant[var dragElements = [];]]] call[name[self].script_list.append_text, parameter[constant[var dropElements = [];]]] call[name[local].append_element, parameter[name[self].script_list]] if compare[call[call[name[self].parser.find, parameter[binary_operation[constant[#] + name[AccessibleEventImplementation].ID_FUNCTION_SCRIPT_FIX]]].first_result, parameter[]] is constant[None]] begin[:] variable[include_file] assign[=] call[name[open], parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[name[__file__]]]]]]]]], constant[js], constant[include.js]]], constant[r]]] variable[local_include_script_content] assign[=] call[name[include_file].read, parameter[]] call[name[include_file].close, parameter[]] variable[script_function] assign[=] call[name[self].parser.create_element, parameter[constant[script]]] call[name[script_function].set_attribute, parameter[constant[id], name[AccessibleEventImplementation].ID_FUNCTION_SCRIPT_FIX]] call[name[script_function].set_attribute, parameter[constant[type], constant[text/javascript]]] call[name[script_function].append_text, parameter[name[local_include_script_content]]] call[name[local].append_element, parameter[name[script_function]]] name[self].main_script_added assign[=] constant[True]
keyword[def] identifier[_generate_main_scripts] ( identifier[self] ): literal[string] identifier[head] = identifier[self] . identifier[parser] . identifier[find] ( literal[string] ). identifier[first_result] () keyword[if] identifier[head] keyword[is] keyword[not] keyword[None] : identifier[common_functions_script] = identifier[self] . identifier[parser] . identifier[find] ( literal[string] + identifier[AccessibleEventImplementation] . identifier[ID_SCRIPT_COMMON_FUNCTIONS] ). identifier[first_result] () keyword[if] identifier[common_functions_script] keyword[is] keyword[None] : identifier[common_functions_file] = identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] ) ))), literal[string] , literal[string] ), literal[string] ) identifier[common_functions_content] = identifier[common_functions_file] . identifier[read] () identifier[common_functions_file] . identifier[close] () identifier[common_functions_script] = identifier[self] . identifier[parser] . identifier[create_element] ( literal[string] ) identifier[common_functions_script] . identifier[set_attribute] ( literal[string] , identifier[AccessibleEventImplementation] . identifier[ID_SCRIPT_COMMON_FUNCTIONS] ) identifier[common_functions_script] . identifier[set_attribute] ( literal[string] , literal[string] ) identifier[common_functions_script] . identifier[append_text] ( identifier[common_functions_content] ) identifier[head] . identifier[prepend_element] ( identifier[common_functions_script] ) keyword[if] ( identifier[self] . identifier[parser] . identifier[find] ( literal[string] + identifier[AccessibleEventImplementation] . identifier[ID_SCRIPT_EVENT_LISTENER] ). identifier[first_result] () keyword[is] keyword[None] ): identifier[event_listener_file] = identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] ) ))), literal[string] , literal[string] ), literal[string] ) identifier[event_listener_script_content] = identifier[event_listener_file] . identifier[read] () identifier[event_listener_file] . identifier[close] () identifier[script] = identifier[self] . identifier[parser] . identifier[create_element] ( literal[string] ) identifier[script] . identifier[set_attribute] ( literal[string] , identifier[AccessibleEventImplementation] . identifier[ID_SCRIPT_EVENT_LISTENER] ) identifier[script] . identifier[set_attribute] ( literal[string] , literal[string] ) identifier[script] . identifier[append_text] ( identifier[event_listener_script_content] ) identifier[common_functions_script] . identifier[insert_after] ( identifier[script] ) identifier[local] = identifier[self] . identifier[parser] . identifier[find] ( literal[string] ). identifier[first_result] () keyword[if] identifier[local] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[script_list] = identifier[self] . identifier[parser] . identifier[find] ( literal[string] + identifier[AccessibleEventImplementation] . identifier[ID_LIST_IDS_SCRIPT] ). identifier[first_result] () keyword[if] identifier[self] . 
identifier[script_list] keyword[is] keyword[None] : identifier[self] . identifier[script_list] = identifier[self] . identifier[parser] . identifier[create_element] ( literal[string] ) identifier[self] . identifier[script_list] . identifier[set_attribute] ( literal[string] , identifier[AccessibleEventImplementation] . identifier[ID_LIST_IDS_SCRIPT] ) identifier[self] . identifier[script_list] . identifier[set_attribute] ( literal[string] , literal[string] ) identifier[self] . identifier[script_list] . identifier[append_text] ( literal[string] ) identifier[self] . identifier[script_list] . identifier[append_text] ( literal[string] ) identifier[self] . identifier[script_list] . identifier[append_text] ( literal[string] ) identifier[self] . identifier[script_list] . identifier[append_text] ( literal[string] ) identifier[local] . identifier[append_element] ( identifier[self] . identifier[script_list] ) keyword[if] identifier[self] . identifier[parser] . identifier[find] ( literal[string] + identifier[AccessibleEventImplementation] . identifier[ID_FUNCTION_SCRIPT_FIX] ). identifier[first_result] () keyword[is] keyword[None] : identifier[include_file] = identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] ) ))), literal[string] , literal[string] ), literal[string] ) identifier[local_include_script_content] = identifier[include_file] . identifier[read] () identifier[include_file] . identifier[close] () identifier[script_function] = identifier[self] . identifier[parser] . identifier[create_element] ( literal[string] ) identifier[script_function] . identifier[set_attribute] ( literal[string] , identifier[AccessibleEventImplementation] . identifier[ID_FUNCTION_SCRIPT_FIX] ) identifier[script_function] . identifier[set_attribute] ( literal[string] , literal[string] ) identifier[script_function] . identifier[append_text] ( identifier[local_include_script_content] ) identifier[local] . identifier[append_element] ( identifier[script_function] ) identifier[self] . identifier[main_script_added] = keyword[True]
def _generate_main_scripts(self): """ Include the scripts used by solutions. """ head = self.parser.find('head').first_result() if head is not None: common_functions_script = self.parser.find('#' + AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS).first_result() if common_functions_script is None: common_functions_file = open(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), 'js', 'common.js'), 'r') common_functions_content = common_functions_file.read() common_functions_file.close() common_functions_script = self.parser.create_element('script') common_functions_script.set_attribute('id', AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS) common_functions_script.set_attribute('type', 'text/javascript') common_functions_script.append_text(common_functions_content) head.prepend_element(common_functions_script) # depends on [control=['if'], data=['common_functions_script']] if self.parser.find('#' + AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER).first_result() is None: event_listener_file = open(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), 'js', 'eventlistener.js'), 'r') event_listener_script_content = event_listener_file.read() event_listener_file.close() script = self.parser.create_element('script') script.set_attribute('id', AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER) script.set_attribute('type', 'text/javascript') script.append_text(event_listener_script_content) common_functions_script.insert_after(script) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['head']] local = self.parser.find('body').first_result() if local is not None: self.script_list = self.parser.find('#' + AccessibleEventImplementation.ID_LIST_IDS_SCRIPT).first_result() if self.script_list is None: self.script_list = self.parser.create_element('script') self.script_list.set_attribute('id', AccessibleEventImplementation.ID_LIST_IDS_SCRIPT) self.script_list.set_attribute('type', 'text/javascript') self.script_list.append_text('var activeElements = [];') self.script_list.append_text('var hoverElements = [];') self.script_list.append_text('var dragElements = [];') self.script_list.append_text('var dropElements = [];') local.append_element(self.script_list) # depends on [control=['if'], data=[]] if self.parser.find('#' + AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX).first_result() is None: include_file = open(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), 'js', 'include.js'), 'r') local_include_script_content = include_file.read() include_file.close() script_function = self.parser.create_element('script') script_function.set_attribute('id', AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX) script_function.set_attribute('type', 'text/javascript') script_function.append_text(local_include_script_content) local.append_element(script_function) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['local']] self.main_script_added = True
def visit_repr(self, node, parent): """visit a Backquote node by returning a fresh instance of it""" newnode = nodes.Repr(node.lineno, node.col_offset, parent) newnode.postinit(self.visit(node.value, newnode)) return newnode
def function[visit_repr, parameter[self, node, parent]]: constant[visit a Backquote node by returning a fresh instance of it] variable[newnode] assign[=] call[name[nodes].Repr, parameter[name[node].lineno, name[node].col_offset, name[parent]]] call[name[newnode].postinit, parameter[call[name[self].visit, parameter[name[node].value, name[newnode]]]]] return[name[newnode]]
keyword[def] identifier[visit_repr] ( identifier[self] , identifier[node] , identifier[parent] ): literal[string] identifier[newnode] = identifier[nodes] . identifier[Repr] ( identifier[node] . identifier[lineno] , identifier[node] . identifier[col_offset] , identifier[parent] ) identifier[newnode] . identifier[postinit] ( identifier[self] . identifier[visit] ( identifier[node] . identifier[value] , identifier[newnode] )) keyword[return] identifier[newnode]
def visit_repr(self, node, parent): """visit a Backquote node by returning a fresh instance of it""" newnode = nodes.Repr(node.lineno, node.col_offset, parent) newnode.postinit(self.visit(node.value, newnode)) return newnode
def zlib_compress(data): """ Compress things in a py2/3 safe fashion >>> json_str = '{"test": 1}' >>> blob = zlib_compress(json_str) """ if PY3K: if isinstance(data, str): return zlib.compress(bytes(data, 'utf-8')) return zlib.compress(data) return zlib.compress(data)
def function[zlib_compress, parameter[data]]: constant[ Compress things in a py2/3 safe fashion >>> json_str = '{"test": 1}' >>> blob = zlib_compress(json_str) ] if name[PY3K] begin[:] if call[name[isinstance], parameter[name[data], name[str]]] begin[:] return[call[name[zlib].compress, parameter[call[name[bytes], parameter[name[data], constant[utf-8]]]]]] return[call[name[zlib].compress, parameter[name[data]]]] return[call[name[zlib].compress, parameter[name[data]]]]
keyword[def] identifier[zlib_compress] ( identifier[data] ): literal[string] keyword[if] identifier[PY3K] : keyword[if] identifier[isinstance] ( identifier[data] , identifier[str] ): keyword[return] identifier[zlib] . identifier[compress] ( identifier[bytes] ( identifier[data] , literal[string] )) keyword[return] identifier[zlib] . identifier[compress] ( identifier[data] ) keyword[return] identifier[zlib] . identifier[compress] ( identifier[data] )
def zlib_compress(data): """ Compress things in a py2/3 safe fashion >>> json_str = '{"test": 1}' >>> blob = zlib_compress(json_str) """ if PY3K: if isinstance(data, str): return zlib.compress(bytes(data, 'utf-8')) # depends on [control=['if'], data=[]] return zlib.compress(data) # depends on [control=['if'], data=[]] return zlib.compress(data)
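A round-trip check for the helper above (stdlib only): a str is encoded to UTF-8 before compression, and decompression recovers the original bytes.

import zlib

blob = zlib.compress('{"test": 1}'.encode('utf-8'))
assert zlib.decompress(blob) == b'{"test": 1}'
print(len(blob), 'compressed bytes')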
def to_dict(obj, **kwargs): """ Convert an object into dictionary. Uses singledispatch to allow for clean extensions for custom class types. Reference: https://pypi.python.org/pypi/singledispatch :param obj: object instance :param kwargs: keyword arguments such as suppress_private_attr, suppress_empty_values, dict_factory :return: converted dictionary. """ # if is_related, then iterate attrs. if is_model(obj.__class__): return related_obj_to_dict(obj, **kwargs) # else, return obj directly. register a custom to_dict if you need to! # reference: https://pypi.python.org/pypi/singledispatch else: return obj
def function[to_dict, parameter[obj]]: constant[ Convert an object into dictionary. Uses singledispatch to allow for clean extensions for custom class types. Reference: https://pypi.python.org/pypi/singledispatch :param obj: object instance :param kwargs: keyword arguments such as suppress_private_attr, suppress_empty_values, dict_factory :return: converted dictionary. ] if call[name[is_model], parameter[name[obj].__class__]] begin[:] return[call[name[related_obj_to_dict], parameter[name[obj]]]]
keyword[def] identifier[to_dict] ( identifier[obj] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[is_model] ( identifier[obj] . identifier[__class__] ): keyword[return] identifier[related_obj_to_dict] ( identifier[obj] ,** identifier[kwargs] ) keyword[else] : keyword[return] identifier[obj]
def to_dict(obj, **kwargs): """ Convert an object into dictionary. Uses singledispatch to allow for clean extensions for custom class types. Reference: https://pypi.python.org/pypi/singledispatch :param obj: object instance :param kwargs: keyword arguments such as suppress_private_attr, suppress_empty_values, dict_factory :return: converted dictionary. """ # if is_related, then iterate attrs. if is_model(obj.__class__): return related_obj_to_dict(obj, **kwargs) # depends on [control=['if'], data=[]] else: # else, return obj directly. register a custom to_dict if you need to! # reference: https://pypi.python.org/pypi/singledispatch return obj
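The singledispatch extension point the docstring points to, sketched with a toy type; the related-model branch is replaced by a plain `date` registration here.

from functools import singledispatch
from datetime import date

@singledispatch
def to_dict(obj, **kwargs):
    return obj                          # fallback: return obj directly

@to_dict.register(date)
def _(obj, **kwargs):
    return {'year': obj.year, 'month': obj.month, 'day': obj.day}

print(to_dict(42))                      # 42
print(to_dict(date(2019, 4, 10)))       # {'year': 2019, 'month': 4, 'day': 10}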
def pre_save(self, model_instance, add): """Updates socket.gethostname() on each save.""" value = socket.gethostname() setattr(model_instance, self.attname, value) return value
def function[pre_save, parameter[self, model_instance, add]]: constant[Updates socket.gethostname() on each save.] variable[value] assign[=] call[name[socket].gethostname, parameter[]] call[name[setattr], parameter[name[model_instance], name[self].attname, name[value]]] return[name[value]]
keyword[def] identifier[pre_save] ( identifier[self] , identifier[model_instance] , identifier[add] ): literal[string] identifier[value] = identifier[socket] . identifier[gethostname] () identifier[setattr] ( identifier[model_instance] , identifier[self] . identifier[attname] , identifier[value] ) keyword[return] identifier[value]
def pre_save(self, model_instance, add): """Updates socket.gethostname() on each save.""" value = socket.gethostname() setattr(model_instance, self.attname, value) return value
def _contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'): ''' As this module is designed to manipulate packages in jails and chroots, use the passed jail/chroot to ensure that a key in the __context__ dict that is unique to that jail/chroot is used. ''' if jail: return six.text_type(prefix) + '.jail_{0}'.format(jail) elif chroot: return six.text_type(prefix) + '.chroot_{0}'.format(chroot) elif root: return six.text_type(prefix) + '.root_{0}'.format(root) return prefix
def function[_contextkey, parameter[jail, chroot, root, prefix]]: constant[ As this module is designed to manipulate packages in jails and chroots, use the passed jail/chroot to ensure that a key in the __context__ dict that is unique to that jail/chroot is used. ] if name[jail] begin[:] return[binary_operation[call[name[six].text_type, parameter[name[prefix]]] + call[constant[.jail_{0}].format, parameter[name[jail]]]]] return[name[prefix]]
keyword[def] identifier[_contextkey] ( identifier[jail] = keyword[None] , identifier[chroot] = keyword[None] , identifier[root] = keyword[None] , identifier[prefix] = literal[string] ): literal[string] keyword[if] identifier[jail] : keyword[return] identifier[six] . identifier[text_type] ( identifier[prefix] )+ literal[string] . identifier[format] ( identifier[jail] ) keyword[elif] identifier[chroot] : keyword[return] identifier[six] . identifier[text_type] ( identifier[prefix] )+ literal[string] . identifier[format] ( identifier[chroot] ) keyword[elif] identifier[root] : keyword[return] identifier[six] . identifier[text_type] ( identifier[prefix] )+ literal[string] . identifier[format] ( identifier[root] ) keyword[return] identifier[prefix]
def _contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'): """ As this module is designed to manipulate packages in jails and chroots, use the passed jail/chroot to ensure that a key in the __context__ dict that is unique to that jail/chroot is used. """ if jail: return six.text_type(prefix) + '.jail_{0}'.format(jail) # depends on [control=['if'], data=[]] elif chroot: return six.text_type(prefix) + '.chroot_{0}'.format(chroot) # depends on [control=['if'], data=[]] elif root: return six.text_type(prefix) + '.root_{0}'.format(root) # depends on [control=['if'], data=[]] return prefix
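The same key-namespacing logic without the six dependency, to show the generated cache keys.

def contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'):
    if jail:
        return '{0}.jail_{1}'.format(prefix, jail)
    if chroot:
        return '{0}.chroot_{1}'.format(prefix, chroot)
    if root:
        return '{0}.root_{1}'.format(prefix, root)
    return prefix

print(contextkey(jail='web1'))          # pkg.list_pkgs.jail_web1
print(contextkey(chroot='/buildroot'))  # pkg.list_pkgs.chroot_/buildroot
print(contextkey())                     # pkg.list_pkgs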
def add(self, registry): """ Add works like replace, but only previously pushed metrics with the same name (and the same job and instance) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) """ # POST payload = self.formatter.marshall(registry) r = requests.post(self.path, data=payload, headers=self.headers)
def function[add, parameter[self, registry]]: constant[ Add works like replace, but only previously pushed metrics with the same name (and the same job and instance) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) ] variable[payload] assign[=] call[name[self].formatter.marshall, parameter[name[registry]]] variable[r] assign[=] call[name[requests].post, parameter[name[self].path]]
keyword[def] identifier[add] ( identifier[self] , identifier[registry] ): literal[string] identifier[payload] = identifier[self] . identifier[formatter] . identifier[marshall] ( identifier[registry] ) identifier[r] = identifier[requests] . identifier[post] ( identifier[self] . identifier[path] , identifier[data] = identifier[payload] , identifier[headers] = identifier[self] . identifier[headers] )
def add(self, registry): """ Add works like replace, but only previously pushed metrics with the same name (and the same job and instance) will be replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) """ # POST payload = self.formatter.marshall(registry) r = requests.post(self.path, data=payload, headers=self.headers)
def set_angle_limit(self, limit_for_id, **kwargs):
        """ Sets the angle limit for the specified motors. """
        convert = kwargs['convert'] if 'convert' in kwargs else self._convert

        if 'wheel' in self.get_control_mode(limit_for_id.keys()):
            raise ValueError('can not change the angle limit of a motor in wheel mode')

        if (0, 0) in limit_for_id.values():
            raise ValueError('can not set limit to (0, 0)')

        self._set_angle_limit(limit_for_id, convert=convert)
def function[set_angle_limit, parameter[self, limit_for_id]]: constant[ Sets the angle limit to the specified motors. ] variable[convert] assign[=] <ast.IfExp object at 0x7da1b14c5cf0> if compare[constant[wheel] in call[name[self].get_control_mode, parameter[call[name[limit_for_id].keys, parameter[]]]]] begin[:] <ast.Raise object at 0x7da1b14c63b0> if compare[tuple[[<ast.Constant object at 0x7da1b14c52d0>, <ast.Constant object at 0x7da1b14c5ed0>]] in call[name[limit_for_id].values, parameter[]]] begin[:] <ast.Raise object at 0x7da1b13073a0> call[name[self]._set_angle_limit, parameter[name[limit_for_id]]]
keyword[def] identifier[set_angle_limit] ( identifier[self] , identifier[limit_for_id] ,** identifier[kwargs] ): literal[string] identifier[convert] = identifier[kwargs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] identifier[self] . identifier[_convert] keyword[if] literal[string] keyword[in] identifier[self] . identifier[get_control_mode] ( identifier[limit_for_id] . identifier[keys] ()): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] ( literal[int] , literal[int] ) keyword[in] identifier[limit_for_id] . identifier[values] (): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[_set_angle_limit] ( identifier[limit_for_id] , identifier[convert] = identifier[convert] )
def set_angle_limit(self, limit_for_id, **kwargs): """ Sets the angle limit to the specified motors. """ convert = kwargs['convert'] if 'convert' in kwargs else self._convert if 'wheel' in self.get_control_mode(limit_for_id.keys()): raise ValueError('can not change the angle limit of a motor in wheel mode') # depends on [control=['if'], data=[]] if (0, 0) in limit_for_id.values(): raise ValueError('can not set limit to (0, 0)') # depends on [control=['if'], data=[]] self._set_angle_limit(limit_for_id, convert=convert)
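Editorial note: a hedged usage sketch for the record above. `dxl_io` stands in for a connected instance of the surrounding pypot-style motor-IO class; ids and degree limits are made up. `limit_for_id` maps motor ids to (lower, upper) tuples, and both guard clauses surface as ValueError:

dxl_io.set_angle_limit({1: (-90.0, 90.0), 2: (-45.0, 45.0)})

# Raises ValueError if motor 3 is in wheel mode:
# dxl_io.set_angle_limit({3: (-90.0, 90.0)})
# Raises ValueError because (0, 0) is the sentinel for wheel mode:
# dxl_io.set_angle_limit({1: (0, 0)})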
def get_applications(self, states=None, name=None, user=None, queue=None, started_begin=None, started_end=None, finished_begin=None, finished_end=None): """Get the status of current skein applications. Parameters ---------- states : sequence of ApplicationState, optional If provided, applications will be filtered to these application states. Default is ``['SUBMITTED', 'ACCEPTED', 'RUNNING']``. name : str, optional Only select applications with this name. user : str, optional Only select applications with this user. queue : str, optional Only select applications in this queue. started_begin : datetime or str, optional Only select applications that started after this time (inclusive). Can be either a datetime or a string representation of one. String representations can use any of the following formats: - ``YYYY-M-D H:M:S`` (e.g. 2019-4-10 14:50:20) - ``YYYY-M-D H:M`` (e.g. 2019-4-10 14:50) - ``YYYY-M-D`` (e.g. 2019-4-10) - ``H:M:S`` (e.g. 14:50:20, today is used for date) - ``H:M`` (e.g. 14:50, today is used for date) started_end : datetime or str, optional Only select applications that started before this time (inclusive). Can be either a datetime or a string representation of one. finished_begin : datetime or str, optional Only select applications that finished after this time (inclusive). Can be either a datetime or a string representation of one. finished_end : datetime or str, optional Only select applications that finished before this time (inclusive). Can be either a datetime or a string representation of one. Returns ------- reports : list of ApplicationReport Examples -------- Get all the finished and failed applications >>> client.get_applications(states=['FINISHED', 'FAILED']) [ApplicationReport<name='demo'>, ApplicationReport<name='dask'>, ApplicationReport<name='demo'>] Get all applications named 'demo' started after 2019-4-10: >>> client.get_applications(name='demo', started_begin='2019-4-10') [ApplicationReport<name='demo'>, ApplicationReport<name='demo'>] """ if states is not None: states = tuple(ApplicationState(s) for s in states) else: states = (ApplicationState.SUBMITTED, ApplicationState.ACCEPTED, ApplicationState.RUNNING) started_begin = self._parse_datetime(started_begin, 'started_begin') started_end = self._parse_datetime(started_end, 'started_end') finished_begin = self._parse_datetime(finished_begin, 'finished_begin') finished_end = self._parse_datetime(finished_end, 'finished_end') req = proto.ApplicationsRequest( states=[str(s) for s in states], name=name, user=user, queue=queue, started_begin=datetime_to_millis(started_begin), started_end=datetime_to_millis(started_end), finished_begin=datetime_to_millis(finished_begin), finished_end=datetime_to_millis(finished_end) ) resp = self._call('getApplications', req) return sorted((ApplicationReport.from_protobuf(r) for r in resp.reports), key=lambda x: x.id)
def function[get_applications, parameter[self, states, name, user, queue, started_begin, started_end, finished_begin, finished_end]]: constant[Get the status of current skein applications. Parameters ---------- states : sequence of ApplicationState, optional If provided, applications will be filtered to these application states. Default is ``['SUBMITTED', 'ACCEPTED', 'RUNNING']``. name : str, optional Only select applications with this name. user : str, optional Only select applications with this user. queue : str, optional Only select applications in this queue. started_begin : datetime or str, optional Only select applications that started after this time (inclusive). Can be either a datetime or a string representation of one. String representations can use any of the following formats: - ``YYYY-M-D H:M:S`` (e.g. 2019-4-10 14:50:20) - ``YYYY-M-D H:M`` (e.g. 2019-4-10 14:50) - ``YYYY-M-D`` (e.g. 2019-4-10) - ``H:M:S`` (e.g. 14:50:20, today is used for date) - ``H:M`` (e.g. 14:50, today is used for date) started_end : datetime or str, optional Only select applications that started before this time (inclusive). Can be either a datetime or a string representation of one. finished_begin : datetime or str, optional Only select applications that finished after this time (inclusive). Can be either a datetime or a string representation of one. finished_end : datetime or str, optional Only select applications that finished before this time (inclusive). Can be either a datetime or a string representation of one. Returns ------- reports : list of ApplicationReport Examples -------- Get all the finished and failed applications >>> client.get_applications(states=['FINISHED', 'FAILED']) [ApplicationReport<name='demo'>, ApplicationReport<name='dask'>, ApplicationReport<name='demo'>] Get all applications named 'demo' started after 2019-4-10: >>> client.get_applications(name='demo', started_begin='2019-4-10') [ApplicationReport<name='demo'>, ApplicationReport<name='demo'>] ] if compare[name[states] is_not constant[None]] begin[:] variable[states] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da207f02d40>]] variable[started_begin] assign[=] call[name[self]._parse_datetime, parameter[name[started_begin], constant[started_begin]]] variable[started_end] assign[=] call[name[self]._parse_datetime, parameter[name[started_end], constant[started_end]]] variable[finished_begin] assign[=] call[name[self]._parse_datetime, parameter[name[finished_begin], constant[finished_begin]]] variable[finished_end] assign[=] call[name[self]._parse_datetime, parameter[name[finished_end], constant[finished_end]]] variable[req] assign[=] call[name[proto].ApplicationsRequest, parameter[]] variable[resp] assign[=] call[name[self]._call, parameter[constant[getApplications], name[req]]] return[call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b08da8f0>]]]
keyword[def] identifier[get_applications] ( identifier[self] , identifier[states] = keyword[None] , identifier[name] = keyword[None] , identifier[user] = keyword[None] , identifier[queue] = keyword[None] , identifier[started_begin] = keyword[None] , identifier[started_end] = keyword[None] , identifier[finished_begin] = keyword[None] , identifier[finished_end] = keyword[None] ): literal[string] keyword[if] identifier[states] keyword[is] keyword[not] keyword[None] : identifier[states] = identifier[tuple] ( identifier[ApplicationState] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[states] ) keyword[else] : identifier[states] =( identifier[ApplicationState] . identifier[SUBMITTED] , identifier[ApplicationState] . identifier[ACCEPTED] , identifier[ApplicationState] . identifier[RUNNING] ) identifier[started_begin] = identifier[self] . identifier[_parse_datetime] ( identifier[started_begin] , literal[string] ) identifier[started_end] = identifier[self] . identifier[_parse_datetime] ( identifier[started_end] , literal[string] ) identifier[finished_begin] = identifier[self] . identifier[_parse_datetime] ( identifier[finished_begin] , literal[string] ) identifier[finished_end] = identifier[self] . identifier[_parse_datetime] ( identifier[finished_end] , literal[string] ) identifier[req] = identifier[proto] . identifier[ApplicationsRequest] ( identifier[states] =[ identifier[str] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[states] ], identifier[name] = identifier[name] , identifier[user] = identifier[user] , identifier[queue] = identifier[queue] , identifier[started_begin] = identifier[datetime_to_millis] ( identifier[started_begin] ), identifier[started_end] = identifier[datetime_to_millis] ( identifier[started_end] ), identifier[finished_begin] = identifier[datetime_to_millis] ( identifier[finished_begin] ), identifier[finished_end] = identifier[datetime_to_millis] ( identifier[finished_end] ) ) identifier[resp] = identifier[self] . identifier[_call] ( literal[string] , identifier[req] ) keyword[return] identifier[sorted] (( identifier[ApplicationReport] . identifier[from_protobuf] ( identifier[r] ) keyword[for] identifier[r] keyword[in] identifier[resp] . identifier[reports] ), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[id] )
def get_applications(self, states=None, name=None, user=None, queue=None, started_begin=None, started_end=None, finished_begin=None, finished_end=None): """Get the status of current skein applications. Parameters ---------- states : sequence of ApplicationState, optional If provided, applications will be filtered to these application states. Default is ``['SUBMITTED', 'ACCEPTED', 'RUNNING']``. name : str, optional Only select applications with this name. user : str, optional Only select applications with this user. queue : str, optional Only select applications in this queue. started_begin : datetime or str, optional Only select applications that started after this time (inclusive). Can be either a datetime or a string representation of one. String representations can use any of the following formats: - ``YYYY-M-D H:M:S`` (e.g. 2019-4-10 14:50:20) - ``YYYY-M-D H:M`` (e.g. 2019-4-10 14:50) - ``YYYY-M-D`` (e.g. 2019-4-10) - ``H:M:S`` (e.g. 14:50:20, today is used for date) - ``H:M`` (e.g. 14:50, today is used for date) started_end : datetime or str, optional Only select applications that started before this time (inclusive). Can be either a datetime or a string representation of one. finished_begin : datetime or str, optional Only select applications that finished after this time (inclusive). Can be either a datetime or a string representation of one. finished_end : datetime or str, optional Only select applications that finished before this time (inclusive). Can be either a datetime or a string representation of one. Returns ------- reports : list of ApplicationReport Examples -------- Get all the finished and failed applications >>> client.get_applications(states=['FINISHED', 'FAILED']) [ApplicationReport<name='demo'>, ApplicationReport<name='dask'>, ApplicationReport<name='demo'>] Get all applications named 'demo' started after 2019-4-10: >>> client.get_applications(name='demo', started_begin='2019-4-10') [ApplicationReport<name='demo'>, ApplicationReport<name='demo'>] """ if states is not None: states = tuple((ApplicationState(s) for s in states)) # depends on [control=['if'], data=['states']] else: states = (ApplicationState.SUBMITTED, ApplicationState.ACCEPTED, ApplicationState.RUNNING) started_begin = self._parse_datetime(started_begin, 'started_begin') started_end = self._parse_datetime(started_end, 'started_end') finished_begin = self._parse_datetime(finished_begin, 'finished_begin') finished_end = self._parse_datetime(finished_end, 'finished_end') req = proto.ApplicationsRequest(states=[str(s) for s in states], name=name, user=user, queue=queue, started_begin=datetime_to_millis(started_begin), started_end=datetime_to_millis(started_end), finished_begin=datetime_to_millis(finished_begin), finished_end=datetime_to_millis(finished_end)) resp = self._call('getApplications', req) return sorted((ApplicationReport.from_protobuf(r) for r in resp.reports), key=lambda x: x.id)
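Editorial note: the docstring enumerates five accepted datetime string formats, but the _parse_datetime helper itself is not part of this record. A sketch of what such a parser could look like (the real skein implementation may differ):

from datetime import datetime, date

# Formats taken from the docstring above, most specific first.
_FORMATS = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d',
            '%H:%M:%S', '%H:%M']

def parse_datetime(value, name):
    if value is None or isinstance(value, datetime):
        return value
    for fmt in _FORMATS:
        try:
            parsed = datetime.strptime(value, fmt)
        except ValueError:
            continue
        if fmt.startswith('%H'):
            # Time-only formats use today's date, per the docstring.
            parsed = datetime.combine(date.today(), parsed.time())
        return parsed
    raise ValueError('invalid datetime for {!r}: {!r}'.format(name, value))

parse_datetime('2019-4-10 14:50', 'started_begin')  # datetime(2019, 4, 10, 14, 50)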
def extract_run_id(key): """Extract date part from run id Arguments: key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/ (trailing slash is required) >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/') 'shredded-archive/run=2012-12-11-01-11-33/' >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33') >>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/') """ filename = key.split('/')[-2] # -1 element is empty string run_id = filename.lstrip('run=') try: datetime.strptime(run_id, '%Y-%m-%d-%H-%M-%S') return key except ValueError: return None
def function[extract_run_id, parameter[key]]: constant[Extract date part from run id Arguments: key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/ (trailing slash is required) >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/') 'shredded-archive/run=2012-12-11-01-11-33/' >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33') >>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/') ] variable[filename] assign[=] call[call[name[key].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b02c3160>] variable[run_id] assign[=] call[name[filename].lstrip, parameter[constant[run=]]] <ast.Try object at 0x7da1b02c23e0>
keyword[def] identifier[extract_run_id] ( identifier[key] ): literal[string] identifier[filename] = identifier[key] . identifier[split] ( literal[string] )[- literal[int] ] identifier[run_id] = identifier[filename] . identifier[lstrip] ( literal[string] ) keyword[try] : identifier[datetime] . identifier[strptime] ( identifier[run_id] , literal[string] ) keyword[return] identifier[key] keyword[except] identifier[ValueError] : keyword[return] keyword[None]
def extract_run_id(key): """Extract date part from run id Arguments: key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/ (trailing slash is required) >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/') 'shredded-archive/run=2012-12-11-01-11-33/' >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33') >>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/') """ filename = key.split('/')[-2] # -1 element is empty string run_id = filename.lstrip('run=') try: datetime.strptime(run_id, '%Y-%m-%d-%H-%M-%S') return key # depends on [control=['try'], data=[]] except ValueError: return None # depends on [control=['except'], data=[]]
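Editorial note: str.lstrip('run=') in the record strips a *character set*, not the literal prefix. It happens to work for digit-leading run ids, but it is a well-known pitfall; a short illustration plus a safer prefix removal (the helper name is made up):

# lstrip removes any leading 'r', 'u', 'n' or '=' characters:
'run=nightly'.lstrip('run=')   # -> 'ightly', not 'nightly'

def strip_prefix(s, prefix='run='):
    # Remove the literal prefix only if it is actually present.
    return s[len(prefix):] if s.startswith(prefix) else s

strip_prefix('run=2012-12-11-01-11-33')  # -> '2012-12-11-01-11-33'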
def languages2marc(self, key, value): """Populate the ``041`` MARC field.""" return {'a': pycountry.languages.get(alpha_2=value).name.lower()}
def function[languages2marc, parameter[self, key, value]]: constant[Populate the ``041`` MARC field.] return[dictionary[[<ast.Constant object at 0x7da2041dbf40>], [<ast.Call object at 0x7da2041d8310>]]]
keyword[def] identifier[languages2marc] ( identifier[self] , identifier[key] , identifier[value] ): literal[string] keyword[return] { literal[string] : identifier[pycountry] . identifier[languages] . identifier[get] ( identifier[alpha_2] = identifier[value] ). identifier[name] . identifier[lower] ()}
def languages2marc(self, key, value): """Populate the ``041`` MARC field.""" return {'a': pycountry.languages.get(alpha_2=value).name.lower()}
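Editorial note: a quick illustration of the pycountry call this record relies on. Note that languages.get returns None for unknown alpha-2 codes in recent pycountry releases, so the one-liner above would then raise AttributeError:

import pycountry

pycountry.languages.get(alpha_2='en').name.lower()  # -> 'english'
pycountry.languages.get(alpha_2='de').name.lower()  # -> 'german'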
def _tree_store_sub_branch(self, traj_node, branch_name, store_data=pypetconstants.STORE_DATA, with_links=True, recursive=False, max_depth=None, hdf5_group=None): """Stores data starting from a node along a branch and starts recursively loading all data at end of branch. :param traj_node: The node where storing starts :param branch_name: A branch along which storing progresses. Colon Notation is used: 'group1.group2.group3' loads 'group1', then 'group2', then 'group3', and then finally recursively all children and children's children below 'group3'. :param store_data: How data should be stored :param with_links: If links should be stored :param recursive: If the rest of the tree should be recursively stored :param max_depth: Maximum depth to store :param hdf5_group: HDF5 node in the file corresponding to `traj_node` """ if store_data == pypetconstants.STORE_NOTHING: return if max_depth is None: max_depth = float('inf') if hdf5_group is None: # Get parent hdf5 node location = traj_node.v_full_name hdf5_location = location.replace('.', '/') try: if location == '': hdf5_group = self._trajectory_group else: hdf5_group = self._hdf5file.get_node( where=self._trajectory_group, name=hdf5_location) except pt.NoSuchNodeError: self._logger.debug('Cannot store `%s` the parental hdf5 node with path `%s` does ' 'not exist on disk.' % (traj_node.v_name, hdf5_location)) if traj_node.v_is_leaf: self._logger.error('Cannot store `%s` the parental hdf5 ' 'node with path `%s` does ' 'not exist on disk! The child ' 'you want to store is a leaf node,' 'that cannot be stored without ' 'the parental node existing on ' 'disk.' % (traj_node.v_name, hdf5_location)) raise else: self._logger.debug('I will try to store the path from trajectory root to ' 'the child now.') self._tree_store_sub_branch(traj_node._nn_interface._root_instance, traj_node.v_full_name + '.' + branch_name, store_data=store_data, with_links=with_links, recursive=recursive, max_depth=max_depth + traj_node.v_depth, hdf5_group=self._trajectory_group) return current_depth = 1 split_names = branch_name.split('.') leaf_name = split_names.pop() for name in split_names: if current_depth > max_depth: return # Store along a branch self._tree_store_nodes_dfs(traj_node, name, store_data=store_data, with_links=with_links, recursive=False, max_depth=max_depth, current_depth=current_depth, parent_hdf5_group=hdf5_group) current_depth += 1 traj_node = traj_node._children[name] hdf5_group = getattr(hdf5_group, name) # Store final group and recursively everything below it if current_depth <= max_depth: self._tree_store_nodes_dfs(traj_node, leaf_name, store_data=store_data, with_links=with_links, recursive=recursive, max_depth=max_depth, current_depth=current_depth, parent_hdf5_group=hdf5_group)
def function[_tree_store_sub_branch, parameter[self, traj_node, branch_name, store_data, with_links, recursive, max_depth, hdf5_group]]: constant[Stores data starting from a node along a branch and starts recursively loading all data at end of branch. :param traj_node: The node where storing starts :param branch_name: A branch along which storing progresses. Colon Notation is used: 'group1.group2.group3' loads 'group1', then 'group2', then 'group3', and then finally recursively all children and children's children below 'group3'. :param store_data: How data should be stored :param with_links: If links should be stored :param recursive: If the rest of the tree should be recursively stored :param max_depth: Maximum depth to store :param hdf5_group: HDF5 node in the file corresponding to `traj_node` ] if compare[name[store_data] equal[==] name[pypetconstants].STORE_NOTHING] begin[:] return[None] if compare[name[max_depth] is constant[None]] begin[:] variable[max_depth] assign[=] call[name[float], parameter[constant[inf]]] if compare[name[hdf5_group] is constant[None]] begin[:] variable[location] assign[=] name[traj_node].v_full_name variable[hdf5_location] assign[=] call[name[location].replace, parameter[constant[.], constant[/]]] <ast.Try object at 0x7da1b031a980> variable[current_depth] assign[=] constant[1] variable[split_names] assign[=] call[name[branch_name].split, parameter[constant[.]]] variable[leaf_name] assign[=] call[name[split_names].pop, parameter[]] for taget[name[name]] in starred[name[split_names]] begin[:] if compare[name[current_depth] greater[>] name[max_depth]] begin[:] return[None] call[name[self]._tree_store_nodes_dfs, parameter[name[traj_node], name[name]]] <ast.AugAssign object at 0x7da1b031a260> variable[traj_node] assign[=] call[name[traj_node]._children][name[name]] variable[hdf5_group] assign[=] call[name[getattr], parameter[name[hdf5_group], name[name]]] if compare[name[current_depth] less_or_equal[<=] name[max_depth]] begin[:] call[name[self]._tree_store_nodes_dfs, parameter[name[traj_node], name[leaf_name]]]
keyword[def] identifier[_tree_store_sub_branch] ( identifier[self] , identifier[traj_node] , identifier[branch_name] , identifier[store_data] = identifier[pypetconstants] . identifier[STORE_DATA] , identifier[with_links] = keyword[True] , identifier[recursive] = keyword[False] , identifier[max_depth] = keyword[None] , identifier[hdf5_group] = keyword[None] ): literal[string] keyword[if] identifier[store_data] == identifier[pypetconstants] . identifier[STORE_NOTHING] : keyword[return] keyword[if] identifier[max_depth] keyword[is] keyword[None] : identifier[max_depth] = identifier[float] ( literal[string] ) keyword[if] identifier[hdf5_group] keyword[is] keyword[None] : identifier[location] = identifier[traj_node] . identifier[v_full_name] identifier[hdf5_location] = identifier[location] . identifier[replace] ( literal[string] , literal[string] ) keyword[try] : keyword[if] identifier[location] == literal[string] : identifier[hdf5_group] = identifier[self] . identifier[_trajectory_group] keyword[else] : identifier[hdf5_group] = identifier[self] . identifier[_hdf5file] . identifier[get_node] ( identifier[where] = identifier[self] . identifier[_trajectory_group] , identifier[name] = identifier[hdf5_location] ) keyword[except] identifier[pt] . identifier[NoSuchNodeError] : identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] literal[string] % ( identifier[traj_node] . identifier[v_name] , identifier[hdf5_location] )) keyword[if] identifier[traj_node] . identifier[v_is_leaf] : identifier[self] . identifier[_logger] . identifier[error] ( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] %( identifier[traj_node] . identifier[v_name] , identifier[hdf5_location] )) keyword[raise] keyword[else] : identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] literal[string] ) identifier[self] . identifier[_tree_store_sub_branch] ( identifier[traj_node] . identifier[_nn_interface] . identifier[_root_instance] , identifier[traj_node] . identifier[v_full_name] + literal[string] + identifier[branch_name] , identifier[store_data] = identifier[store_data] , identifier[with_links] = identifier[with_links] , identifier[recursive] = identifier[recursive] , identifier[max_depth] = identifier[max_depth] + identifier[traj_node] . identifier[v_depth] , identifier[hdf5_group] = identifier[self] . identifier[_trajectory_group] ) keyword[return] identifier[current_depth] = literal[int] identifier[split_names] = identifier[branch_name] . identifier[split] ( literal[string] ) identifier[leaf_name] = identifier[split_names] . identifier[pop] () keyword[for] identifier[name] keyword[in] identifier[split_names] : keyword[if] identifier[current_depth] > identifier[max_depth] : keyword[return] identifier[self] . identifier[_tree_store_nodes_dfs] ( identifier[traj_node] , identifier[name] , identifier[store_data] = identifier[store_data] , identifier[with_links] = identifier[with_links] , identifier[recursive] = keyword[False] , identifier[max_depth] = identifier[max_depth] , identifier[current_depth] = identifier[current_depth] , identifier[parent_hdf5_group] = identifier[hdf5_group] ) identifier[current_depth] += literal[int] identifier[traj_node] = identifier[traj_node] . identifier[_children] [ identifier[name] ] identifier[hdf5_group] = identifier[getattr] ( identifier[hdf5_group] , identifier[name] ) keyword[if] identifier[current_depth] <= identifier[max_depth] : identifier[self] . identifier[_tree_store_nodes_dfs] ( identifier[traj_node] , identifier[leaf_name] , identifier[store_data] = identifier[store_data] , identifier[with_links] = identifier[with_links] , identifier[recursive] = identifier[recursive] , identifier[max_depth] = identifier[max_depth] , identifier[current_depth] = identifier[current_depth] , identifier[parent_hdf5_group] = identifier[hdf5_group] )
def _tree_store_sub_branch(self, traj_node, branch_name, store_data=pypetconstants.STORE_DATA, with_links=True, recursive=False, max_depth=None, hdf5_group=None): """Stores data starting from a node along a branch and starts recursively loading all data at end of branch. :param traj_node: The node where storing starts :param branch_name: A branch along which storing progresses. Colon Notation is used: 'group1.group2.group3' loads 'group1', then 'group2', then 'group3', and then finally recursively all children and children's children below 'group3'. :param store_data: How data should be stored :param with_links: If links should be stored :param recursive: If the rest of the tree should be recursively stored :param max_depth: Maximum depth to store :param hdf5_group: HDF5 node in the file corresponding to `traj_node` """ if store_data == pypetconstants.STORE_NOTHING: return # depends on [control=['if'], data=[]] if max_depth is None: max_depth = float('inf') # depends on [control=['if'], data=['max_depth']] if hdf5_group is None: # Get parent hdf5 node location = traj_node.v_full_name hdf5_location = location.replace('.', '/') try: if location == '': hdf5_group = self._trajectory_group # depends on [control=['if'], data=[]] else: hdf5_group = self._hdf5file.get_node(where=self._trajectory_group, name=hdf5_location) # depends on [control=['try'], data=[]] except pt.NoSuchNodeError: self._logger.debug('Cannot store `%s` the parental hdf5 node with path `%s` does not exist on disk.' % (traj_node.v_name, hdf5_location)) if traj_node.v_is_leaf: self._logger.error('Cannot store `%s` the parental hdf5 node with path `%s` does not exist on disk! The child you want to store is a leaf node,that cannot be stored without the parental node existing on disk.' % (traj_node.v_name, hdf5_location)) raise # depends on [control=['if'], data=[]] else: self._logger.debug('I will try to store the path from trajectory root to the child now.') self._tree_store_sub_branch(traj_node._nn_interface._root_instance, traj_node.v_full_name + '.' + branch_name, store_data=store_data, with_links=with_links, recursive=recursive, max_depth=max_depth + traj_node.v_depth, hdf5_group=self._trajectory_group) return # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['hdf5_group']] current_depth = 1 split_names = branch_name.split('.') leaf_name = split_names.pop() for name in split_names: if current_depth > max_depth: return # depends on [control=['if'], data=[]] # Store along a branch self._tree_store_nodes_dfs(traj_node, name, store_data=store_data, with_links=with_links, recursive=False, max_depth=max_depth, current_depth=current_depth, parent_hdf5_group=hdf5_group) current_depth += 1 traj_node = traj_node._children[name] hdf5_group = getattr(hdf5_group, name) # depends on [control=['for'], data=['name']] # Store final group and recursively everything below it if current_depth <= max_depth: self._tree_store_nodes_dfs(traj_node, leaf_name, store_data=store_data, with_links=with_links, recursive=recursive, max_depth=max_depth, current_depth=current_depth, parent_hdf5_group=hdf5_group) # depends on [control=['if'], data=['current_depth', 'max_depth']]
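Editorial note: stripped of pypet's HDF5 plumbing, the branch walk above reduces to storing each intermediate group along the dotted path non-recursively, then storing the final leaf recursively, aborting once max_depth is exceeded. A minimal sketch (all names illustrative, not pypet's API):

def store_branch(node, branch_name, store_node, max_depth=float('inf')):
    depth = 1
    *groups, leaf = branch_name.split('.')
    for name in groups:
        if depth > max_depth:
            return
        store_node(node, name, recursive=False)  # store intermediate group
        depth += 1
        node = node.children[name]               # descend along the branch
    if depth <= max_depth:
        store_node(node, leaf, recursive=True)   # recurse only at the leaf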
def make_confidence_report_bundled(filepath, train_start=TRAIN_START, train_end=TRAIN_END, test_start=TEST_START, test_end=TEST_END, which_set=WHICH_SET, recipe=RECIPE, report_path=REPORT_PATH, nb_iter=NB_ITER, base_eps=None, base_eps_iter=None, base_eps_iter_small=None, batch_size=BATCH_SIZE): """ Load a saved model, gather its predictions, and save a confidence report. :param filepath: path to model to evaluate :param train_start: index of first training set example to use :param train_end: index of last training set example to use :param test_start: index of first test set example to use :param test_end: index of last test set example to use :param which_set: 'train' or 'test' :param nb_iter: int, number of iterations of attack algorithm (note that different recipes will use this differently, for example many will run two attacks, one with nb_iter iterations and one with 25X more) :param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1]. Inferred from the dataset if not specified. :param base_eps_iter: float, a step size used in different ways by different recipes. Typically the step size for a PGD attack. Inferred from the dataset if not specified. :param base_eps_iter_small: float, a second step size for a more fine-grained attack. Inferred from the dataset if not specified. :param batch_size: int, batch size """ # Avoid circular import from cleverhans import attack_bundling if callable(recipe): run_recipe = recipe else: run_recipe = getattr(attack_bundling, recipe) # Set logging level to see debug information set_log_level(logging.INFO) # Create TF session sess = tf.Session() assert filepath.endswith('.joblib') if report_path is None: report_path = filepath[:-len('.joblib')] + "_bundled_report.joblib" with sess.as_default(): model = load(filepath) assert len(model.get_params()) > 0 factory = model.dataset_factory factory.kwargs['train_start'] = train_start factory.kwargs['train_end'] = train_end factory.kwargs['test_start'] = test_start factory.kwargs['test_end'] = test_end dataset = factory() center = dataset.kwargs['center'] if 'max_val' in factory.kwargs: max_value = factory.kwargs['max_val'] elif hasattr(dataset, 'max_val'): max_value = dataset.max_val else: raise AttributeError("Can't find max_value specification") min_value = 0. - center * max_value value_range = max_value - min_value if 'CIFAR' in str(factory.cls): if base_eps is None: base_eps = 8. / 255. if base_eps_iter is None: base_eps_iter = 2. / 255. if base_eps_iter_small is None: base_eps_iter_small = 1. / 255. elif 'MNIST' in str(factory.cls): if base_eps is None: base_eps = .3 if base_eps_iter is None: base_eps_iter = .1 base_eps_iter_small = None else: # Note that it is not required to specify base_eps_iter_small if base_eps is None or base_eps_iter is None: raise NotImplementedError("Not able to infer threat model from " + str(factory.cls)) eps = base_eps * value_range eps_iter = base_eps_iter * value_range if base_eps_iter_small is None: eps_iter_small = None else: eps_iter_small = base_eps_iter_small * value_range clip_min = min_value clip_max = max_value x_data, y_data = dataset.get_set(which_set) assert x_data.max() <= max_value assert x_data.min() >= min_value assert eps_iter <= eps assert eps_iter_small is None or eps_iter_small <= eps # Different recipes take different arguments. # For now I don't have an idea for a beautiful unifying framework, so # we get an if statement. 
  if recipe == 'random_search_max_confidence_recipe':
    # pylint always checks against the default recipe here
    # pylint: disable=no-value-for-parameter
    run_recipe(sess=sess, model=model, x=x_data, y=y_data, eps=eps,
               clip_min=clip_min, clip_max=clip_max, report_path=report_path)
  else:
    run_recipe(sess=sess, model=model, x=x_data, y=y_data,
               nb_classes=dataset.NB_CLASSES, eps=eps, clip_min=clip_min,
               clip_max=clip_max, eps_iter=eps_iter, nb_iter=nb_iter,
               report_path=report_path, eps_iter_small=eps_iter_small,
               batch_size=batch_size)
def function[make_confidence_report_bundled, parameter[filepath, train_start, train_end, test_start, test_end, which_set, recipe, report_path, nb_iter, base_eps, base_eps_iter, base_eps_iter_small, batch_size]]: constant[ Load a saved model, gather its predictions, and save a confidence report. :param filepath: path to model to evaluate :param train_start: index of first training set example to use :param train_end: index of last training set example to use :param test_start: index of first test set example to use :param test_end: index of last test set example to use :param which_set: 'train' or 'test' :param nb_iter: int, number of iterations of attack algorithm (note that different recipes will use this differently, for example many will run two attacks, one with nb_iter iterations and one with 25X more) :param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1]. Inferred from the dataset if not specified. :param base_eps_iter: float, a step size used in different ways by different recipes. Typically the step size for a PGD attack. Inferred from the dataset if not specified. :param base_eps_iter_small: float, a second step size for a more fine-grained attack. Inferred from the dataset if not specified. :param batch_size: int, batch size ] from relative_module[cleverhans] import module[attack_bundling] if call[name[callable], parameter[name[recipe]]] begin[:] variable[run_recipe] assign[=] name[recipe] call[name[set_log_level], parameter[name[logging].INFO]] variable[sess] assign[=] call[name[tf].Session, parameter[]] assert[call[name[filepath].endswith, parameter[constant[.joblib]]]] if compare[name[report_path] is constant[None]] begin[:] variable[report_path] assign[=] binary_operation[call[name[filepath]][<ast.Slice object at 0x7da207f9bfa0>] + constant[_bundled_report.joblib]] with call[name[sess].as_default, parameter[]] begin[:] variable[model] assign[=] call[name[load], parameter[name[filepath]]] assert[compare[call[name[len], parameter[call[name[model].get_params, parameter[]]]] greater[>] constant[0]]] variable[factory] assign[=] name[model].dataset_factory call[name[factory].kwargs][constant[train_start]] assign[=] name[train_start] call[name[factory].kwargs][constant[train_end]] assign[=] name[train_end] call[name[factory].kwargs][constant[test_start]] assign[=] name[test_start] call[name[factory].kwargs][constant[test_end]] assign[=] name[test_end] variable[dataset] assign[=] call[name[factory], parameter[]] variable[center] assign[=] call[name[dataset].kwargs][constant[center]] if compare[constant[max_val] in name[factory].kwargs] begin[:] variable[max_value] assign[=] call[name[factory].kwargs][constant[max_val]] variable[min_value] assign[=] binary_operation[constant[0.0] - binary_operation[name[center] * name[max_value]]] variable[value_range] assign[=] binary_operation[name[max_value] - name[min_value]] if compare[constant[CIFAR] in call[name[str], parameter[name[factory].cls]]] begin[:] if compare[name[base_eps] is constant[None]] begin[:] variable[base_eps] assign[=] binary_operation[constant[8.0] / constant[255.0]] if compare[name[base_eps_iter] is constant[None]] begin[:] variable[base_eps_iter] assign[=] binary_operation[constant[2.0] / constant[255.0]] if compare[name[base_eps_iter_small] is constant[None]] begin[:] variable[base_eps_iter_small] assign[=] binary_operation[constant[1.0] / constant[255.0]] variable[eps] assign[=] binary_operation[name[base_eps] * name[value_range]] variable[eps_iter] assign[=] binary_operation[name[base_eps_iter] * name[value_range]] if compare[name[base_eps_iter_small] is constant[None]] begin[:] variable[eps_iter_small] assign[=] constant[None] variable[clip_min] assign[=] name[min_value] variable[clip_max] assign[=] name[max_value] <ast.Tuple object at 0x7da2044c2770> assign[=] call[name[dataset].get_set, parameter[name[which_set]]] assert[compare[call[name[x_data].max, parameter[]] less_or_equal[<=] name[max_value]]] assert[compare[call[name[x_data].min, parameter[]] greater_or_equal[>=] name[min_value]]] assert[compare[name[eps_iter] less_or_equal[<=] name[eps]]] assert[<ast.BoolOp object at 0x7da2044c34f0>] if compare[name[recipe] equal[==] constant[random_search_max_confidence_recipe]] begin[:] call[name[run_recipe], parameter[]]
keyword[def] identifier[make_confidence_report_bundled] ( identifier[filepath] , identifier[train_start] = identifier[TRAIN_START] , identifier[train_end] = identifier[TRAIN_END] , identifier[test_start] = identifier[TEST_START] , identifier[test_end] = identifier[TEST_END] , identifier[which_set] = identifier[WHICH_SET] , identifier[recipe] = identifier[RECIPE] , identifier[report_path] = identifier[REPORT_PATH] , identifier[nb_iter] = identifier[NB_ITER] , identifier[base_eps] = keyword[None] , identifier[base_eps_iter] = keyword[None] , identifier[base_eps_iter_small] = keyword[None] , identifier[batch_size] = identifier[BATCH_SIZE] ): literal[string] keyword[from] identifier[cleverhans] keyword[import] identifier[attack_bundling] keyword[if] identifier[callable] ( identifier[recipe] ): identifier[run_recipe] = identifier[recipe] keyword[else] : identifier[run_recipe] = identifier[getattr] ( identifier[attack_bundling] , identifier[recipe] ) identifier[set_log_level] ( identifier[logging] . identifier[INFO] ) identifier[sess] = identifier[tf] . identifier[Session] () keyword[assert] identifier[filepath] . identifier[endswith] ( literal[string] ) keyword[if] identifier[report_path] keyword[is] keyword[None] : identifier[report_path] = identifier[filepath] [:- identifier[len] ( literal[string] )]+ literal[string] keyword[with] identifier[sess] . identifier[as_default] (): identifier[model] = identifier[load] ( identifier[filepath] ) keyword[assert] identifier[len] ( identifier[model] . identifier[get_params] ())> literal[int] identifier[factory] = identifier[model] . identifier[dataset_factory] identifier[factory] . identifier[kwargs] [ literal[string] ]= identifier[train_start] identifier[factory] . identifier[kwargs] [ literal[string] ]= identifier[train_end] identifier[factory] . identifier[kwargs] [ literal[string] ]= identifier[test_start] identifier[factory] . identifier[kwargs] [ literal[string] ]= identifier[test_end] identifier[dataset] = identifier[factory] () identifier[center] = identifier[dataset] . identifier[kwargs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[factory] . identifier[kwargs] : identifier[max_value] = identifier[factory] . identifier[kwargs] [ literal[string] ] keyword[elif] identifier[hasattr] ( identifier[dataset] , literal[string] ): identifier[max_value] = identifier[dataset] . identifier[max_val] keyword[else] : keyword[raise] identifier[AttributeError] ( literal[string] ) identifier[min_value] = literal[int] - identifier[center] * identifier[max_value] identifier[value_range] = identifier[max_value] - identifier[min_value] keyword[if] literal[string] keyword[in] identifier[str] ( identifier[factory] . identifier[cls] ): keyword[if] identifier[base_eps] keyword[is] keyword[None] : identifier[base_eps] = literal[int] / literal[int] keyword[if] identifier[base_eps_iter] keyword[is] keyword[None] : identifier[base_eps_iter] = literal[int] / literal[int] keyword[if] identifier[base_eps_iter_small] keyword[is] keyword[None] : identifier[base_eps_iter_small] = literal[int] / literal[int] keyword[elif] literal[string] keyword[in] identifier[str] ( identifier[factory] . identifier[cls] ): keyword[if] identifier[base_eps] keyword[is] keyword[None] : identifier[base_eps] = literal[int] keyword[if] identifier[base_eps_iter] keyword[is] keyword[None] : identifier[base_eps_iter] = literal[int] identifier[base_eps_iter_small] = keyword[None] keyword[else] : keyword[if] identifier[base_eps] keyword[is] keyword[None] keyword[or] identifier[base_eps_iter] keyword[is] keyword[None] : keyword[raise] identifier[NotImplementedError] ( literal[string] + identifier[str] ( identifier[factory] . identifier[cls] )) identifier[eps] = identifier[base_eps] * identifier[value_range] identifier[eps_iter] = identifier[base_eps_iter] * identifier[value_range] keyword[if] identifier[base_eps_iter_small] keyword[is] keyword[None] : identifier[eps_iter_small] = keyword[None] keyword[else] : identifier[eps_iter_small] = identifier[base_eps_iter_small] * identifier[value_range] identifier[clip_min] = identifier[min_value] identifier[clip_max] = identifier[max_value] identifier[x_data] , identifier[y_data] = identifier[dataset] . identifier[get_set] ( identifier[which_set] ) keyword[assert] identifier[x_data] . identifier[max] ()<= identifier[max_value] keyword[assert] identifier[x_data] . identifier[min] ()>= identifier[min_value] keyword[assert] identifier[eps_iter] <= identifier[eps] keyword[assert] identifier[eps_iter_small] keyword[is] keyword[None] keyword[or] identifier[eps_iter_small] <= identifier[eps] keyword[if] identifier[recipe] == literal[string] : identifier[run_recipe] ( identifier[sess] = identifier[sess] , identifier[model] = identifier[model] , identifier[x] = identifier[x_data] , identifier[y] = identifier[y_data] , identifier[eps] = identifier[eps] , identifier[clip_min] = identifier[clip_min] , identifier[clip_max] = identifier[clip_max] , identifier[report_path] = identifier[report_path] ) keyword[else] : identifier[run_recipe] ( identifier[sess] = identifier[sess] , identifier[model] = identifier[model] , identifier[x] = identifier[x_data] , identifier[y] = identifier[y_data] , identifier[nb_classes] = identifier[dataset] . identifier[NB_CLASSES] , identifier[eps] = identifier[eps] , identifier[clip_min] = identifier[clip_min] , identifier[clip_max] = identifier[clip_max] , identifier[eps_iter] = identifier[eps_iter] , identifier[nb_iter] = identifier[nb_iter] , identifier[report_path] = identifier[report_path] , identifier[eps_iter_small] = identifier[eps_iter_small] , identifier[batch_size] = identifier[batch_size] )
def make_confidence_report_bundled(filepath, train_start=TRAIN_START, train_end=TRAIN_END, test_start=TEST_START, test_end=TEST_END, which_set=WHICH_SET, recipe=RECIPE, report_path=REPORT_PATH, nb_iter=NB_ITER, base_eps=None, base_eps_iter=None, base_eps_iter_small=None, batch_size=BATCH_SIZE):
  """
  Load a saved model, gather its predictions, and save a confidence report.


  :param filepath: path to model to evaluate
  :param train_start: index of first training set example to use
  :param train_end: index of last training set example to use
  :param test_start: index of first test set example to use
  :param test_end: index of last test set example to use
  :param which_set: 'train' or 'test'
  :param nb_iter: int, number of iterations of attack algorithm
    (note that different recipes will use this differently, for example
    many will run two attacks, one with nb_iter iterations and one with
    25X more)
  :param base_eps: float, epsilon parameter for threat model, on a scale of
    [0, 1]. Inferred from the dataset if not specified.
  :param base_eps_iter: float, a step size used in different ways by
    different recipes. Typically the step size for a PGD attack.
    Inferred from the dataset if not specified.
  :param base_eps_iter_small: float, a second step size for a more
    fine-grained attack. Inferred from the dataset if not specified.
  :param batch_size: int, batch size
  """
  # Avoid circular import
  from cleverhans import attack_bundling
  if callable(recipe):
    run_recipe = recipe # depends on [control=['if'], data=[]]
  else:
    run_recipe = getattr(attack_bundling, recipe)
  # Set logging level to see debug information
  set_log_level(logging.INFO)
  # Create TF session
  sess = tf.Session()
  assert filepath.endswith('.joblib')
  if report_path is None:
    report_path = filepath[:-len('.joblib')] + '_bundled_report.joblib' # depends on [control=['if'], data=['report_path']]
  with sess.as_default():
    model = load(filepath) # depends on [control=['with'], data=[]]
  assert len(model.get_params()) > 0
  factory = model.dataset_factory
  factory.kwargs['train_start'] = train_start
  factory.kwargs['train_end'] = train_end
  factory.kwargs['test_start'] = test_start
  factory.kwargs['test_end'] = test_end
  dataset = factory()
  center = dataset.kwargs['center']
  if 'max_val' in factory.kwargs:
    max_value = factory.kwargs['max_val'] # depends on [control=['if'], data=[]]
  elif hasattr(dataset, 'max_val'):
    max_value = dataset.max_val # depends on [control=['if'], data=[]]
  else:
    raise AttributeError("Can't find max_value specification")
  min_value = 0.0 - center * max_value
  value_range = max_value - min_value
  if 'CIFAR' in str(factory.cls):
    if base_eps is None:
      base_eps = 8.0 / 255.0 # depends on [control=['if'], data=['base_eps']]
    if base_eps_iter is None:
      base_eps_iter = 2.0 / 255.0 # depends on [control=['if'], data=['base_eps_iter']]
    if base_eps_iter_small is None:
      base_eps_iter_small = 1.0 / 255.0 # depends on [control=['if'], data=['base_eps_iter_small']] # depends on [control=['if'], data=[]]
  elif 'MNIST' in str(factory.cls):
    if base_eps is None:
      base_eps = 0.3 # depends on [control=['if'], data=['base_eps']]
    if base_eps_iter is None:
      base_eps_iter = 0.1 # depends on [control=['if'], data=['base_eps_iter']]
    base_eps_iter_small = None # depends on [control=['if'], data=[]]
  # Note that it is not required to specify base_eps_iter_small
  elif base_eps is None or base_eps_iter is None:
    raise NotImplementedError('Not able to infer threat model from ' + str(factory.cls)) # depends on [control=['if'], data=[]]
  eps = base_eps * value_range
  eps_iter = base_eps_iter * value_range
  if base_eps_iter_small is None:
    eps_iter_small = None # depends on [control=['if'], data=[]]
  else:
    eps_iter_small = base_eps_iter_small * value_range
  clip_min = min_value
  clip_max = max_value
  (x_data, y_data) = dataset.get_set(which_set)
  assert x_data.max() <= max_value
  assert x_data.min() >= min_value
  assert eps_iter <= eps
  assert eps_iter_small is None or eps_iter_small <= eps
  # Different recipes take different arguments.
  # For now I don't have an idea for a beautiful unifying framework, so
  # we get an if statement.
  if recipe == 'random_search_max_confidence_recipe':
    # pylint always checks against the default recipe here
    # pylint: disable=no-value-for-parameter
    run_recipe(sess=sess, model=model, x=x_data, y=y_data, eps=eps, clip_min=clip_min, clip_max=clip_max, report_path=report_path) # depends on [control=['if'], data=[]]
  else:
    run_recipe(sess=sess, model=model, x=x_data, y=y_data, nb_classes=dataset.NB_CLASSES, eps=eps, clip_min=clip_min, clip_max=clip_max, eps_iter=eps_iter, nb_iter=nb_iter, report_path=report_path, eps_iter_small=eps_iter_small, batch_size=batch_size)
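Editorial note: a worked instance of the epsilon scaling above, assuming a CIFAR-style dataset stored on a [0, 255] scale with center == 0 (so value_range == 255):

base_eps = 8. / 255.          # CIFAR default from the record
max_value, center = 255., 0.  # dataset stored on [0, 255], uncentered
min_value = 0. - center * max_value   # 0.0
value_range = max_value - min_value   # 255.0
eps = base_eps * value_range          # ~8.0, i.e. 8/255 in pixel units
eps_iter = (2. / 255.) * value_range  # ~2.0
assert eps_iter <= eps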
def clean(self): """ Validate that an event with this name on this date does not exist. """ cleaned = super(EventForm, self).clean() if Event.objects.filter(name=cleaned['name'], start_date=cleaned['start_date']).count(): raise forms.ValidationError(u'This event appears to be in the database already.') return cleaned
def function[clean, parameter[self]]: constant[ Validate that an event with this name on this date does not exist. ] variable[cleaned] assign[=] call[call[name[super], parameter[name[EventForm], name[self]]].clean, parameter[]] if call[call[name[Event].objects.filter, parameter[]].count, parameter[]] begin[:] <ast.Raise object at 0x7da2054a7ac0> return[name[cleaned]]
keyword[def] identifier[clean] ( identifier[self] ): literal[string] identifier[cleaned] = identifier[super] ( identifier[EventForm] , identifier[self] ). identifier[clean] () keyword[if] identifier[Event] . identifier[objects] . identifier[filter] ( identifier[name] = identifier[cleaned] [ literal[string] ], identifier[start_date] = identifier[cleaned] [ literal[string] ]). identifier[count] (): keyword[raise] identifier[forms] . identifier[ValidationError] ( literal[string] ) keyword[return] identifier[cleaned]
def clean(self): """ Validate that an event with this name on this date does not exist. """ cleaned = super(EventForm, self).clean() if Event.objects.filter(name=cleaned['name'], start_date=cleaned['start_date']).count(): raise forms.ValidationError(u'This event appears to be in the database already.') # depends on [control=['if'], data=[]] return cleaned
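Editorial note: one caveat with the record above: if 'name' or 'start_date' failed field validation, they are missing from cleaned_data, so the direct indexing raises KeyError rather than producing a form error. A slightly defensive variant (a sketch, not the project's code):

def clean(self):
    cleaned = super(EventForm, self).clean()
    name = cleaned.get('name')
    start_date = cleaned.get('start_date')
    # .exists() avoids counting rows when only presence matters.
    if name and start_date and Event.objects.filter(
            name=name, start_date=start_date).exists():
        raise forms.ValidationError(
            u'This event appears to be in the database already.')
    return cleaned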
def xorsum(t): """ XOR checksum :param t: :type t: :return: :rtype: """ _b = t[0] for i in t[1:]: _b = _b ^ i _b &= 0xff return _b
def function[xorsum, parameter[t]]: constant[ XOR checksum :param t: :type t: :return: :rtype: ] variable[_b] assign[=] call[name[t]][constant[0]] for taget[name[i]] in starred[call[name[t]][<ast.Slice object at 0x7da1b158a3b0>]] begin[:] variable[_b] assign[=] binary_operation[name[_b] <ast.BitXor object at 0x7da2590d6b00> name[i]] <ast.AugAssign object at 0x7da1b1588790> return[name[_b]]
keyword[def] identifier[xorsum] ( identifier[t] ): literal[string] identifier[_b] = identifier[t] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[t] [ literal[int] :]: identifier[_b] = identifier[_b] ^ identifier[i] identifier[_b] &= literal[int] keyword[return] identifier[_b]
def xorsum(t): """ XOR checksum :param t: :type t: :return: :rtype: """ _b = t[0] for i in t[1:]: _b = _b ^ i _b &= 255 # depends on [control=['for'], data=['i']] return _b
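Editorial note: a few worked values for the XOR checksum above (Python 3, where iterating bytes yields ints). A frame that carries its own checksum XORs to zero, which is the usual serial-protocol validity check:

assert xorsum([0x01, 0x02, 0x03]) == 0x00    # 0x01 ^ 0x02 == 0x03
assert xorsum(b'\x12\x34') == 0x26           # bytes iterate as ints on Python 3
frame = [0x10, 0x2F, 0x05]
assert xorsum(frame + [xorsum(frame)]) == 0  # frame + own checksum -> 0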
def any_user(password=None, permissions=[], groups=[], **kwargs): """ Shortcut for creating Users Permissions could be a list of permission names If not specified, creates active, non superuser and non staff user """ is_active = kwargs.pop('is_active', True) is_superuser = kwargs.pop('is_superuser', False) is_staff = kwargs.pop('is_staff', False) user = any_model(User, is_active = is_active, is_superuser = is_superuser, is_staff = is_staff, **kwargs) for group_name in groups : group = Group.objects.get(name=group_name) user.groups.add(group) for permission_name in permissions: app_label, codename = permission_name.split('.') permission = Permission.objects.get( content_type__app_label=app_label, codename=codename) user.user_permissions.add(permission) if password: user.set_password(password) user.save() return user
def function[any_user, parameter[password, permissions, groups]]: constant[ Shortcut for creating Users Permissions could be a list of permission names If not specified, creates active, non superuser and non staff user ] variable[is_active] assign[=] call[name[kwargs].pop, parameter[constant[is_active], constant[True]]] variable[is_superuser] assign[=] call[name[kwargs].pop, parameter[constant[is_superuser], constant[False]]] variable[is_staff] assign[=] call[name[kwargs].pop, parameter[constant[is_staff], constant[False]]] variable[user] assign[=] call[name[any_model], parameter[name[User]]] for taget[name[group_name]] in starred[name[groups]] begin[:] variable[group] assign[=] call[name[Group].objects.get, parameter[]] call[name[user].groups.add, parameter[name[group]]] for taget[name[permission_name]] in starred[name[permissions]] begin[:] <ast.Tuple object at 0x7da1b02b9de0> assign[=] call[name[permission_name].split, parameter[constant[.]]] variable[permission] assign[=] call[name[Permission].objects.get, parameter[]] call[name[user].user_permissions.add, parameter[name[permission]]] if name[password] begin[:] call[name[user].set_password, parameter[name[password]]] call[name[user].save, parameter[]] return[name[user]]
keyword[def] identifier[any_user] ( identifier[password] = keyword[None] , identifier[permissions] =[], identifier[groups] =[],** identifier[kwargs] ): literal[string] identifier[is_active] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] ) identifier[is_superuser] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[is_staff] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[user] = identifier[any_model] ( identifier[User] , identifier[is_active] = identifier[is_active] , identifier[is_superuser] = identifier[is_superuser] , identifier[is_staff] = identifier[is_staff] ,** identifier[kwargs] ) keyword[for] identifier[group_name] keyword[in] identifier[groups] : identifier[group] = identifier[Group] . identifier[objects] . identifier[get] ( identifier[name] = identifier[group_name] ) identifier[user] . identifier[groups] . identifier[add] ( identifier[group] ) keyword[for] identifier[permission_name] keyword[in] identifier[permissions] : identifier[app_label] , identifier[codename] = identifier[permission_name] . identifier[split] ( literal[string] ) identifier[permission] = identifier[Permission] . identifier[objects] . identifier[get] ( identifier[content_type__app_label] = identifier[app_label] , identifier[codename] = identifier[codename] ) identifier[user] . identifier[user_permissions] . identifier[add] ( identifier[permission] ) keyword[if] identifier[password] : identifier[user] . identifier[set_password] ( identifier[password] ) identifier[user] . identifier[save] () keyword[return] identifier[user]
def any_user(password=None, permissions=[], groups=[], **kwargs): """ Shortcut for creating Users Permissions could be a list of permission names If not specified, creates active, non superuser and non staff user """ is_active = kwargs.pop('is_active', True) is_superuser = kwargs.pop('is_superuser', False) is_staff = kwargs.pop('is_staff', False) user = any_model(User, is_active=is_active, is_superuser=is_superuser, is_staff=is_staff, **kwargs) for group_name in groups: group = Group.objects.get(name=group_name) user.groups.add(group) # depends on [control=['for'], data=['group_name']] for permission_name in permissions: (app_label, codename) = permission_name.split('.') permission = Permission.objects.get(content_type__app_label=app_label, codename=codename) user.user_permissions.add(permission) # depends on [control=['for'], data=['permission_name']] if password: user.set_password(password) # depends on [control=['if'], data=[]] user.save() return user
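Editorial note: illustrative usage of the factory above, assuming a Django project with the stock auth app, an existing 'editors' group, and an articles app defining the 'add_article' permission (all names hypothetical):

user = any_user(username='alice', password='s3cret', is_staff=True,
                permissions=['articles.add_article'], groups=['editors'])

assert user.check_password('s3cret')
assert user.groups.filter(name='editors').exists()
assert user.user_permissions.filter(codename='add_article').exists()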
def spline_interpolate(x1, y1, x2): """ Given a function at a set of points (x1, y1), interpolate to evaluate it at points x2. """ sp = Spline(x1, y1) return sp(x2)
def function[spline_interpolate, parameter[x1, y1, x2]]: constant[ Given a function at a set of points (x1, y1), interpolate to evaluate it at points x2. ] variable[sp] assign[=] call[name[Spline], parameter[name[x1], name[y1]]] return[call[name[sp], parameter[name[x2]]]]
keyword[def] identifier[spline_interpolate] ( identifier[x1] , identifier[y1] , identifier[x2] ): literal[string] identifier[sp] = identifier[Spline] ( identifier[x1] , identifier[y1] ) keyword[return] identifier[sp] ( identifier[x2] )
def spline_interpolate(x1, y1, x2): """ Given a function at a set of points (x1, y1), interpolate to evaluate it at points x2. """ sp = Spline(x1, y1) return sp(x2)
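Editorial note: the Spline class in this record is project-local, but SciPy offers the same one-shot fit-and-evaluate convenience; a sketch of the equivalent call under that assumption:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x1 = np.linspace(0, np.pi, 10)
y1 = np.sin(x1)
x2 = np.linspace(0, np.pi, 50)
y2 = InterpolatedUnivariateSpline(x1, y1)(x2)  # evaluate spline at new points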
def choose_path(): """ Invoke a folder selection dialog here :return: """ dirs = webview.create_file_dialog(webview.FOLDER_DIALOG) if dirs and len(dirs) > 0: directory = dirs[0] if isinstance(directory, bytes): directory = directory.decode("utf-8") response = {"status": "ok", "directory": directory} else: response = {"status": "cancel"} return jsonify(response)
def function[choose_path, parameter[]]: constant[ Invoke a folder selection dialog here :return: ] variable[dirs] assign[=] call[name[webview].create_file_dialog, parameter[name[webview].FOLDER_DIALOG]] if <ast.BoolOp object at 0x7da18f723400> begin[:] variable[directory] assign[=] call[name[dirs]][constant[0]] if call[name[isinstance], parameter[name[directory], name[bytes]]] begin[:] variable[directory] assign[=] call[name[directory].decode, parameter[constant[utf-8]]] variable[response] assign[=] dictionary[[<ast.Constant object at 0x7da2041db2e0>, <ast.Constant object at 0x7da2041d9cc0>], [<ast.Constant object at 0x7da2041d9270>, <ast.Name object at 0x7da2041d8580>]] return[call[name[jsonify], parameter[name[response]]]]
keyword[def] identifier[choose_path] (): literal[string] identifier[dirs] = identifier[webview] . identifier[create_file_dialog] ( identifier[webview] . identifier[FOLDER_DIALOG] ) keyword[if] identifier[dirs] keyword[and] identifier[len] ( identifier[dirs] )> literal[int] : identifier[directory] = identifier[dirs] [ literal[int] ] keyword[if] identifier[isinstance] ( identifier[directory] , identifier[bytes] ): identifier[directory] = identifier[directory] . identifier[decode] ( literal[string] ) identifier[response] ={ literal[string] : literal[string] , literal[string] : identifier[directory] } keyword[else] : identifier[response] ={ literal[string] : literal[string] } keyword[return] identifier[jsonify] ( identifier[response] )
def choose_path(): """ Invoke a folder selection dialog here :return: """ dirs = webview.create_file_dialog(webview.FOLDER_DIALOG) if dirs and len(dirs) > 0: directory = dirs[0] if isinstance(directory, bytes): directory = directory.decode('utf-8') # depends on [control=['if'], data=[]] response = {'status': 'ok', 'directory': directory} # depends on [control=['if'], data=[]] else: response = {'status': 'cancel'} return jsonify(response)
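Editorial note: for reference, the two JSON payload shapes this Flask view can produce (the directory value below is made up; the bytes-decode guard in the record exists because some pywebview backends return bytes paths):

ok_response = {'status': 'ok', 'directory': '/home/user/music'}  # folder picked
cancel_response = {'status': 'cancel'}                           # dialog dismissed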
def nodes_iter(self, t=None, data=False): """Return an iterator over the nodes with respect to a given temporal snapshot. Parameters ---------- t : snapshot id (default=None). If None the iterator returns all the nodes of the flattened graph. data : boolean, optional (default=False) If False the iterator returns nodes. If True return a two-tuple of node and node data dictionary Returns ------- niter : iterator An iterator over nodes. If data=True the iterator gives two-tuples containing (node, node data dictionary) Examples -------- >>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2], 0) >>> [n for n in G.nodes_iter(t=0)] [0, 1, 2] """ if t is not None: return iter([n for n in self.degree(t=t).values() if n > 0]) return iter(self._node)
def function[nodes_iter, parameter[self, t, data]]: constant[Return an iterator over the nodes with respect to a given temporal snapshot. Parameters ---------- t : snapshot id (default=None). If None the iterator returns all the nodes of the flattened graph. data : boolean, optional (default=False) If False the iterator returns nodes. If True return a two-tuple of node and node data dictionary Returns ------- niter : iterator An iterator over nodes. If data=True the iterator gives two-tuples containing (node, node data dictionary) Examples -------- >>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2], 0) >>> [n for n in G.nodes_iter(t=0)] [0, 1, 2] ] if compare[name[t] is_not constant[None]] begin[:] return[call[name[iter], parameter[<ast.ListComp object at 0x7da1b04ef760>]]] return[call[name[iter], parameter[name[self]._node]]]
keyword[def] identifier[nodes_iter] ( identifier[self] , identifier[t] = keyword[None] , identifier[data] = keyword[False] ): literal[string] keyword[if] identifier[t] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[iter] ([ identifier[n] keyword[for] identifier[n] keyword[in] identifier[self] . identifier[degree] ( identifier[t] = identifier[t] ). identifier[values] () keyword[if] identifier[n] > literal[int] ]) keyword[return] identifier[iter] ( identifier[self] . identifier[_node] )
def nodes_iter(self, t=None, data=False): """Return an iterator over the nodes with respect to a given temporal snapshot. Parameters ---------- t : snapshot id (default=None). If None the iterator returns all the nodes of the flattened graph. data : boolean, optional (default=False) If False the iterator returns nodes. If True return a two-tuple of node and node data dictionary Returns ------- niter : iterator An iterator over nodes. If data=True the iterator gives two-tuples containing (node, node data dictionary) Examples -------- >>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2], 0) >>> [n for n in G.nodes_iter(t=0)] [0, 1, 2] """ if t is not None: return iter([n for n in self.degree(t=t).values() if n > 0]) # depends on [control=['if'], data=['t']] return iter(self._node)
def resolve_one_step(self): """ Resolves model references. """ metamodel = self.parser.metamodel current_crossrefs = self.parser._crossrefs # print("DEBUG: Current crossrefs #: {}". # format(len(current_crossrefs))) new_crossrefs = [] self.delayed_crossrefs = [] resolved_crossref_count = 0 # ------------------------- # start of resolve-loop # ------------------------- default_scope = DefaultScopeProvider() for obj, attr, crossref in current_crossrefs: if (get_model(obj) == self.model): attr_value = getattr(obj, attr.name) attr_refs = [obj.__class__.__name__ + "." + attr.name, "*." + attr.name, obj.__class__.__name__ + ".*", "*.*"] for attr_ref in attr_refs: if attr_ref in metamodel.scope_providers: if self.parser.debug: self.parser.dprint(" FOUND {}".format(attr_ref)) resolved = metamodel.scope_providers[attr_ref]( obj, attr, crossref) break else: resolved = default_scope(obj, attr, crossref) # Collect cross-references for textx-tools if resolved and not type(resolved) is Postponed: if metamodel.textx_tools_support: self.pos_crossref_list.append( RefRulePosition( name=crossref.obj_name, ref_pos_start=crossref.position, ref_pos_end=crossref.position + len( resolved.name), def_pos_start=resolved._tx_position, def_pos_end=resolved._tx_position_end)) if not resolved: # As a fall-back search builtins if given if metamodel.builtins: if crossref.obj_name in metamodel.builtins: # TODO: Classes must match resolved = metamodel.builtins[crossref.obj_name] if not resolved: line, col = self.parser.pos_to_linecol(crossref.position) raise TextXSemanticError( message='Unknown object "{}" of class "{}"'.format( crossref.obj_name, crossref.cls.__name__), line=line, col=col, err_type=UNKNOWN_OBJ_ERROR, expected_obj_cls=crossref.cls, filename=self.model._tx_filename) if type(resolved) is Postponed: self.delayed_crossrefs.append((obj, attr, crossref)) new_crossrefs.append((obj, attr, crossref)) else: resolved_crossref_count += 1 if attr.mult in [MULT_ONEORMORE, MULT_ZEROORMORE]: attr_value.append(resolved) else: setattr(obj, attr.name, resolved) else: # crossref not in model new_crossrefs.append((obj, attr, crossref)) # ------------------------- # end of resolve-loop # ------------------------- # store cross-refs from other models in the parser list (for later # processing) self.parser._crossrefs = new_crossrefs # print("DEBUG: Next crossrefs #: {}".format(len(new_crossrefs))) return (resolved_crossref_count, self.delayed_crossrefs)
def function[resolve_one_step, parameter[self]]: constant[ Resolves model references. ] variable[metamodel] assign[=] name[self].parser.metamodel variable[current_crossrefs] assign[=] name[self].parser._crossrefs variable[new_crossrefs] assign[=] list[[]] name[self].delayed_crossrefs assign[=] list[[]] variable[resolved_crossref_count] assign[=] constant[0] variable[default_scope] assign[=] call[name[DefaultScopeProvider], parameter[]] for taget[tuple[[<ast.Name object at 0x7da20c6e5480>, <ast.Name object at 0x7da20c6e4d30>, <ast.Name object at 0x7da20c6e4bb0>]]] in starred[name[current_crossrefs]] begin[:] if compare[call[name[get_model], parameter[name[obj]]] equal[==] name[self].model] begin[:] variable[attr_value] assign[=] call[name[getattr], parameter[name[obj], name[attr].name]] variable[attr_refs] assign[=] list[[<ast.BinOp object at 0x7da20c6e5780>, <ast.BinOp object at 0x7da20c6e7910>, <ast.BinOp object at 0x7da20c6e4820>, <ast.Constant object at 0x7da20c6e7e20>]] for taget[name[attr_ref]] in starred[name[attr_refs]] begin[:] if compare[name[attr_ref] in name[metamodel].scope_providers] begin[:] if name[self].parser.debug begin[:] call[name[self].parser.dprint, parameter[call[constant[ FOUND {}].format, parameter[name[attr_ref]]]]] variable[resolved] assign[=] call[call[name[metamodel].scope_providers][name[attr_ref]], parameter[name[obj], name[attr], name[crossref]]] break if <ast.BoolOp object at 0x7da20c6e78e0> begin[:] if name[metamodel].textx_tools_support begin[:] call[name[self].pos_crossref_list.append, parameter[call[name[RefRulePosition], parameter[]]]] if <ast.UnaryOp object at 0x7da20c6e6380> begin[:] if name[metamodel].builtins begin[:] if compare[name[crossref].obj_name in name[metamodel].builtins] begin[:] variable[resolved] assign[=] call[name[metamodel].builtins][name[crossref].obj_name] if <ast.UnaryOp object at 0x7da20c6e5720> begin[:] <ast.Tuple object at 0x7da20c6e61d0> assign[=] call[name[self].parser.pos_to_linecol, parameter[name[crossref].position]] <ast.Raise object at 0x7da20c6e6cb0> if compare[call[name[type], parameter[name[resolved]]] is name[Postponed]] begin[:] call[name[self].delayed_crossrefs.append, parameter[tuple[[<ast.Name object at 0x7da2044c1db0>, <ast.Name object at 0x7da2044c0e50>, <ast.Name object at 0x7da2044c2410>]]]] call[name[new_crossrefs].append, parameter[tuple[[<ast.Name object at 0x7da2044c11b0>, <ast.Name object at 0x7da2044c2590>, <ast.Name object at 0x7da2044c0df0>]]]] name[self].parser._crossrefs assign[=] name[new_crossrefs] return[tuple[[<ast.Name object at 0x7da2044c0fa0>, <ast.Attribute object at 0x7da2044c2ce0>]]]
keyword[def] identifier[resolve_one_step] ( identifier[self] ): literal[string] identifier[metamodel] = identifier[self] . identifier[parser] . identifier[metamodel] identifier[current_crossrefs] = identifier[self] . identifier[parser] . identifier[_crossrefs] identifier[new_crossrefs] =[] identifier[self] . identifier[delayed_crossrefs] =[] identifier[resolved_crossref_count] = literal[int] identifier[default_scope] = identifier[DefaultScopeProvider] () keyword[for] identifier[obj] , identifier[attr] , identifier[crossref] keyword[in] identifier[current_crossrefs] : keyword[if] ( identifier[get_model] ( identifier[obj] )== identifier[self] . identifier[model] ): identifier[attr_value] = identifier[getattr] ( identifier[obj] , identifier[attr] . identifier[name] ) identifier[attr_refs] =[ identifier[obj] . identifier[__class__] . identifier[__name__] + literal[string] + identifier[attr] . identifier[name] , literal[string] + identifier[attr] . identifier[name] , identifier[obj] . identifier[__class__] . identifier[__name__] + literal[string] , literal[string] ] keyword[for] identifier[attr_ref] keyword[in] identifier[attr_refs] : keyword[if] identifier[attr_ref] keyword[in] identifier[metamodel] . identifier[scope_providers] : keyword[if] identifier[self] . identifier[parser] . identifier[debug] : identifier[self] . identifier[parser] . identifier[dprint] ( literal[string] . identifier[format] ( identifier[attr_ref] )) identifier[resolved] = identifier[metamodel] . identifier[scope_providers] [ identifier[attr_ref] ]( identifier[obj] , identifier[attr] , identifier[crossref] ) keyword[break] keyword[else] : identifier[resolved] = identifier[default_scope] ( identifier[obj] , identifier[attr] , identifier[crossref] ) keyword[if] identifier[resolved] keyword[and] keyword[not] identifier[type] ( identifier[resolved] ) keyword[is] identifier[Postponed] : keyword[if] identifier[metamodel] . identifier[textx_tools_support] : identifier[self] . identifier[pos_crossref_list] . identifier[append] ( identifier[RefRulePosition] ( identifier[name] = identifier[crossref] . identifier[obj_name] , identifier[ref_pos_start] = identifier[crossref] . identifier[position] , identifier[ref_pos_end] = identifier[crossref] . identifier[position] + identifier[len] ( identifier[resolved] . identifier[name] ), identifier[def_pos_start] = identifier[resolved] . identifier[_tx_position] , identifier[def_pos_end] = identifier[resolved] . identifier[_tx_position_end] )) keyword[if] keyword[not] identifier[resolved] : keyword[if] identifier[metamodel] . identifier[builtins] : keyword[if] identifier[crossref] . identifier[obj_name] keyword[in] identifier[metamodel] . identifier[builtins] : identifier[resolved] = identifier[metamodel] . identifier[builtins] [ identifier[crossref] . identifier[obj_name] ] keyword[if] keyword[not] identifier[resolved] : identifier[line] , identifier[col] = identifier[self] . identifier[parser] . identifier[pos_to_linecol] ( identifier[crossref] . identifier[position] ) keyword[raise] identifier[TextXSemanticError] ( identifier[message] = literal[string] . identifier[format] ( identifier[crossref] . identifier[obj_name] , identifier[crossref] . identifier[cls] . identifier[__name__] ), identifier[line] = identifier[line] , identifier[col] = identifier[col] , identifier[err_type] = identifier[UNKNOWN_OBJ_ERROR] , identifier[expected_obj_cls] = identifier[crossref] . identifier[cls] , identifier[filename] = identifier[self] . identifier[model] . identifier[_tx_filename] ) keyword[if] identifier[type] ( identifier[resolved] ) keyword[is] identifier[Postponed] : identifier[self] . identifier[delayed_crossrefs] . identifier[append] (( identifier[obj] , identifier[attr] , identifier[crossref] )) identifier[new_crossrefs] . identifier[append] (( identifier[obj] , identifier[attr] , identifier[crossref] )) keyword[else] : identifier[resolved_crossref_count] += literal[int] keyword[if] identifier[attr] . identifier[mult] keyword[in] [ identifier[MULT_ONEORMORE] , identifier[MULT_ZEROORMORE] ]: identifier[attr_value] . identifier[append] ( identifier[resolved] ) keyword[else] : identifier[setattr] ( identifier[obj] , identifier[attr] . identifier[name] , identifier[resolved] ) keyword[else] : identifier[new_crossrefs] . identifier[append] (( identifier[obj] , identifier[attr] , identifier[crossref] )) identifier[self] . identifier[parser] . identifier[_crossrefs] = identifier[new_crossrefs] keyword[return] ( identifier[resolved_crossref_count] , identifier[self] . identifier[delayed_crossrefs] )
def resolve_one_step(self): """ Resolves model references. """ metamodel = self.parser.metamodel current_crossrefs = self.parser._crossrefs # print("DEBUG: Current crossrefs #: {}". # format(len(current_crossrefs))) new_crossrefs = [] self.delayed_crossrefs = [] resolved_crossref_count = 0 # ------------------------- # start of resolve-loop # ------------------------- default_scope = DefaultScopeProvider() for (obj, attr, crossref) in current_crossrefs: if get_model(obj) == self.model: attr_value = getattr(obj, attr.name) attr_refs = [obj.__class__.__name__ + '.' + attr.name, '*.' + attr.name, obj.__class__.__name__ + '.*', '*.*'] for attr_ref in attr_refs: if attr_ref in metamodel.scope_providers: if self.parser.debug: self.parser.dprint(' FOUND {}'.format(attr_ref)) # depends on [control=['if'], data=[]] resolved = metamodel.scope_providers[attr_ref](obj, attr, crossref) break # depends on [control=['if'], data=['attr_ref']] # depends on [control=['for'], data=['attr_ref']] else: resolved = default_scope(obj, attr, crossref) # Collect cross-references for textx-tools if resolved and (not type(resolved) is Postponed): if metamodel.textx_tools_support: self.pos_crossref_list.append(RefRulePosition(name=crossref.obj_name, ref_pos_start=crossref.position, ref_pos_end=crossref.position + len(resolved.name), def_pos_start=resolved._tx_position, def_pos_end=resolved._tx_position_end)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not resolved: # As a fall-back search builtins if given if metamodel.builtins: if crossref.obj_name in metamodel.builtins: # TODO: Classes must match resolved = metamodel.builtins[crossref.obj_name] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not resolved: (line, col) = self.parser.pos_to_linecol(crossref.position) raise TextXSemanticError(message='Unknown object "{}" of class "{}"'.format(crossref.obj_name, crossref.cls.__name__), line=line, col=col, err_type=UNKNOWN_OBJ_ERROR, expected_obj_cls=crossref.cls, filename=self.model._tx_filename) # depends on [control=['if'], data=[]] if type(resolved) is Postponed: self.delayed_crossrefs.append((obj, attr, crossref)) new_crossrefs.append((obj, attr, crossref)) # depends on [control=['if'], data=[]] else: resolved_crossref_count += 1 if attr.mult in [MULT_ONEORMORE, MULT_ZEROORMORE]: attr_value.append(resolved) # depends on [control=['if'], data=[]] else: setattr(obj, attr.name, resolved) # depends on [control=['if'], data=[]] else: # crossref not in model new_crossrefs.append((obj, attr, crossref)) # depends on [control=['for'], data=[]] # ------------------------- # end of resolve-loop # ------------------------- # store cross-refs from other models in the parser list (for later # processing) self.parser._crossrefs = new_crossrefs # print("DEBUG: Next crossrefs #: {}".format(len(new_crossrefs))) return (resolved_crossref_count, self.delayed_crossrefs)
def render_to_response(self, context, **response_kwargs): """ Returns a response with a template depending on whether the request is ajax or not, rendered with the given context. """ if self.request.is_ajax(): template = self.page_template else: template = self.get_template_names() return self.response_class( request=self.request, template=template, context=context, **response_kwargs )
def function[render_to_response, parameter[self, context]]: constant[ Returns a response with a template depending on whether the request is ajax or not, rendered with the given context. ] if call[name[self].request.is_ajax, parameter[]] begin[:] variable[template] assign[=] name[self].page_template return[call[name[self].response_class, parameter[]]]
keyword[def] identifier[render_to_response] ( identifier[self] , identifier[context] ,** identifier[response_kwargs] ): literal[string] keyword[if] identifier[self] . identifier[request] . identifier[is_ajax] (): identifier[template] = identifier[self] . identifier[page_template] keyword[else] : identifier[template] = identifier[self] . identifier[get_template_names] () keyword[return] identifier[self] . identifier[response_class] ( identifier[request] = identifier[self] . identifier[request] , identifier[template] = identifier[template] , identifier[context] = identifier[context] , ** identifier[response_kwargs] )
def render_to_response(self, context, **response_kwargs): """ Returns a response with a template depending on whether the request is ajax or not, rendered with the given context. """ if self.request.is_ajax(): template = self.page_template # depends on [control=['if'], data=[]] else: template = self.get_template_names() return self.response_class(request=self.request, template=template, context=context, **response_kwargs)
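A sketch of a view that opts into this behaviour, assuming the method above lives on a mixin used with a Django class-based view; the mixin, model, and template names are hypothetical placeholders:

from django.views.generic import ListView

# AjaxMixin stands for whatever class defines render_to_response above;
# Article and both template names are hypothetical.
class ArticleListView(AjaxMixin, ListView):
    model = Article
    template_name = 'articles/list.html'       # full page for normal requests
    page_template = 'articles/list_page.html'  # fragment returned to ajax requests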
def delete_track_at_index(self, href=None, index=None): """Delete a track, or all the tracks. 'href' the relative href to the track list. May not be None. 'index' the index of the track to delete. If none is given, all tracks are deleted. Returns nothing. If the response status is not 204, throws an APIException.""" # Argument error checking. assert href is not None # Deal with any parameters that need to be passed in. data = None fields = {} if index is not None: fields['track'] = index if len(fields) > 0: data = fields raw_result = self.delete(href, data) if raw_result.status != 204: raise APIException(raw_result.status, raw_result.json)
def function[delete_track_at_index, parameter[self, href, index]]: constant[Delete a track, or all the tracks. 'href' the relative href to the track list. May not be None. 'index' the index of the track to delete. If none is given, all tracks are deleted. Returns nothing. If the response status is not 204, throws an APIException.] assert[compare[name[href] is_not constant[None]]] variable[data] assign[=] constant[None] variable[fields] assign[=] dictionary[[], []] if compare[name[index] is_not constant[None]] begin[:] call[name[fields]][constant[track]] assign[=] name[index] if compare[call[name[len], parameter[name[fields]]] greater[>] constant[0]] begin[:] variable[data] assign[=] name[fields] variable[raw_result] assign[=] call[name[self].delete, parameter[name[href], name[data]]] if compare[name[raw_result].status not_equal[!=] constant[204]] begin[:] <ast.Raise object at 0x7da1b08100a0>
keyword[def] identifier[delete_track_at_index] ( identifier[self] , identifier[href] = keyword[None] , identifier[index] = keyword[None] ): literal[string] keyword[assert] identifier[href] keyword[is] keyword[not] keyword[None] identifier[data] = keyword[None] identifier[fields] ={} keyword[if] identifier[index] keyword[is] keyword[not] keyword[None] : identifier[fields] [ literal[string] ]= identifier[index] keyword[if] identifier[len] ( identifier[fields] )> literal[int] : identifier[data] = identifier[fields] identifier[raw_result] = identifier[self] . identifier[delete] ( identifier[href] , identifier[data] ) keyword[if] identifier[raw_result] . identifier[status] != literal[int] : keyword[raise] identifier[APIException] ( identifier[raw_result] . identifier[status] , identifier[raw_result] . identifier[json] )
def delete_track_at_index(self, href=None, index=None): """Delete a track, or all the tracks. 'href' the relative href to the track list. May not be None. 'index' the index of the track to delete. If none is given, all tracks are deleted. Returns nothing. If the response status is not 204, throws an APIException.""" # Argument error checking. assert href is not None # Deal with any parameters that need to be passed in. data = None fields = {} if index is not None: fields['track'] = index # depends on [control=['if'], data=['index']] if len(fields) > 0: data = fields # depends on [control=['if'], data=[]] raw_result = self.delete(href, data) if raw_result.status != 204: raise APIException(raw_result.status, raw_result.json) # depends on [control=['if'], data=[]]
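A usage sketch against the method above; the client object and href are hypothetical, and self.delete is assumed to return a result object exposing status and json attributes:

# Delete the third track from a hypothetical track list.
client.delete_track_at_index(href='/player/main/tracks', index=2)

# With no index, every track in the list is deleted.
client.delete_track_at_index(href='/player/main/tracks')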
def process_custom(custom): """Process custom.""" custom_selectors = {} if custom is not None: for key, value in custom.items(): name = util.lower(key) if RE_CUSTOM.match(name) is None: raise SelectorSyntaxError("The name '{}' is not a valid custom pseudo-class name".format(name)) if name in custom_selectors: raise KeyError("The custom selector '{}' has already been registered".format(name)) custom_selectors[css_unescape(name)] = value return custom_selectors
def function[process_custom, parameter[custom]]: constant[Process custom.] variable[custom_selectors] assign[=] dictionary[[], []] if compare[name[custom] is_not constant[None]] begin[:] for taget[tuple[[<ast.Name object at 0x7da207f992a0>, <ast.Name object at 0x7da207f9b3a0>]]] in starred[call[name[custom].items, parameter[]]] begin[:] variable[name] assign[=] call[name[util].lower, parameter[name[key]]] if compare[call[name[RE_CUSTOM].match, parameter[name[name]]] is constant[None]] begin[:] <ast.Raise object at 0x7da207f98d60> if compare[name[name] in name[custom_selectors]] begin[:] <ast.Raise object at 0x7da207f99510> call[name[custom_selectors]][call[name[css_unescape], parameter[name[name]]]] assign[=] name[value] return[name[custom_selectors]]
keyword[def] identifier[process_custom] ( identifier[custom] ): literal[string] identifier[custom_selectors] ={} keyword[if] identifier[custom] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[key] , identifier[value] keyword[in] identifier[custom] . identifier[items] (): identifier[name] = identifier[util] . identifier[lower] ( identifier[key] ) keyword[if] identifier[RE_CUSTOM] . identifier[match] ( identifier[name] ) keyword[is] keyword[None] : keyword[raise] identifier[SelectorSyntaxError] ( literal[string] . identifier[format] ( identifier[name] )) keyword[if] identifier[name] keyword[in] identifier[custom_selectors] : keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[name] )) identifier[custom_selectors] [ identifier[css_unescape] ( identifier[name] )]= identifier[value] keyword[return] identifier[custom_selectors]
def process_custom(custom): """Process custom.""" custom_selectors = {} if custom is not None: for (key, value) in custom.items(): name = util.lower(key) if RE_CUSTOM.match(name) is None: raise SelectorSyntaxError("The name '{}' is not a valid custom pseudo-class name".format(name)) # depends on [control=['if'], data=[]] if name in custom_selectors: raise KeyError("The custom selector '{}' has already been registered".format(name)) # depends on [control=['if'], data=['name']] custom_selectors[css_unescape(name)] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['custom']] return custom_selectors
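A worked example of the mapping this builds, assuming the surrounding module's RE_CUSTOM accepts ':--'-prefixed names in the style of soupsieve custom pseudo-classes:

custom = {':--heading': 'h1, h2, h3'}  # custom pseudo-class -> real selector
selectors = process_custom(custom)     # {':--heading': 'h1, h2, h3'}

# Registering the same (case-insensitive) name twice raises KeyError,
# and a name that RE_CUSTOM rejects raises SelectorSyntaxError.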
def indent_files(arguments): """ indent_files(arguments : list) 1. Creates a backup of the source file (backup_source_file()) 2. Reads the file's contents (read_file()) 3. Indents the code (indent_code()) 4. Writes the file or prints the indented code (_after_indentation()) """ opts = parse_options(arguments) if not opts.files: # Indent from stdin code = sys.stdin.read() indent_result = indent_code(code, opts) _after_indentation(indent_result) for fname in opts.files: code = read_file(fname) if not opts.dialect: # Guess dialect from the file extensions if none is specified in the # command line if fname.endswith('.lisp'): opts.dialect = 'lisp' elif fname.endswith('.lsp'): opts.dialect = 'newlisp' elif re.search(".clj[sc]{0,1}$", fname): opts.dialect = 'clojure' elif fname.endswith('.ss') or fname.endswith('.scm'): opts.dialect = 'scheme' else: opts.dialect = 'all' indent_result = indent_code(code, opts) if opts.backup: # Create a backup file in the directory specified backup_source_file(fname, opts) _after_indentation(indent_result, fpath=fname)
def function[indent_files, parameter[arguments]]: constant[ indent_files(arguments : list) 1. Creates a backup of the source file (backup_source_file()) 2. Reads the file's contents (read_file()) 3. Indents the code (indent_code()) 4. Writes the file or prints the indented code (_after_indentation()) ] variable[opts] assign[=] call[name[parse_options], parameter[name[arguments]]] if <ast.UnaryOp object at 0x7da18eb55f00> begin[:] variable[code] assign[=] call[name[sys].stdin.read, parameter[]] variable[indent_result] assign[=] call[name[indent_code], parameter[name[code], name[opts]]] call[name[_after_indentation], parameter[name[indent_result]]] for taget[name[fname]] in starred[name[opts].files] begin[:] variable[code] assign[=] call[name[read_file], parameter[name[fname]]] if <ast.UnaryOp object at 0x7da18eb56e90> begin[:] if call[name[fname].endswith, parameter[constant[.lisp]]] begin[:] name[opts].dialect assign[=] constant[lisp] variable[indent_result] assign[=] call[name[indent_code], parameter[name[code], name[opts]]] if name[opts].backup begin[:] call[name[backup_source_file], parameter[name[fname], name[opts]]] call[name[_after_indentation], parameter[name[indent_result]]]
keyword[def] identifier[indent_files] ( identifier[arguments] ): literal[string] identifier[opts] = identifier[parse_options] ( identifier[arguments] ) keyword[if] keyword[not] identifier[opts] . identifier[files] : identifier[code] = identifier[sys] . identifier[stdin] . identifier[read] () identifier[indent_result] = identifier[indent_code] ( identifier[code] , identifier[opts] ) identifier[_after_indentation] ( identifier[indent_result] ) keyword[for] identifier[fname] keyword[in] identifier[opts] . identifier[files] : identifier[code] = identifier[read_file] ( identifier[fname] ) keyword[if] keyword[not] identifier[opts] . identifier[dialect] : keyword[if] identifier[fname] . identifier[endswith] ( literal[string] ): identifier[opts] . identifier[dialect] = literal[string] keyword[elif] identifier[fname] . identifier[endswith] ( literal[string] ): identifier[opts] . identifier[dialect] = literal[string] keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[fname] ): identifier[opts] . identifier[dialect] = literal[string] keyword[elif] identifier[fname] . identifier[endswith] ( literal[string] ) keyword[or] identifier[fname] . identifier[endswith] ( literal[string] ): identifier[opts] . identifier[dialect] = literal[string] keyword[else] : identifier[opts] . identifier[dialect] = literal[string] identifier[indent_result] = identifier[indent_code] ( identifier[code] , identifier[opts] ) keyword[if] identifier[opts] . identifier[backup] : identifier[backup_source_file] ( identifier[fname] , identifier[opts] ) identifier[_after_indentation] ( identifier[indent_result] , identifier[fpath] = identifier[fname] )
def indent_files(arguments): """ indent_files(arguments : list) 1. Creates a backup of the source file (backup_source_file()) 2. Reads the file's contents (read_file()) 3. Indents the code (indent_code()) 4. Writes the file or prints the indented code (_after_indentation()) """ opts = parse_options(arguments) if not opts.files: # Indent from stdin code = sys.stdin.read() indent_result = indent_code(code, opts) _after_indentation(indent_result) # depends on [control=['if'], data=[]] for fname in opts.files: code = read_file(fname) if not opts.dialect: # Guess dialect from the file extensions if none is specified in the # command line if fname.endswith('.lisp'): opts.dialect = 'lisp' # depends on [control=['if'], data=[]] elif fname.endswith('.lsp'): opts.dialect = 'newlisp' # depends on [control=['if'], data=[]] elif re.search('.clj[sc]{0,1}$', fname): opts.dialect = 'clojure' # depends on [control=['if'], data=[]] elif fname.endswith('.ss') or fname.endswith('.scm'): opts.dialect = 'scheme' # depends on [control=['if'], data=[]] else: opts.dialect = 'all' # depends on [control=['if'], data=[]] indent_result = indent_code(code, opts) if opts.backup: # Create a backup file in the directory specified backup_source_file(fname, opts) # depends on [control=['if'], data=[]] _after_indentation(indent_result, fpath=fname) # depends on [control=['for'], data=['fname']]
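A command-line style usage sketch; the file names are hypothetical and parse_options is assumed to accept an argv-style list:

# Indent two files, guessing the dialect from each file extension,
# and keep a backup copy of every file before rewriting it.
indent_files(['--backup', 'core.lisp', 'utils.clj'])

# With no file arguments, the code to indent is read from stdin.
indent_files([])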
def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0): """Add a special end token (id) at the end of each sequence. Parameters ----------- sequences : list of list of int All sequences where each row is a sequence. end_id : int The end ID. pad_id : int The pad ID. Returns ---------- list of list of int The processed sequences. Examples --------- >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]] >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0)) [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]] """ # sequences_out = [[] for _ in range(len(sequences))]#[[]] * len(sequences) sequences_out = copy.deepcopy(sequences) # # add a pad to all # for i in range(len(sequences)): # for j in range(len(sequences[i])): # sequences_out[i].append(pad_id) # # pad -- > end # max_len = 0 for i, v in enumerate(sequences): for j, _v2 in enumerate(v): if sequences[i][j] == pad_id: sequences_out[i][j] = end_id # if j > max_len: # max_len = j break # # remove pad if too long # for i in range(len(sequences)): # for j in range(len(sequences[i])): # sequences_out[i] = sequences_out[i][:max_len+1] return sequences_out
def function[sequences_add_end_id_after_pad, parameter[sequences, end_id, pad_id]]: constant[Add a special end token (id) at the end of each sequence. Parameters ----------- sequences : list of list of int All sequences where each row is a sequence. end_id : int The end ID. pad_id : int The pad ID. Returns ---------- list of list of int The processed sequences. Examples --------- >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]] >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0)) [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]] ] variable[sequences_out] assign[=] call[name[copy].deepcopy, parameter[name[sequences]]] for taget[tuple[[<ast.Name object at 0x7da18f09dae0>, <ast.Name object at 0x7da18f09ffd0>]]] in starred[call[name[enumerate], parameter[name[sequences]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da204567dc0>, <ast.Name object at 0x7da204564fd0>]]] in starred[call[name[enumerate], parameter[name[v]]]] begin[:] if compare[call[call[name[sequences]][name[i]]][name[j]] equal[==] name[pad_id]] begin[:] call[call[name[sequences_out]][name[i]]][name[j]] assign[=] name[end_id] break return[name[sequences_out]]
keyword[def] identifier[sequences_add_end_id_after_pad] ( identifier[sequences] , identifier[end_id] = literal[int] , identifier[pad_id] = literal[int] ): literal[string] identifier[sequences_out] = identifier[copy] . identifier[deepcopy] ( identifier[sequences] ) keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[sequences] ): keyword[for] identifier[j] , identifier[_v2] keyword[in] identifier[enumerate] ( identifier[v] ): keyword[if] identifier[sequences] [ identifier[i] ][ identifier[j] ]== identifier[pad_id] : identifier[sequences_out] [ identifier[i] ][ identifier[j] ]= identifier[end_id] keyword[break] keyword[return] identifier[sequences_out]
def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0): """Add a special end token (id) at the end of each sequence. Parameters ----------- sequences : list of list of int All sequences where each row is a sequence. end_id : int The end ID. pad_id : int The pad ID. Returns ---------- list of list of int The processed sequences. Examples --------- >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]] >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0)) [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]] """ # sequences_out = [[] for _ in range(len(sequences))]#[[]] * len(sequences) sequences_out = copy.deepcopy(sequences) # # add a pad to all # for i in range(len(sequences)): # for j in range(len(sequences[i])): # sequences_out[i].append(pad_id) # # pad -- > end # max_len = 0 for (i, v) in enumerate(sequences): for (j, _v2) in enumerate(v): if sequences[i][j] == pad_id: sequences_out[i][j] = end_id # if j > max_len: # max_len = j break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # # remove pad if too long # for i in range(len(sequences)): # for j in range(len(sequences[i])): # sequences_out[i] = sequences_out[i][:max_len+1] return sequences_out
def student_t(degrees_of_freedom, confidence=0.95): """Return Student-t statistic for given DOF and confidence interval.""" return scipy.stats.t.interval(alpha=confidence, df=degrees_of_freedom)[-1]
def function[student_t, parameter[degrees_of_freedom, confidence]]: constant[Return Student-t statistic for given DOF and confidence interval.] return[call[call[name[scipy].stats.t.interval, parameter[]]][<ast.UnaryOp object at 0x7da20c76cdc0>]]
keyword[def] identifier[student_t] ( identifier[degrees_of_freedom] , identifier[confidence] = literal[int] ): literal[string] keyword[return] identifier[scipy] . identifier[stats] . identifier[t] . identifier[interval] ( identifier[alpha] = identifier[confidence] , identifier[df] = identifier[degrees_of_freedom] )[- literal[int] ]
def student_t(degrees_of_freedom, confidence=0.95): """Return Student-t statistic for given DOF and confidence interval.""" return scipy.stats.t.interval(alpha=confidence, df=degrees_of_freedom)[-1]
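A worked example: for 9 degrees of freedom at the default 95% confidence, the two-sided critical value is about 2.262 (this assumes a scipy version where t.interval still accepts the alpha keyword used above):

t_crit = student_t(9)              # upper bound of the 95% interval
assert abs(t_crit - 2.2622) < 1e-3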
def atomic_write(filename): """ Open a NamedTemporaryFile handle in a context manager """ f = _tempfile(os.fsencode(filename)) try: yield f finally: f.close() # replace the original file with the new temp file (atomic on success) os.replace(f.name, filename)
def function[atomic_write, parameter[filename]]: constant[ Open a NamedTemporaryFile handle in a context manager ] variable[f] assign[=] call[name[_tempfile], parameter[call[name[os].fsencode, parameter[name[filename]]]]] <ast.Try object at 0x7da18bccab30>
keyword[def] identifier[atomic_write] ( identifier[filename] ): literal[string] identifier[f] = identifier[_tempfile] ( identifier[os] . identifier[fsencode] ( identifier[filename] )) keyword[try] : keyword[yield] identifier[f] keyword[finally] : identifier[f] . identifier[close] () identifier[os] . identifier[replace] ( identifier[f] . identifier[name] , identifier[filename] )
def atomic_write(filename): """ Open a NamedTemporaryFile handle in a context manager """ f = _tempfile(os.fsencode(filename)) try: yield f # depends on [control=['try'], data=[]] finally: f.close() # replace the original file with the new temp file (atomic on success) os.replace(f.name, filename)
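As written the function is a plain generator; its docstring implies it is meant to be wrapped with contextlib.contextmanager (the decorator and the _tempfile helper are not shown here, presumably lost in extraction). A usage sketch under that assumption:

import contextlib

atomic_write_cm = contextlib.contextmanager(atomic_write)

with atomic_write_cm('settings.json') as f:  # hypothetical target file
    f.write(b'{"debug": false}\n')           # bytes, assuming _tempfile opens in binary mode
# On success, the temp file atomically replaces settings.json via os.replace.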
def _denominator(self, weighted, include_transforms_for_dims, axis): """Calculate denominator for percentages. Only include those H&S dimensions across which we DON'T sum. These H&S are needed because of the shape when dividing. Dims which are summed across MUST NOT be included, because they would change the result.""" table = self._measure(weighted).raw_cube_array new_axis = self._adjust_axis(axis) index = tuple( None if i in new_axis else slice(None) for i, _ in enumerate(table.shape) ) hs_dims = self._hs_dims_for_den(include_transforms_for_dims, axis) den = self._apply_subtotals(self._apply_missings(table), hs_dims) return np.sum(den, axis=new_axis)[index]
def function[_denominator, parameter[self, weighted, include_transforms_for_dims, axis]]: constant[Calculate denominator for percentages. Only include those H&S dimensions across which we DON'T sum. These H&S are needed because of the shape when dividing. Dims which are summed across MUST NOT be included, because they would change the result.] variable[table] assign[=] call[name[self]._measure, parameter[name[weighted]]].raw_cube_array variable[new_axis] assign[=] call[name[self]._adjust_axis, parameter[name[axis]]] variable[index] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1b7f1c0>]] variable[hs_dims] assign[=] call[name[self]._hs_dims_for_den, parameter[name[include_transforms_for_dims], name[axis]]] variable[den] assign[=] call[name[self]._apply_subtotals, parameter[call[name[self]._apply_missings, parameter[name[table]]], name[hs_dims]]] return[call[call[name[np].sum, parameter[name[den]]]][name[index]]]
keyword[def] identifier[_denominator] ( identifier[self] , identifier[weighted] , identifier[include_transforms_for_dims] , identifier[axis] ): literal[string] identifier[table] = identifier[self] . identifier[_measure] ( identifier[weighted] ). identifier[raw_cube_array] identifier[new_axis] = identifier[self] . identifier[_adjust_axis] ( identifier[axis] ) identifier[index] = identifier[tuple] ( keyword[None] keyword[if] identifier[i] keyword[in] identifier[new_axis] keyword[else] identifier[slice] ( keyword[None] ) keyword[for] identifier[i] , identifier[_] keyword[in] identifier[enumerate] ( identifier[table] . identifier[shape] ) ) identifier[hs_dims] = identifier[self] . identifier[_hs_dims_for_den] ( identifier[include_transforms_for_dims] , identifier[axis] ) identifier[den] = identifier[self] . identifier[_apply_subtotals] ( identifier[self] . identifier[_apply_missings] ( identifier[table] ), identifier[hs_dims] ) keyword[return] identifier[np] . identifier[sum] ( identifier[den] , identifier[axis] = identifier[new_axis] )[ identifier[index] ]
def _denominator(self, weighted, include_transforms_for_dims, axis): """Calculate denominator for percentages. Only include those H&S dimensions across which we DON'T sum. These H&S are needed because of the shape when dividing. Dims which are summed across MUST NOT be included, because they would change the result.""" table = self._measure(weighted).raw_cube_array new_axis = self._adjust_axis(axis) index = tuple((None if i in new_axis else slice(None) for (i, _) in enumerate(table.shape))) hs_dims = self._hs_dims_for_den(include_transforms_for_dims, axis) den = self._apply_subtotals(self._apply_missings(table), hs_dims) return np.sum(den, axis=new_axis)[index]
def paths(self): ''' given a basedir, yield the paths of all test modules recursively found in basedir return -- generator ''' module_name = getattr(self, 'module_name', '') module_prefix = getattr(self, 'prefix', '') filepath = getattr(self, 'filepath', '') if filepath: if os.path.isabs(filepath): yield filepath else: yield os.path.join(self.basedir, filepath) else: if module_prefix: basedirs = self._find_prefix_paths(self.basedir, module_prefix) else: basedirs = [self.basedir] for basedir in basedirs: try: if module_name: path = self._find_module_path(basedir, module_name) else: path = basedir if os.path.isfile(path): logger.debug('Module path: {}'.format(path)) yield path else: seen_paths = set() for root, dirs, files in self.walk(path): for basename in files: if basename.startswith("__init__"): if self._is_module_path(root): filepath = os.path.join(root, basename) if filepath not in seen_paths: logger.debug('Module package path: {}'.format(filepath)) seen_paths.add(filepath) yield filepath else: fileroot = os.path.splitext(basename)[0] for pf in self.module_postfixes: if fileroot.endswith(pf): filepath = os.path.join(root, basename) if filepath not in seen_paths: logger.debug('Module postfix path: {}'.format(filepath)) seen_paths.add(filepath) yield filepath for pf in self.module_prefixes: if fileroot.startswith(pf): filepath = os.path.join(root, basename) if filepath not in seen_paths: logger.debug('Module prefix path: {}'.format(filepath)) seen_paths.add(filepath) yield filepath except IOError as e: # we failed to find a suitable path logger.warning(e, exc_info=True) pass
def function[paths, parameter[self]]: constant[ given a basedir, yield the paths of all test modules recursively found in basedir return -- generator ] variable[module_name] assign[=] call[name[getattr], parameter[name[self], constant[module_name], constant[]]] variable[module_prefix] assign[=] call[name[getattr], parameter[name[self], constant[prefix], constant[]]] variable[filepath] assign[=] call[name[getattr], parameter[name[self], constant[filepath], constant[]]] if name[filepath] begin[:] if call[name[os].path.isabs, parameter[name[filepath]]] begin[:] <ast.Yield object at 0x7da20c6e7460>
keyword[def] identifier[paths] ( identifier[self] ): literal[string] identifier[module_name] = identifier[getattr] ( identifier[self] , literal[string] , literal[string] ) identifier[module_prefix] = identifier[getattr] ( identifier[self] , literal[string] , literal[string] ) identifier[filepath] = identifier[getattr] ( identifier[self] , literal[string] , literal[string] ) keyword[if] identifier[filepath] : keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[filepath] ): keyword[yield] identifier[filepath] keyword[else] : keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[basedir] , identifier[filepath] ) keyword[else] : keyword[if] identifier[module_prefix] : identifier[basedirs] = identifier[self] . identifier[_find_prefix_paths] ( identifier[self] . identifier[basedir] , identifier[module_prefix] ) keyword[else] : identifier[basedirs] =[ identifier[self] . identifier[basedir] ] keyword[for] identifier[basedir] keyword[in] identifier[basedirs] : keyword[try] : keyword[if] identifier[module_name] : identifier[path] = identifier[self] . identifier[_find_module_path] ( identifier[basedir] , identifier[module_name] ) keyword[else] : identifier[path] = identifier[basedir] keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ): identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[path] )) keyword[yield] identifier[path] keyword[else] : identifier[seen_paths] = identifier[set] () keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[self] . identifier[walk] ( identifier[path] ): keyword[for] identifier[basename] keyword[in] identifier[files] : keyword[if] identifier[basename] . identifier[startswith] ( literal[string] ): keyword[if] identifier[self] . identifier[_is_module_path] ( identifier[root] ): identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[basename] ) keyword[if] identifier[filepath] keyword[not] keyword[in] identifier[seen_paths] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[filepath] )) identifier[seen_paths] . identifier[add] ( identifier[filepath] ) keyword[yield] identifier[filepath] keyword[else] : identifier[fileroot] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[basename] )[ literal[int] ] keyword[for] identifier[pf] keyword[in] identifier[self] . identifier[module_postfixes] : keyword[if] identifier[fileroot] . identifier[endswith] ( identifier[pf] ): identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[basename] ) keyword[if] identifier[filepath] keyword[not] keyword[in] identifier[seen_paths] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[filepath] )) identifier[seen_paths] . identifier[add] ( identifier[filepath] ) keyword[yield] identifier[filepath] keyword[for] identifier[pf] keyword[in] identifier[self] . identifier[module_prefixes] : keyword[if] identifier[fileroot] . identifier[startswith] ( identifier[pf] ): identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[basename] ) keyword[if] identifier[filepath] keyword[not] keyword[in] identifier[seen_paths] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[filepath] )) identifier[seen_paths] . identifier[add] ( identifier[filepath] ) keyword[yield] identifier[filepath] keyword[except] identifier[IOError] keyword[as] identifier[e] : identifier[logger] . identifier[warning] ( identifier[e] , identifier[exc_info] = keyword[True] ) keyword[pass]
def paths(self): """ given a basedir, yield the paths of all test modules recursively found in basedir return -- generator """ module_name = getattr(self, 'module_name', '') module_prefix = getattr(self, 'prefix', '') filepath = getattr(self, 'filepath', '') if filepath: if os.path.isabs(filepath): yield filepath # depends on [control=['if'], data=[]] else: yield os.path.join(self.basedir, filepath) # depends on [control=['if'], data=[]] else: if module_prefix: basedirs = self._find_prefix_paths(self.basedir, module_prefix) # depends on [control=['if'], data=[]] else: basedirs = [self.basedir] for basedir in basedirs: try: if module_name: path = self._find_module_path(basedir, module_name) # depends on [control=['if'], data=[]] else: path = basedir if os.path.isfile(path): logger.debug('Module path: {}'.format(path)) yield path # depends on [control=['if'], data=[]] else: seen_paths = set() for (root, dirs, files) in self.walk(path): for basename in files: if basename.startswith('__init__'): if self._is_module_path(root): filepath = os.path.join(root, basename) if filepath not in seen_paths: logger.debug('Module package path: {}'.format(filepath)) seen_paths.add(filepath) yield filepath # depends on [control=['if'], data=['filepath', 'seen_paths']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: fileroot = os.path.splitext(basename)[0] for pf in self.module_postfixes: if fileroot.endswith(pf): filepath = os.path.join(root, basename) if filepath not in seen_paths: logger.debug('Module postfix path: {}'.format(filepath)) seen_paths.add(filepath) yield filepath # depends on [control=['if'], data=['filepath', 'seen_paths']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pf']] for pf in self.module_prefixes: if fileroot.startswith(pf): filepath = os.path.join(root, basename) if filepath not in seen_paths: logger.debug('Module prefix path: {}'.format(filepath)) seen_paths.add(filepath) yield filepath # depends on [control=['if'], data=['filepath', 'seen_paths']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pf']] # depends on [control=['for'], data=['basename']] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]] except IOError as e: # we failed to find a suitable path logger.warning(e, exc_info=True) pass # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['basedir']]
def _mapColumn(self, index): """ Maps a column to its respective input index, keeping to the topology of the region. It takes the index of the column as an argument and determines what is the index of the flattened input vector that is to be the center of the column's potential pool. It distributes the columns over the inputs uniformly. The return value is an integer representing the index of the input bit. Examples of the expected output of this method: * If the topology is one dimensional, and the column index is 0, this method will return the input index 0. If the column index is 1, and there are 3 columns over 7 inputs, this method will return the input index 3. * If the topology is two dimensional, with column dimensions [3, 5] and input dimensions [7, 11], and the column index is 3, the method returns input index 8. Parameters: ---------------------------- :param index: The index identifying a column in the permanence, potential and connectivity matrices. :param wrapAround: A boolean value indicating that boundaries should be ignored. """ columnCoords = numpy.unravel_index(index, self._columnDimensions) columnCoords = numpy.array(columnCoords, dtype=realDType) ratios = columnCoords / self._columnDimensions inputCoords = self._inputDimensions * ratios inputCoords += 0.5 * self._inputDimensions / self._columnDimensions inputCoords = inputCoords.astype(int) inputIndex = numpy.ravel_multi_index(inputCoords, self._inputDimensions) return inputIndex
def function[_mapColumn, parameter[self, index]]: constant[ Maps a column to its respective input index, keeping to the topology of the region. It takes the index of the column as an argument and determines what is the index of the flattened input vector that is to be the center of the column's potential pool. It distributes the columns over the inputs uniformly. The return value is an integer representing the index of the input bit. Examples of the expected output of this method: * If the topology is one dimensional, and the column index is 0, this method will return the input index 0. If the column index is 1, and there are 3 columns over 7 inputs, this method will return the input index 3. * If the topology is two dimensional, with column dimensions [3, 5] and input dimensions [7, 11], and the column index is 3, the method returns input index 8. Parameters: ---------------------------- :param index: The index identifying a column in the permanence, potential and connectivity matrices. :param wrapAround: A boolean value indicating that boundaries should be ignored. ] variable[columnCoords] assign[=] call[name[numpy].unravel_index, parameter[name[index], name[self]._columnDimensions]] variable[columnCoords] assign[=] call[name[numpy].array, parameter[name[columnCoords]]] variable[ratios] assign[=] binary_operation[name[columnCoords] / name[self]._columnDimensions] variable[inputCoords] assign[=] binary_operation[name[self]._inputDimensions * name[ratios]] <ast.AugAssign object at 0x7da18f09e6e0> variable[inputCoords] assign[=] call[name[inputCoords].astype, parameter[name[int]]] variable[inputIndex] assign[=] call[name[numpy].ravel_multi_index, parameter[name[inputCoords], name[self]._inputDimensions]] return[name[inputIndex]]
keyword[def] identifier[_mapColumn] ( identifier[self] , identifier[index] ): literal[string] identifier[columnCoords] = identifier[numpy] . identifier[unravel_index] ( identifier[index] , identifier[self] . identifier[_columnDimensions] ) identifier[columnCoords] = identifier[numpy] . identifier[array] ( identifier[columnCoords] , identifier[dtype] = identifier[realDType] ) identifier[ratios] = identifier[columnCoords] / identifier[self] . identifier[_columnDimensions] identifier[inputCoords] = identifier[self] . identifier[_inputDimensions] * identifier[ratios] identifier[inputCoords] += literal[int] * identifier[self] . identifier[_inputDimensions] / identifier[self] . identifier[_columnDimensions] identifier[inputCoords] = identifier[inputCoords] . identifier[astype] ( identifier[int] ) identifier[inputIndex] = identifier[numpy] . identifier[ravel_multi_index] ( identifier[inputCoords] , identifier[self] . identifier[_inputDimensions] ) keyword[return] identifier[inputIndex]
def _mapColumn(self, index): """ Maps a column to its respective input index, keeping to the topology of the region. It takes the index of the column as an argument and determines what is the index of the flattened input vector that is to be the center of the column's potential pool. It distributes the columns over the inputs uniformly. The return value is an integer representing the index of the input bit. Examples of the expected output of this method: * If the topology is one dimensional, and the column index is 0, this method will return the input index 0. If the column index is 1, and there are 3 columns over 7 inputs, this method will return the input index 3. * If the topology is two dimensional, with column dimensions [3, 5] and input dimensions [7, 11], and the column index is 3, the method returns input index 8. Parameters: ---------------------------- :param index: The index identifying a column in the permanence, potential and connectivity matrices. :param wrapAround: A boolean value indicating that boundaries should be ignored. """ columnCoords = numpy.unravel_index(index, self._columnDimensions) columnCoords = numpy.array(columnCoords, dtype=realDType) ratios = columnCoords / self._columnDimensions inputCoords = self._inputDimensions * ratios inputCoords += 0.5 * self._inputDimensions / self._columnDimensions inputCoords = inputCoords.astype(int) inputIndex = numpy.ravel_multi_index(inputCoords, self._inputDimensions) return inputIndex
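A self-contained numpy restatement of the same arithmetic, checking the one-dimensional case from the docstring (3 columns over 7 inputs, column 1 maps to input 3):

import numpy as np

def map_column(index, column_dims, input_dims):
    # Stand-alone version of the mapping performed by _mapColumn above.
    col = np.array(np.unravel_index(index, column_dims), dtype=float)
    cdims = np.array(column_dims, dtype=float)
    idims = np.array(input_dims, dtype=float)
    in_coords = idims * (col / cdims) + 0.5 * idims / cdims
    return int(np.ravel_multi_index(in_coords.astype(int), input_dims))

print(map_column(1, (3,), (7,)))  # -> 3, matching the 1-D docstring example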
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> NetworkedConfigObject: """ Generates a NetworkedConfigObject using the specified hooks. """ def NetworkedConfigObjectGenerator(url, safe_load: bool=True): cfg = NetworkedConfigObject(url=url, load_hook=load_hook, safe_load=safe_load, normal_class_load_hook=normal_class_load_hook, normal_class_dump_hook=normal_class_dump_hook) return cfg return NetworkedConfigObjectGenerator
def function[GenerateNetworkedConfigFile, parameter[load_hook, normal_class_load_hook, normal_class_dump_hook]]: constant[ Generates a NetworkedConfigObject using the specified hooks. ] def function[NetworkedConfigObjectGenerator, parameter[url, safe_load]]: variable[cfg] assign[=] call[name[NetworkedConfigObject], parameter[]] return[name[cfg]] return[name[NetworkedConfigObjectGenerator]]
keyword[def] identifier[GenerateNetworkedConfigFile] ( identifier[load_hook] , identifier[normal_class_load_hook] , identifier[normal_class_dump_hook] ,** identifier[kwargs] )-> identifier[NetworkedConfigObject] : literal[string] keyword[def] identifier[NetworkedConfigObjectGenerator] ( identifier[url] , identifier[safe_load] : identifier[bool] = keyword[True] ): identifier[cfg] = identifier[NetworkedConfigObject] ( identifier[url] = identifier[url] , identifier[load_hook] = identifier[load_hook] , identifier[safe_load] = identifier[safe_load] , identifier[normal_class_load_hook] = identifier[normal_class_load_hook] , identifier[normal_class_dump_hook] = identifier[normal_class_dump_hook] ) keyword[return] identifier[cfg] keyword[return] identifier[NetworkedConfigObjectGenerator]
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> NetworkedConfigObject: """ Generates a NetworkedConfigObject using the specified hooks. """ def NetworkedConfigObjectGenerator(url, safe_load: bool=True): cfg = NetworkedConfigObject(url=url, load_hook=load_hook, safe_load=safe_load, normal_class_load_hook=normal_class_load_hook, normal_class_dump_hook=normal_class_dump_hook) return cfg return NetworkedConfigObjectGenerator
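A usage sketch; the hooks and URL below are hypothetical placeholders, since the hook semantics are defined elsewhere in the library:

import yaml  # purely illustrative choice of serialisation

make_config = GenerateNetworkedConfigFile(
    load_hook=yaml.safe_load,               # hypothetical: parse the fetched text
    normal_class_load_hook=yaml.safe_load,  # hypothetical local-load hook
    normal_class_dump_hook=yaml.safe_dump)  # hypothetical local-dump hook

cfg = make_config('https://example.com/app.yml', safe_load=True)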
def get_returns_cached(filepath, update_func, latest_dt, **kwargs): """ Get returns from a cached file if the cache is recent enough, otherwise, try to retrieve via a provided update function and update the cache file. Parameters ---------- filepath : str Path to cached csv file update_func : function Function to call in case cache is not up-to-date. latest_dt : pd.Timestamp (tz=UTC) Latest datetime required in csv file. **kwargs : Keyword arguments Optional keyword arguments will be passed to update_func() Returns ------- pandas.DataFrame DataFrame containing returns """ update_cache = False try: mtime = getmtime(filepath) except OSError as e: if e.errno != errno.ENOENT: raise update_cache = True else: file_dt = pd.Timestamp(mtime, unit='s') if latest_dt.tzinfo: file_dt = file_dt.tz_localize('utc') if file_dt < latest_dt: update_cache = True else: returns = pd.read_csv(filepath, index_col=0, parse_dates=True) returns.index = returns.index.tz_localize("UTC") if update_cache: returns = update_func(**kwargs) try: ensure_directory(cache_dir()) except OSError as e: warnings.warn( 'could not update cache: {}. {}: {}'.format( filepath, type(e).__name__, e, ), UserWarning, ) try: returns.to_csv(filepath) except OSError as e: warnings.warn( 'could not update cache {}. {}: {}'.format( filepath, type(e).__name__, e, ), UserWarning, ) return returns
def function[get_returns_cached, parameter[filepath, update_func, latest_dt]]: constant[ Get returns from a cached file if the cache is recent enough, otherwise, try to retrieve via a provided update function and update the cache file. Parameters ---------- filepath : str Path to cached csv file update_func : function Function to call in case cache is not up-to-date. latest_dt : pd.Timestamp (tz=UTC) Latest datetime required in csv file. **kwargs : Keyword arguments Optional keyword arguments will be passed to update_func() Returns ------- pandas.DataFrame DataFrame containing returns ] variable[update_cache] assign[=] constant[False] <ast.Try object at 0x7da1b18465c0> if name[update_cache] begin[:] variable[returns] assign[=] call[name[update_func], parameter[]] <ast.Try object at 0x7da1b1846440> <ast.Try object at 0x7da1b1846f20> return[name[returns]]
keyword[def] identifier[get_returns_cached] ( identifier[filepath] , identifier[update_func] , identifier[latest_dt] ,** identifier[kwargs] ): literal[string] identifier[update_cache] = keyword[False] keyword[try] : identifier[mtime] = identifier[getmtime] ( identifier[filepath] ) keyword[except] identifier[OSError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[errno] != identifier[errno] . identifier[ENOENT] : keyword[raise] identifier[update_cache] = keyword[True] keyword[else] : identifier[file_dt] = identifier[pd] . identifier[Timestamp] ( identifier[mtime] , identifier[unit] = literal[string] ) keyword[if] identifier[latest_dt] . identifier[tzinfo] : identifier[file_dt] = identifier[file_dt] . identifier[tz_localize] ( literal[string] ) keyword[if] identifier[file_dt] < identifier[latest_dt] : identifier[update_cache] = keyword[True] keyword[else] : identifier[returns] = identifier[pd] . identifier[read_csv] ( identifier[filepath] , identifier[index_col] = literal[int] , identifier[parse_dates] = keyword[True] ) identifier[returns] . identifier[index] = identifier[returns] . identifier[index] . identifier[tz_localize] ( literal[string] ) keyword[if] identifier[update_cache] : identifier[returns] = identifier[update_func] (** identifier[kwargs] ) keyword[try] : identifier[ensure_directory] ( identifier[cache_dir] ()) keyword[except] identifier[OSError] keyword[as] identifier[e] : identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( identifier[filepath] , identifier[type] ( identifier[e] ). identifier[__name__] , identifier[e] , ), identifier[UserWarning] , ) keyword[try] : identifier[returns] . identifier[to_csv] ( identifier[filepath] ) keyword[except] identifier[OSError] keyword[as] identifier[e] : identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( identifier[filepath] , identifier[type] ( identifier[e] ). identifier[__name__] , identifier[e] , ), identifier[UserWarning] , ) keyword[return] identifier[returns]
def get_returns_cached(filepath, update_func, latest_dt, **kwargs): """ Get returns from a cached file if the cache is recent enough, otherwise, try to retrieve via a provided update function and update the cache file. Parameters ---------- filepath : str Path to cached csv file update_func : function Function to call in case cache is not up-to-date. latest_dt : pd.Timestamp (tz=UTC) Latest datetime required in csv file. **kwargs : Keyword arguments Optional keyword arguments will be passed to update_func() Returns ------- pandas.DataFrame DataFrame containing returns """ update_cache = False try: mtime = getmtime(filepath) # depends on [control=['try'], data=[]] except OSError as e: if e.errno != errno.ENOENT: raise # depends on [control=['if'], data=[]] update_cache = True # depends on [control=['except'], data=['e']] else: file_dt = pd.Timestamp(mtime, unit='s') if latest_dt.tzinfo: file_dt = file_dt.tz_localize('utc') # depends on [control=['if'], data=[]] if file_dt < latest_dt: update_cache = True # depends on [control=['if'], data=[]] else: returns = pd.read_csv(filepath, index_col=0, parse_dates=True) returns.index = returns.index.tz_localize('UTC') if update_cache: returns = update_func(**kwargs) try: ensure_directory(cache_dir()) # depends on [control=['try'], data=[]] except OSError as e: warnings.warn('could not update cache: {}. {}: {}'.format(filepath, type(e).__name__, e), UserWarning) # depends on [control=['except'], data=['e']] try: returns.to_csv(filepath) # depends on [control=['try'], data=[]] except OSError as e: warnings.warn('could not update cache {}. {}: {}'.format(filepath, type(e).__name__, e), UserWarning) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] return returns
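A hedged usage sketch for the caching helper above; `fetch_returns` is a hypothetical update function and the cache path is a placeholder.

import pandas as pd

def fetch_returns(symbol='SPY'):
    # hypothetical updater: returns a small UTC-indexed DataFrame
    idx = pd.date_range('2019-01-01', periods=3, tz='UTC')
    return pd.DataFrame({'returns': [0.010, -0.002, 0.005]}, index=idx)

latest = pd.Timestamp.now(tz='UTC').normalize() - pd.Timedelta(days=1)
returns = get_returns_cached('/tmp/returns.csv', fetch_returns, latest,
                             symbol='SPY')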
async def populate(self, agent_cls, *args, **kwargs):
        '''Populate all the slave grid environments with agents.

        Assumes that no agents have been spawned into the slave environment
        grids yet. This excludes the slave environment managers, as they are
        not in the grids.
        '''
        n = self.gs[0] * self.gs[1]
        tasks = []
        for addr in self.addrs:
            task = asyncio.ensure_future(self._populate_slave(addr, agent_cls,
                                                              n, *args,
                                                              **kwargs))
            tasks.append(task)

        rets = await asyncio.gather(*tasks)
        return rets
<ast.AsyncFunctionDef object at 0x7da18f812410>
keyword[async] keyword[def] identifier[populate] ( identifier[self] , identifier[agent_cls] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[n] = identifier[self] . identifier[gs] [ literal[int] ]* identifier[self] . identifier[gs] [ literal[int] ] identifier[tasks] =[] keyword[for] identifier[addr] keyword[in] identifier[self] . identifier[addrs] : identifier[task] = identifier[asyncio] . identifier[ensure_future] ( identifier[self] . identifier[_populate_slave] ( identifier[addr] , identifier[agent_cls] , identifier[n] ,* identifier[args] , ** identifier[kwargs] )) identifier[tasks] . identifier[append] ( identifier[task] ) identifier[rets] = keyword[await] identifier[asyncio] . identifier[gather] (* identifier[tasks] ) keyword[return] identifier[rets]
async def populate(self, agent_cls, *args, **kwargs):
    """Populate all the slave grid environments with agents.

    Assumes that no agents have been spawned into the slave environment
    grids yet. This excludes the slave environment managers, as they are
    not in the grids.
    """
    n = self.gs[0] * self.gs[1]
    tasks = []
    for addr in self.addrs:
        task = asyncio.ensure_future(self._populate_slave(addr, agent_cls, n, *args, **kwargs))
        tasks.append(task) # depends on [control=['for'], data=['addr']]
    rets = await asyncio.gather(*tasks)
    return rets
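A hedged sketch of driving the coroutine above; `manager` and `GridAgent` stand in for a constructed multi-environment manager and an agent class, neither of which appears in this record.

import asyncio

async def main(manager, GridAgent):
    # One agent per grid cell is spawned on every slave environment.
    rets = await manager.populate(GridAgent)
    print(len(rets), 'slave environments populated')

# asyncio.run(main(manager, GridAgent))  # assuming both exist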
def get_token(code, token_service, client_id, client_secret, redirect_uri, grant_type):
    """Fetches an OAuth 2 token."""
    data = {
        'code': code,
        'client_id': client_id,
        'client_secret': client_secret,
        'redirect_uri': redirect_uri,
        'grant_type': grant_type,
    }

    # Exchange the authorization code for a token at the token endpoint
    resp = requests.post(token_service, data, verify=False)
    return resp.json()
def function[get_token, parameter[code, token_service, client_id, client_secret, redirect_uri, grant_type]]: constant[Fetches an OAuth 2 token.] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da207f99b10>, <ast.Constant object at 0x7da207f9ba60>, <ast.Constant object at 0x7da207f98c70>, <ast.Constant object at 0x7da207f9a560>, <ast.Constant object at 0x7da207f98f70>], [<ast.Name object at 0x7da207f98640>, <ast.Name object at 0x7da207f98040>, <ast.Name object at 0x7da20c6aa6e0>, <ast.Name object at 0x7da20c6ab7c0>, <ast.Name object at 0x7da20c6aa6b0>]] variable[resp] assign[=] call[name[requests].post, parameter[name[token_service], name[data]]] return[call[name[resp].json, parameter[]]]
keyword[def] identifier[get_token] ( identifier[code] , identifier[token_service] , identifier[client_id] , identifier[client_secret] , identifier[redirect_uri] , identifier[grant_type] ): literal[string] identifier[data] ={ literal[string] : identifier[code] , literal[string] : identifier[client_id] , literal[string] : identifier[client_secret] , literal[string] : identifier[redirect_uri] , literal[string] : identifier[grant_type] , } identifier[resp] = identifier[requests] . identifier[post] ( identifier[token_service] , identifier[data] , identifier[verify] = keyword[False] ) keyword[return] identifier[resp] . identifier[json] ()
def get_token(code, token_service, client_id, client_secret, redirect_uri, grant_type):
    """Fetches an OAuth 2 token."""
    data = {'code': code, 'client_id': client_id, 'client_secret': client_secret, 'redirect_uri': redirect_uri, 'grant_type': grant_type}
    # Exchange the authorization code for a token at the token endpoint
    resp = requests.post(token_service, data, verify=False)
    return resp.json()
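A hedged sketch of the authorization-code exchange above; the endpoint and credentials are placeholders. Note the record disables TLS verification (`verify=False`), which a production caller would want to re-enable.

token = get_token(
    code='CODE_FROM_REDIRECT',
    token_service='https://auth.example.com/oauth2/token',
    client_id='my-client-id',
    client_secret='my-client-secret',
    redirect_uri='https://app.example.com/callback',
    grant_type='authorization_code',
)
access_token = token.get('access_token')  # per the usual OAuth 2 response shape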
def priority(cls, url):
        """
        Returns LOW priority if the URL is not prefixed with hls:// but ends with
        .m3u8 and returns NORMAL priority if the URL is prefixed.

        :param url: the URL to find the plugin priority for
        :return: plugin priority for the given URL
        """
        m = cls._url_re.match(url)
        if m:
            prefix, url = cls._url_re.match(url).groups()
            url_path = urlparse(url).path
            if prefix is None and url_path.endswith(".m3u8"):
                return LOW_PRIORITY
            elif prefix is not None:
                return NORMAL_PRIORITY

        return NO_PRIORITY
def function[priority, parameter[cls, url]]:
    constant[
        Returns LOW priority if the URL is not prefixed with hls:// but ends with
        .m3u8 and returns NORMAL priority if the URL is prefixed.

        :param url: the URL to find the plugin priority for
        :return: plugin priority for the given URL
        ]
    variable[m] assign[=] call[name[cls]._url_re.match, parameter[name[url]]]
    if name[m] begin[:]
        <ast.Tuple object at 0x7da207f9b640> assign[=] call[call[name[cls]._url_re.match, parameter[name[url]]].groups, parameter[]]
        variable[url_path] assign[=] call[name[urlparse], parameter[name[url]]].path
        if <ast.BoolOp object at 0x7da207f9bfa0> begin[:]
            return[name[LOW_PRIORITY]]
    return[name[NO_PRIORITY]]
keyword[def] identifier[priority] ( identifier[cls] , identifier[url] ): literal[string] identifier[m] = identifier[cls] . identifier[_url_re] . identifier[match] ( identifier[url] ) keyword[if] identifier[m] : identifier[prefix] , identifier[url] = identifier[cls] . identifier[_url_re] . identifier[match] ( identifier[url] ). identifier[groups] () identifier[url_path] = identifier[urlparse] ( identifier[url] ). identifier[path] keyword[if] identifier[prefix] keyword[is] keyword[None] keyword[and] identifier[url_path] . identifier[endswith] ( literal[string] ): keyword[return] identifier[LOW_PRIORITY] keyword[elif] identifier[prefix] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[NORMAL_PRIORITY] keyword[return] identifier[NO_PRIORITY]
def priority(cls, url):
    """
        Returns LOW priority if the URL is not prefixed with hls:// but ends with
        .m3u8 and returns NORMAL priority if the URL is prefixed.

        :param url: the URL to find the plugin priority for
        :return: plugin priority for the given URL
        """
    m = cls._url_re.match(url)
    if m:
        (prefix, url) = cls._url_re.match(url).groups()
        url_path = urlparse(url).path
        if prefix is None and url_path.endswith('.m3u8'):
            return LOW_PRIORITY # depends on [control=['if'], data=[]]
        elif prefix is not None:
            return NORMAL_PRIORITY # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    return NO_PRIORITY
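A hedged illustration of the dispatch rule above; `MyHLSPlugin` stands in for the plugin class that defines `_url_re` (not shown in this record), and the expected return values follow the docstring.

# MyHLSPlugin.priority('hls://https://host/live')   -> NORMAL_PRIORITY (prefixed)
# MyHLSPlugin.priority('https://host/stream.m3u8')  -> LOW_PRIORITY    (suffix only)
# MyHLSPlugin.priority('https://host/stream.mpd')   -> NO_PRIORITY     (neither)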
def mac_address_table_static_vlan(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table") static = ET.SubElement(mac_address_table, "static") mac_address_key = ET.SubElement(static, "mac-address") mac_address_key.text = kwargs.pop('mac_address') forward_key = ET.SubElement(static, "forward") forward_key.text = kwargs.pop('forward') interface_type_key = ET.SubElement(static, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(static, "interface-name") interface_name_key.text = kwargs.pop('interface_name') vlanid_key = ET.SubElement(static, "vlanid") vlanid_key.text = kwargs.pop('vlanid') vlan = ET.SubElement(static, "vlan") vlan.text = kwargs.pop('vlan') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[mac_address_table_static_vlan, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[mac_address_table] assign[=] call[name[ET].SubElement, parameter[name[config], constant[mac-address-table]]] variable[static] assign[=] call[name[ET].SubElement, parameter[name[mac_address_table], constant[static]]] variable[mac_address_key] assign[=] call[name[ET].SubElement, parameter[name[static], constant[mac-address]]] name[mac_address_key].text assign[=] call[name[kwargs].pop, parameter[constant[mac_address]]] variable[forward_key] assign[=] call[name[ET].SubElement, parameter[name[static], constant[forward]]] name[forward_key].text assign[=] call[name[kwargs].pop, parameter[constant[forward]]] variable[interface_type_key] assign[=] call[name[ET].SubElement, parameter[name[static], constant[interface-type]]] name[interface_type_key].text assign[=] call[name[kwargs].pop, parameter[constant[interface_type]]] variable[interface_name_key] assign[=] call[name[ET].SubElement, parameter[name[static], constant[interface-name]]] name[interface_name_key].text assign[=] call[name[kwargs].pop, parameter[constant[interface_name]]] variable[vlanid_key] assign[=] call[name[ET].SubElement, parameter[name[static], constant[vlanid]]] name[vlanid_key].text assign[=] call[name[kwargs].pop, parameter[constant[vlanid]]] variable[vlan] assign[=] call[name[ET].SubElement, parameter[name[static], constant[vlan]]] name[vlan].text assign[=] call[name[kwargs].pop, parameter[constant[vlan]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[mac_address_table_static_vlan] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[mac_address_table] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[static] = identifier[ET] . identifier[SubElement] ( identifier[mac_address_table] , literal[string] ) identifier[mac_address_key] = identifier[ET] . identifier[SubElement] ( identifier[static] , literal[string] ) identifier[mac_address_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[forward_key] = identifier[ET] . identifier[SubElement] ( identifier[static] , literal[string] ) identifier[forward_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[interface_type_key] = identifier[ET] . identifier[SubElement] ( identifier[static] , literal[string] ) identifier[interface_type_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[interface_name_key] = identifier[ET] . identifier[SubElement] ( identifier[static] , literal[string] ) identifier[interface_name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[vlanid_key] = identifier[ET] . identifier[SubElement] ( identifier[static] , literal[string] ) identifier[vlanid_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[vlan] = identifier[ET] . identifier[SubElement] ( identifier[static] , literal[string] ) identifier[vlan] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def mac_address_table_static_vlan(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') mac_address_table = ET.SubElement(config, 'mac-address-table', xmlns='urn:brocade.com:mgmt:brocade-mac-address-table') static = ET.SubElement(mac_address_table, 'static') mac_address_key = ET.SubElement(static, 'mac-address') mac_address_key.text = kwargs.pop('mac_address') forward_key = ET.SubElement(static, 'forward') forward_key.text = kwargs.pop('forward') interface_type_key = ET.SubElement(static, 'interface-type') interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(static, 'interface-name') interface_name_key.text = kwargs.pop('interface_name') vlanid_key = ET.SubElement(static, 'vlanid') vlanid_key.text = kwargs.pop('vlanid') vlan = ET.SubElement(static, 'vlan') vlan.text = kwargs.pop('vlan') callback = kwargs.pop('callback', self._callback) return callback(config)
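A hedged usage sketch for the NETCONF helper above; `device` is a hypothetical handle exposing this method. All six values are popped without defaults, so omitting any of them raises KeyError before the request is built.

result = device.mac_address_table_static_vlan(
    mac_address='0011.2233.4455',
    forward='forward',
    interface_type='tengigabitethernet',
    interface_name='1/0/1',
    vlanid='100',
    vlan='100',
)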
def cli_plugin_add_argument(*args, **kwargs): """ Decorator generator that adds an argument to the cli plugin based on the decorated function Args: *args: Any args to be passed to :func:`argparse.ArgumentParser.add_argument` *kwargs: Any keyword args to be passed to :func:`argparse.ArgumentParser.add_argument` Returns: function: Decorator that builds or extends the cliplugin for the decorated function, adding the given argument definition Examples: >>> @cli_plugin_add_argument('-m', '--mogambo', action='store_true') ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test._parser_args [(('-m', '--mogambo'), {'action': 'store_true'})] >>> @cli_plugin_add_argument('-m', '--mogambo', action='store_true') ... @cli_plugin_add_argument('-b', '--bogabmo', action='store_false') ... @cli_plugin ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test._parser_args # doctest: +NORMALIZE_WHITESPACE [(('-b', '--bogabmo'), {'action': 'store_false'}), (('-m', '--mogambo'), {'action': 'store_true'})] """ def decorator(func): if not isinstance(func, CLIPluginFuncWrapper): func = CLIPluginFuncWrapper(do_run=func) func.add_argument(*args, **kwargs) return func return decorator
def function[cli_plugin_add_argument, parameter[]]: constant[ Decorator generator that adds an argument to the cli plugin based on the decorated function Args: *args: Any args to be passed to :func:`argparse.ArgumentParser.add_argument` *kwargs: Any keyword args to be passed to :func:`argparse.ArgumentParser.add_argument` Returns: function: Decorator that builds or extends the cliplugin for the decorated function, adding the given argument definition Examples: >>> @cli_plugin_add_argument('-m', '--mogambo', action='store_true') ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test._parser_args [(('-m', '--mogambo'), {'action': 'store_true'})] >>> @cli_plugin_add_argument('-m', '--mogambo', action='store_true') ... @cli_plugin_add_argument('-b', '--bogabmo', action='store_false') ... @cli_plugin ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test._parser_args # doctest: +NORMALIZE_WHITESPACE [(('-b', '--bogabmo'), {'action': 'store_false'}), (('-m', '--mogambo'), {'action': 'store_true'})] ] def function[decorator, parameter[func]]: if <ast.UnaryOp object at 0x7da2045679d0> begin[:] variable[func] assign[=] call[name[CLIPluginFuncWrapper], parameter[]] call[name[func].add_argument, parameter[<ast.Starred object at 0x7da204567a30>]] return[name[func]] return[name[decorator]]
keyword[def] identifier[cli_plugin_add_argument] (* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[def] identifier[decorator] ( identifier[func] ): keyword[if] keyword[not] identifier[isinstance] ( identifier[func] , identifier[CLIPluginFuncWrapper] ): identifier[func] = identifier[CLIPluginFuncWrapper] ( identifier[do_run] = identifier[func] ) identifier[func] . identifier[add_argument] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[func] keyword[return] identifier[decorator]
def cli_plugin_add_argument(*args, **kwargs): """ Decorator generator that adds an argument to the cli plugin based on the decorated function Args: *args: Any args to be passed to :func:`argparse.ArgumentParser.add_argument` *kwargs: Any keyword args to be passed to :func:`argparse.ArgumentParser.add_argument` Returns: function: Decorator that builds or extends the cliplugin for the decorated function, adding the given argument definition Examples: >>> @cli_plugin_add_argument('-m', '--mogambo', action='store_true') ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test._parser_args [(('-m', '--mogambo'), {'action': 'store_true'})] >>> @cli_plugin_add_argument('-m', '--mogambo', action='store_true') ... @cli_plugin_add_argument('-b', '--bogabmo', action='store_false') ... @cli_plugin ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test._parser_args # doctest: +NORMALIZE_WHITESPACE [(('-b', '--bogabmo'), {'action': 'store_false'}), (('-m', '--mogambo'), {'action': 'store_true'})] """ def decorator(func): if not isinstance(func, CLIPluginFuncWrapper): func = CLIPluginFuncWrapper(do_run=func) # depends on [control=['if'], data=[]] func.add_argument(*args, **kwargs) return func return decorator
def get_operator_statistic(self, name):
        """|coro|

        Gets the operator unique statistic from the operator definitions dict

        Returns
        -------
        str
            the name of the operator unique statistic"""
        opdefs = yield from self.get_operator_definitions()

        name = name.lower()

        if name not in opdefs:
            return None

        # some operators (e.g. Kaid and Nomad) don't have a unique statistic section for some reason...
        if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
            return None

        return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
def function[get_operator_statistic, parameter[self, name]]: constant[|coro| Gets the operator unique statistic from the operator definitions dict Returns ------- str the name of the operator unique statistic] variable[opdefs] assign[=] <ast.YieldFrom object at 0x7da1b13557e0> variable[name] assign[=] call[name[name].lower, parameter[]] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[opdefs]] begin[:] return[constant[None]] if <ast.BoolOp object at 0x7da1b13568c0> begin[:] return[constant[None]] return[call[call[call[call[name[opdefs]][name[name]]][constant[uniqueStatistic]]][constant[pvp]]][constant[statisticId]]]
keyword[def] identifier[get_operator_statistic] ( identifier[self] , identifier[name] ): literal[string] identifier[opdefs] = keyword[yield] keyword[from] identifier[self] . identifier[get_operator_definitions] () identifier[name] = identifier[name] . identifier[lower] () keyword[if] identifier[name] keyword[not] keyword[in] identifier[opdefs] : keyword[return] keyword[None] keyword[if] literal[string] keyword[not] keyword[in] identifier[opdefs] [ identifier[name] ] keyword[or] literal[string] keyword[not] keyword[in] identifier[opdefs] [ identifier[name] ][ literal[string] ]: keyword[return] keyword[None] keyword[return] identifier[opdefs] [ identifier[name] ][ literal[string] ][ literal[string] ][ literal[string] ]
def get_operator_statistic(self, name):
    """|coro|

        Gets the operator unique statistic from the operator definitions dict

        Returns
        -------
        str
            the name of the operator unique statistic"""
    opdefs = (yield from self.get_operator_definitions())
    name = name.lower()
    if name not in opdefs:
        return None # depends on [control=['if'], data=[]]
    # some operators (e.g. Kaid and Nomad) don't have a unique statistic section for some reason...
    if 'uniqueStatistic' not in opdefs[name] or 'pvp' not in opdefs[name]['uniqueStatistic']:
        return None # depends on [control=['if'], data=[]]
    return opdefs[name]['uniqueStatistic']['pvp']['statisticId']
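A hedged usage sketch; `auth` stands in for the API client instance. The method is a generator-based coroutine (`yield from`), so it is driven old-style here; note that this coroutine style was removed from asyncio in Python 3.11.

import asyncio

@asyncio.coroutine  # generator-based coroutine; pre-3.11 only
def show_stat(auth):
    stat_id = yield from auth.get_operator_statistic('Thermite')
    print(stat_id)  # None for operators without a unique pvp statistic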
def device_log_list(self, **kwargs):  # noqa: E501
        """DEPRECATED: List all device events.  # noqa: E501

        DEPRECATED: List all device events. Use `/v3/device-events/` instead.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass asynchronous=True
        >>> thread = api.device_log_list(asynchronous=True)
        >>> result = thread.get()

        :param asynchronous bool
        :param int limit: How many objects to retrieve in the page.
        :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`.
        :param str after: The ID of the item after which to retrieve the next page.
        :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`.
        :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` ###### Filterable fields: The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>date_time</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>description</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>device_id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>event_type</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>state_change</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> </tbody> </table> &nbsp; The examples below show the queries in *unencoded* form. ###### By id: ```id={id}``` ###### By state change: ```state_change=[True|False]``` ###### By event type: ```event_type={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to &ndash; field name suffixed with ```__gte``` * less than or equal to &ndash; field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```id=0158d38771f70000000000010010038c&state_change=True&date_time__gte=2016-11-30T16:25:12.1234Z``` Encoded: ```?filter=id%3D0158d38771f70000000000010010038c%26state_change%3DTrue%26date_time__gte%3D2016-11-30T16%3A25%3A12.1234Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `event_type__in=update.device.device-created,update.device.device-updated`
        :return: DeviceEventPage
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('asynchronous'):
            return self.device_log_list_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.device_log_list_with_http_info(**kwargs)  # noqa: E501
            return data
def function[device_log_list, parameter[self]]:
    constant[DEPRECATED: List all device events.  # noqa: E501

        DEPRECATED: List all device events. Use `/v3/device-events/` instead.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass asynchronous=True
        >>> thread = api.device_log_list(asynchronous=True)
        >>> result = thread.get()

        :param asynchronous bool
        :param int limit: How many objects to retrieve in the page.
        :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`.
        :param str after: The ID of the item after which to retrieve the next page.
        :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`.
        :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` ###### Filterable fields: The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>date_time</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>description</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>device_id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>event_type</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>state_change</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> </tbody> </table> &nbsp; The examples below show the queries in *unencoded* form. ###### By id: ```id={id}``` ###### By state change: ```state_change=[True|False]``` ###### By event type: ```event_type={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to &ndash; field name suffixed with ```__gte``` * less than or equal to &ndash; field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```id=0158d38771f70000000000010010038c&state_change=True&date_time__gte=2016-11-30T16:25:12.1234Z``` Encoded: ```?filter=id%3D0158d38771f70000000000010010038c%26state_change%3DTrue%26date_time__gte%3D2016-11-30T16%3A25%3A12.1234Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `event_type__in=update.device.device-created,update.device.device-updated`
        :return: DeviceEventPage
                 If the method is called asynchronously,
                 returns the request thread.
        ]
    call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
    if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
        return[call[name[self].device_log_list_with_http_info, parameter[]]]
keyword[def] identifier[device_log_list] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[device_log_list_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[device_log_list_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def device_log_list(self, **kwargs):  # noqa: E501
    'DEPRECATED: List all device events.  # noqa: E501\n\n        DEPRECATED: List all device events. Use `/v3/device-events/` instead.  # noqa: E501\n        This method makes a synchronous HTTP request by default. To make an\n        asynchronous HTTP request, please pass asynchronous=True\n        >>> thread = api.device_log_list(asynchronous=True)\n        >>> result = thread.get()\n\n        :param asynchronous bool\n        :param int limit: How many objects to retrieve in the page.\n        :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`.\n        :param str after: The ID of the item after which to retrieve the next page.\n        :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`.\n        :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` ###### Filterable fields: The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>date_time</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>description</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>device_id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>event_type</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>state_change</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> </tbody> </table> &nbsp; The examples below show the queries in *unencoded* form. ###### By id: ```id={id}``` ###### By state change: ```state_change=[True|False]``` ###### By event type: ```event_type={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to &ndash; field name suffixed with ```__gte``` * less than or equal to &ndash; field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```id=0158d38771f70000000000010010038c&state_change=True&date_time__gte=2016-11-30T16:25:12.1234Z``` Encoded: ```?filter=id%3D0158d38771f70000000000010010038c%26state_change%3DTrue%26date_time__gte%3D2016-11-30T16%3A25%3A12.1234Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `event_type__in=update.device.device-created,update.device.device-updated`\n        :return: DeviceEventPage\n                 If the method is called asynchronously,\n                 returns the request thread.\n        '
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.device_log_list_with_http_info(**kwargs)  # noqa: E501 # depends on [control=['if'], data=[]]
    else:
        data = self.device_log_list_with_http_info(**kwargs)  # noqa: E501
        return data
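A hedged sketch of building the `filter` parameter described in the docstring above, using only the standard library; whether the client expects the raw or the pre-encoded form depends on the transport layer, so both are shown.

from urllib.parse import quote

raw = 'id=0158d38771f70000000000010010038c&state_change=True'
encoded = quote(raw, safe='')  # 'id%3D0158...%26state_change%3DTrue'
# page = api.device_log_list(limit=50, order='DESC', filter=raw)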
def bind_unix_socket(path): """ Returns a unix file socket bound on (path). """ assert path bindsocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: os.unlink(path) except OSError: if os.path.exists(path): raise try: bindsocket.bind(path) except socket.error: logger.error("Couldn't bind socket on %s", path) return None logger.info('Listening on %s', path) bindsocket.listen(0) return bindsocket
def function[bind_unix_socket, parameter[path]]: constant[ Returns a unix file socket bound on (path). ] assert[name[path]] variable[bindsocket] assign[=] call[name[socket].socket, parameter[name[socket].AF_UNIX, name[socket].SOCK_STREAM]] <ast.Try object at 0x7da204621f60> <ast.Try object at 0x7da1b0e33460> call[name[logger].info, parameter[constant[Listening on %s], name[path]]] call[name[bindsocket].listen, parameter[constant[0]]] return[name[bindsocket]]
keyword[def] identifier[bind_unix_socket] ( identifier[path] ): literal[string] keyword[assert] identifier[path] identifier[bindsocket] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_UNIX] , identifier[socket] . identifier[SOCK_STREAM] ) keyword[try] : identifier[os] . identifier[unlink] ( identifier[path] ) keyword[except] identifier[OSError] : keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[raise] keyword[try] : identifier[bindsocket] . identifier[bind] ( identifier[path] ) keyword[except] identifier[socket] . identifier[error] : identifier[logger] . identifier[error] ( literal[string] , identifier[path] ) keyword[return] keyword[None] identifier[logger] . identifier[info] ( literal[string] , identifier[path] ) identifier[bindsocket] . identifier[listen] ( literal[int] ) keyword[return] identifier[bindsocket]
def bind_unix_socket(path): """ Returns a unix file socket bound on (path). """ assert path bindsocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: os.unlink(path) # depends on [control=['try'], data=[]] except OSError: if os.path.exists(path): raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] try: bindsocket.bind(path) # depends on [control=['try'], data=[]] except socket.error: logger.error("Couldn't bind socket on %s", path) return None # depends on [control=['except'], data=[]] logger.info('Listening on %s', path) bindsocket.listen(0) return bindsocket
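A hedged usage sketch for the helper above: bind, accept a single client, reply, and clean up. The socket path is a placeholder.

sock = bind_unix_socket('/tmp/demo.sock')
if sock is not None:
    conn, _ = sock.accept()   # blocks until a client connects
    conn.sendall(b'hello\n')
    conn.close()
    sock.close()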
def json_decode(s: str) -> Any: """ Decodes an object from JSON using our custom decoder. """ try: return json.JSONDecoder(object_hook=json_class_decoder_hook).decode(s) except json.JSONDecodeError: log.warning("Failed to decode JSON (returning None): {!r}", s) return None
def function[json_decode, parameter[s]]: constant[ Decodes an object from JSON using our custom decoder. ] <ast.Try object at 0x7da1b185d300>
keyword[def] identifier[json_decode] ( identifier[s] : identifier[str] )-> identifier[Any] : literal[string] keyword[try] : keyword[return] identifier[json] . identifier[JSONDecoder] ( identifier[object_hook] = identifier[json_class_decoder_hook] ). identifier[decode] ( identifier[s] ) keyword[except] identifier[json] . identifier[JSONDecodeError] : identifier[log] . identifier[warning] ( literal[string] , identifier[s] ) keyword[return] keyword[None]
def json_decode(s: str) -> Any: """ Decodes an object from JSON using our custom decoder. """ try: return json.JSONDecoder(object_hook=json_class_decoder_hook).decode(s) # depends on [control=['try'], data=[]] except json.JSONDecodeError: log.warning('Failed to decode JSON (returning None): {!r}', s) return None # depends on [control=['except'], data=[]]
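A hedged usage sketch; `json_class_decoder_hook` (not shown in this record) is assumed to pass plain JSON objects through unchanged.

obj = json_decode('{"a": 1}')   # -> {'a': 1}
bad = json_decode('{not json')  # -> None, after logging a warning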
def calculate_size(name, new_value): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(name) data_size += BOOLEAN_SIZE_IN_BYTES if new_value is not None: data_size += calculate_size_data(new_value) return data_size
def function[calculate_size, parameter[name, new_value]]: constant[ Calculates the request payload size] variable[data_size] assign[=] constant[0] <ast.AugAssign object at 0x7da1b1722f20> <ast.AugAssign object at 0x7da1b1722440> if compare[name[new_value] is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da1b2347e80> return[name[data_size]]
keyword[def] identifier[calculate_size] ( identifier[name] , identifier[new_value] ): literal[string] identifier[data_size] = literal[int] identifier[data_size] += identifier[calculate_size_str] ( identifier[name] ) identifier[data_size] += identifier[BOOLEAN_SIZE_IN_BYTES] keyword[if] identifier[new_value] keyword[is] keyword[not] keyword[None] : identifier[data_size] += identifier[calculate_size_data] ( identifier[new_value] ) keyword[return] identifier[data_size]
def calculate_size(name, new_value): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(name) data_size += BOOLEAN_SIZE_IN_BYTES if new_value is not None: data_size += calculate_size_data(new_value) # depends on [control=['if'], data=['new_value']] return data_size
def _extract_body(self): """ Extract the body content from HTML. """ def is_descendant_node(parent, node): node = node.getparent() while node is not None: if node == parent: return True node = node.getparent() return False for pattern in self.config.body: items = self.parsed_tree.xpath(pattern) if len(items) == 1: if self.config.prune: self.body = Document(etree.tostring(items[0])).summary() else: self.body = etree.tostring(items[0]) # We've got a body now. break else: appended_something = False body = etree.Element("root") for item in items: if item.getparent() is None: continue is_descendant = False for parent in body: if (is_descendant_node(parent, item)): is_descendant = True break if not is_descendant: if self.config.prune: # Clean with readability. Needs # to-string conversion first. pruned_string = Document( etree.tostring(item)).summary() # Re-parse the readability string # output and include it in our body. new_tree = etree.parse( StringIO(pruned_string), self.parser) failed = False try: body.append( new_tree.xpath('//html/body/div/div')[0] ) except IndexError: if 'id="readabilityBody"' in pruned_string: try: body.append( new_tree.xpath('//body') ) except: failed = True else: failed = True if failed: LOGGER.error(u'Pruning item failed:' u'\n\n%s\n\nWe got: “%s” ' u'and skipped it.', etree.tostring( item).replace(u'\n', u''), pruned_string.replace(u'\n', u''), extra={'siteconfig': self.config.host}) pass else: body.append(item) appended_something = True if appended_something: self.body = etree.tostring(body) # We've got a body now. break
def function[_extract_body, parameter[self]]: constant[ Extract the body content from HTML. ] def function[is_descendant_node, parameter[parent, node]]: variable[node] assign[=] call[name[node].getparent, parameter[]] while compare[name[node] is_not constant[None]] begin[:] if compare[name[node] equal[==] name[parent]] begin[:] return[constant[True]] variable[node] assign[=] call[name[node].getparent, parameter[]] return[constant[False]] for taget[name[pattern]] in starred[name[self].config.body] begin[:] variable[items] assign[=] call[name[self].parsed_tree.xpath, parameter[name[pattern]]] if compare[call[name[len], parameter[name[items]]] equal[==] constant[1]] begin[:] if name[self].config.prune begin[:] name[self].body assign[=] call[call[name[Document], parameter[call[name[etree].tostring, parameter[call[name[items]][constant[0]]]]]].summary, parameter[]] break
keyword[def] identifier[_extract_body] ( identifier[self] ): literal[string] keyword[def] identifier[is_descendant_node] ( identifier[parent] , identifier[node] ): identifier[node] = identifier[node] . identifier[getparent] () keyword[while] identifier[node] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[node] == identifier[parent] : keyword[return] keyword[True] identifier[node] = identifier[node] . identifier[getparent] () keyword[return] keyword[False] keyword[for] identifier[pattern] keyword[in] identifier[self] . identifier[config] . identifier[body] : identifier[items] = identifier[self] . identifier[parsed_tree] . identifier[xpath] ( identifier[pattern] ) keyword[if] identifier[len] ( identifier[items] )== literal[int] : keyword[if] identifier[self] . identifier[config] . identifier[prune] : identifier[self] . identifier[body] = identifier[Document] ( identifier[etree] . identifier[tostring] ( identifier[items] [ literal[int] ])). identifier[summary] () keyword[else] : identifier[self] . identifier[body] = identifier[etree] . identifier[tostring] ( identifier[items] [ literal[int] ]) keyword[break] keyword[else] : identifier[appended_something] = keyword[False] identifier[body] = identifier[etree] . identifier[Element] ( literal[string] ) keyword[for] identifier[item] keyword[in] identifier[items] : keyword[if] identifier[item] . identifier[getparent] () keyword[is] keyword[None] : keyword[continue] identifier[is_descendant] = keyword[False] keyword[for] identifier[parent] keyword[in] identifier[body] : keyword[if] ( identifier[is_descendant_node] ( identifier[parent] , identifier[item] )): identifier[is_descendant] = keyword[True] keyword[break] keyword[if] keyword[not] identifier[is_descendant] : keyword[if] identifier[self] . identifier[config] . identifier[prune] : identifier[pruned_string] = identifier[Document] ( identifier[etree] . identifier[tostring] ( identifier[item] )). identifier[summary] () identifier[new_tree] = identifier[etree] . identifier[parse] ( identifier[StringIO] ( identifier[pruned_string] ), identifier[self] . identifier[parser] ) identifier[failed] = keyword[False] keyword[try] : identifier[body] . identifier[append] ( identifier[new_tree] . identifier[xpath] ( literal[string] )[ literal[int] ] ) keyword[except] identifier[IndexError] : keyword[if] literal[string] keyword[in] identifier[pruned_string] : keyword[try] : identifier[body] . identifier[append] ( identifier[new_tree] . identifier[xpath] ( literal[string] ) ) keyword[except] : identifier[failed] = keyword[True] keyword[else] : identifier[failed] = keyword[True] keyword[if] identifier[failed] : identifier[LOGGER] . identifier[error] ( literal[string] literal[string] literal[string] , identifier[etree] . identifier[tostring] ( identifier[item] ). identifier[replace] ( literal[string] , literal[string] ), identifier[pruned_string] . identifier[replace] ( literal[string] , literal[string] ), identifier[extra] ={ literal[string] : identifier[self] . identifier[config] . identifier[host] }) keyword[pass] keyword[else] : identifier[body] . identifier[append] ( identifier[item] ) identifier[appended_something] = keyword[True] keyword[if] identifier[appended_something] : identifier[self] . identifier[body] = identifier[etree] . identifier[tostring] ( identifier[body] ) keyword[break]
def _extract_body(self): """ Extract the body content from HTML. """ def is_descendant_node(parent, node): node = node.getparent() while node is not None: if node == parent: return True # depends on [control=['if'], data=[]] node = node.getparent() # depends on [control=['while'], data=['node']] return False for pattern in self.config.body: items = self.parsed_tree.xpath(pattern) if len(items) == 1: if self.config.prune: self.body = Document(etree.tostring(items[0])).summary() # depends on [control=['if'], data=[]] else: self.body = etree.tostring(items[0]) # We've got a body now. break # depends on [control=['if'], data=[]] else: appended_something = False body = etree.Element('root') for item in items: if item.getparent() is None: continue # depends on [control=['if'], data=[]] is_descendant = False for parent in body: if is_descendant_node(parent, item): is_descendant = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['parent']] if not is_descendant: if self.config.prune: # Clean with readability. Needs # to-string conversion first. pruned_string = Document(etree.tostring(item)).summary() # Re-parse the readability string # output and include it in our body. new_tree = etree.parse(StringIO(pruned_string), self.parser) failed = False try: body.append(new_tree.xpath('//html/body/div/div')[0]) # depends on [control=['try'], data=[]] except IndexError: if 'id="readabilityBody"' in pruned_string: try: body.append(new_tree.xpath('//body')) # depends on [control=['try'], data=[]] except: failed = True # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: failed = True # depends on [control=['except'], data=[]] if failed: LOGGER.error(u'Pruning item failed:\n\n%s\n\nWe got: “%s” and skipped it.', etree.tostring(item).replace(u'\n', u''), pruned_string.replace(u'\n', u''), extra={'siteconfig': self.config.host}) pass # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: body.append(item) appended_something = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] if appended_something: self.body = etree.tostring(body) # We've got a body now. break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']]
def get_label(self): """ get label as ndarray from ImageFeature """ label = callBigDlFunc(self.bigdl_type, "imageFeatureToLabelTensor", self.value) return label.to_ndarray()
def function[get_label, parameter[self]]: constant[ get label as ndarray from ImageFeature ] variable[label] assign[=] call[name[callBigDlFunc], parameter[name[self].bigdl_type, constant[imageFeatureToLabelTensor], name[self].value]] return[call[name[label].to_ndarray, parameter[]]]
keyword[def] identifier[get_label] ( identifier[self] ): literal[string] identifier[label] = identifier[callBigDlFunc] ( identifier[self] . identifier[bigdl_type] , literal[string] , identifier[self] . identifier[value] ) keyword[return] identifier[label] . identifier[to_ndarray] ()
def get_label(self): """ get label as ndarray from ImageFeature """ label = callBigDlFunc(self.bigdl_type, 'imageFeatureToLabelTensor', self.value) return label.to_ndarray()
def geoadd(self, name, *values): """ Add the specified geospatial items to the specified key identified by the ``name`` argument. The Geospatial items are given as ordered members of the ``values`` argument, each item or place is formed by the triad longitude, latitude and name. """ if len(values) % 3 != 0: raise DataError("GEOADD requires places with lon, lat and name" " values") return self.execute_command('GEOADD', name, *values)
def function[geoadd, parameter[self, name]]: constant[ Add the specified geospatial items to the specified key identified by the ``name`` argument. The Geospatial items are given as ordered members of the ``values`` argument, each item or place is formed by the triad longitude, latitude and name. ] if compare[binary_operation[call[name[len], parameter[name[values]]] <ast.Mod object at 0x7da2590d6920> constant[3]] not_equal[!=] constant[0]] begin[:] <ast.Raise object at 0x7da1b1f96e60> return[call[name[self].execute_command, parameter[constant[GEOADD], name[name], <ast.Starred object at 0x7da1b1f94340>]]]
keyword[def] identifier[geoadd] ( identifier[self] , identifier[name] ,* identifier[values] ): literal[string] keyword[if] identifier[len] ( identifier[values] )% literal[int] != literal[int] : keyword[raise] identifier[DataError] ( literal[string] literal[string] ) keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] ,* identifier[values] )
def geoadd(self, name, *values): """ Add the specified geospatial items to the specified key identified by the ``name`` argument. The Geospatial items are given as ordered members of the ``values`` argument, each item or place is formed by the triad longitude, latitude and name. """ if len(values) % 3 != 0: raise DataError('GEOADD requires places with lon, lat and name values') # depends on [control=['if'], data=[]] return self.execute_command('GEOADD', name, *values)
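A hedged usage sketch; `r` stands in for a connected client exposing this method. Members are passed as flat (longitude, latitude, name) triads, which is why the length check requires a multiple of three.

r.geoadd('cities',
         13.361389, 38.115556, 'Palermo',
         15.087269, 37.502669, 'Catania')
# r.geoadd('cities', 13.36, 38.11) would raise DataError (incomplete triad)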
def breadcrumb(self): """List of ``(url, title)`` tuples defining the current breadcrumb path. """ if self.path == '.': return [] path = self.path breadcrumb = [((self.url_ext or '.'), self.title)] while True: path = os.path.normpath(os.path.join(path, '..')) if path == '.': break url = (url_from_path(os.path.relpath(path, self.path)) + '/' + self.url_ext) breadcrumb.append((url, self.gallery.albums[path].title)) breadcrumb.reverse() return breadcrumb
def function[breadcrumb, parameter[self]]: constant[List of ``(url, title)`` tuples defining the current breadcrumb path. ] if compare[name[self].path equal[==] constant[.]] begin[:] return[list[[]]] variable[path] assign[=] name[self].path variable[breadcrumb] assign[=] list[[<ast.Tuple object at 0x7da1b016c5b0>]] while constant[True] begin[:] variable[path] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[path], constant[..]]]]] if compare[name[path] equal[==] constant[.]] begin[:] break variable[url] assign[=] binary_operation[binary_operation[call[name[url_from_path], parameter[call[name[os].path.relpath, parameter[name[path], name[self].path]]]] + constant[/]] + name[self].url_ext] call[name[breadcrumb].append, parameter[tuple[[<ast.Name object at 0x7da1b016f490>, <ast.Attribute object at 0x7da1b016e9b0>]]]] call[name[breadcrumb].reverse, parameter[]] return[name[breadcrumb]]
keyword[def] identifier[breadcrumb] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[path] == literal[string] : keyword[return] [] identifier[path] = identifier[self] . identifier[path] identifier[breadcrumb] =[(( identifier[self] . identifier[url_ext] keyword[or] literal[string] ), identifier[self] . identifier[title] )] keyword[while] keyword[True] : identifier[path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] )) keyword[if] identifier[path] == literal[string] : keyword[break] identifier[url] =( identifier[url_from_path] ( identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] , identifier[self] . identifier[path] ))+ literal[string] + identifier[self] . identifier[url_ext] ) identifier[breadcrumb] . identifier[append] (( identifier[url] , identifier[self] . identifier[gallery] . identifier[albums] [ identifier[path] ]. identifier[title] )) identifier[breadcrumb] . identifier[reverse] () keyword[return] identifier[breadcrumb]
def breadcrumb(self): """List of ``(url, title)`` tuples defining the current breadcrumb path. """ if self.path == '.': return [] # depends on [control=['if'], data=[]] path = self.path breadcrumb = [(self.url_ext or '.', self.title)] while True: path = os.path.normpath(os.path.join(path, '..')) if path == '.': break # depends on [control=['if'], data=[]] url = url_from_path(os.path.relpath(path, self.path)) + '/' + self.url_ext breadcrumb.append((url, self.gallery.albums[path].title)) # depends on [control=['while'], data=[]] breadcrumb.reverse() return breadcrumb
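A hedged illustration of the property above for a hypothetical album at path 'holidays/2018/rome', assuming an empty `url_ext`.

# album.breadcrumb ->
#   [('../../', 'Holidays'), ('../', '2018'), ('.', 'Rome')]
# one (url, title) pair per ancestor, root first, current album last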
def authenticate(self): """Authenticate the session""" postdata = self.authentication_postdata jar = requests.cookies.cookielib.CookieJar() self.cookies = jar resp = self.get(self.authentication_base_url) authtok = _extract_authenticity_token(resp.content) if postdata is None: # This works for GitHub postdata = {"login": self.oauth2_username, "password": self._oauth2_password, "authenticity_token": authtok, "commit": "Sign+in", "utf8": u"\u2713", } # pylint: disable=bad-continuation self.authentication_postdata = postdata if self.authentication_session_url is None: # This is also for GitHub authentication_session_url = "https://github.com/session" self.authentication_session_url = authentication_session_url self.post(self.authentication_session_url, data=postdata)
def function[authenticate, parameter[self]]: constant[Authenticate the session] variable[postdata] assign[=] name[self].authentication_postdata variable[jar] assign[=] call[name[requests].cookies.cookielib.CookieJar, parameter[]] name[self].cookies assign[=] name[jar] variable[resp] assign[=] call[name[self].get, parameter[name[self].authentication_base_url]] variable[authtok] assign[=] call[name[_extract_authenticity_token], parameter[name[resp].content]] if compare[name[postdata] is constant[None]] begin[:] variable[postdata] assign[=] dictionary[[<ast.Constant object at 0x7da18bc71420>, <ast.Constant object at 0x7da18bc73af0>, <ast.Constant object at 0x7da18bc70df0>, <ast.Constant object at 0x7da18bc70100>, <ast.Constant object at 0x7da18bc706d0>], [<ast.Attribute object at 0x7da18bc72650>, <ast.Attribute object at 0x7da18bc715d0>, <ast.Name object at 0x7da18bc73f70>, <ast.Constant object at 0x7da18bc737c0>, <ast.Constant object at 0x7da18bc72cb0>]] name[self].authentication_postdata assign[=] name[postdata] if compare[name[self].authentication_session_url is constant[None]] begin[:] variable[authentication_session_url] assign[=] constant[https://github.com/session] name[self].authentication_session_url assign[=] name[authentication_session_url] call[name[self].post, parameter[name[self].authentication_session_url]]
keyword[def] identifier[authenticate] ( identifier[self] ): literal[string] identifier[postdata] = identifier[self] . identifier[authentication_postdata] identifier[jar] = identifier[requests] . identifier[cookies] . identifier[cookielib] . identifier[CookieJar] () identifier[self] . identifier[cookies] = identifier[jar] identifier[resp] = identifier[self] . identifier[get] ( identifier[self] . identifier[authentication_base_url] ) identifier[authtok] = identifier[_extract_authenticity_token] ( identifier[resp] . identifier[content] ) keyword[if] identifier[postdata] keyword[is] keyword[None] : identifier[postdata] ={ literal[string] : identifier[self] . identifier[oauth2_username] , literal[string] : identifier[self] . identifier[_oauth2_password] , literal[string] : identifier[authtok] , literal[string] : literal[string] , literal[string] : literal[string] , } identifier[self] . identifier[authentication_postdata] = identifier[postdata] keyword[if] identifier[self] . identifier[authentication_session_url] keyword[is] keyword[None] : identifier[authentication_session_url] = literal[string] identifier[self] . identifier[authentication_session_url] = identifier[authentication_session_url] identifier[self] . identifier[post] ( identifier[self] . identifier[authentication_session_url] , identifier[data] = identifier[postdata] )
def authenticate(self): """Authenticate the session""" postdata = self.authentication_postdata jar = requests.cookies.cookielib.CookieJar() self.cookies = jar resp = self.get(self.authentication_base_url) authtok = _extract_authenticity_token(resp.content) if postdata is None: # This works for GitHub postdata = {'login': self.oauth2_username, 'password': self._oauth2_password, 'authenticity_token': authtok, 'commit': 'Sign+in', 'utf8': u'✓'} # pylint: disable=bad-continuation self.authentication_postdata = postdata # depends on [control=['if'], data=['postdata']] if self.authentication_session_url is None: # This is also for GitHub authentication_session_url = 'https://github.com/session' self.authentication_session_url = authentication_session_url # depends on [control=['if'], data=[]] self.post(self.authentication_session_url, data=postdata)
def P(self): """Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix """ try: return self._diff_op except AttributeError: self._diff_op = normalize(self.kernel, 'l1', axis=1) return self._diff_op
def function[P, parameter[self]]: constant[Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix ] <ast.Try object at 0x7da1b0b04040>
keyword[def] identifier[P] ( identifier[self] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[_diff_op] keyword[except] identifier[AttributeError] : identifier[self] . identifier[_diff_op] = identifier[normalize] ( identifier[self] . identifier[kernel] , literal[string] , identifier[axis] = literal[int] ) keyword[return] identifier[self] . identifier[_diff_op]
def P(self): """Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix """ try: return self._diff_op # depends on [control=['try'], data=[]] except AttributeError: self._diff_op = normalize(self.kernel, 'l1', axis=1) return self._diff_op # depends on [control=['except'], data=[]]
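A minimal sketch of what the cached property computes, assuming `normalize` is `sklearn.preprocessing.normalize` and `self.kernel` is a dense affinity matrix:

import numpy as np
from sklearn.preprocessing import normalize  # assumed source of normalize()

kernel = np.array([[1.0, 0.5],
                   [0.5, 1.0]])
P = normalize(kernel, 'l1', axis=1)  # row-stochastic: each row sums to 1
assert np.allclose(P.sum(axis=1), 1.0)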
def annotate_segments(self, Z):
        """
        Report the copy number and start-end segment
        """
        # We need a way to go from compressed indices to original indices
        P = Z.copy()
        P[~np.isfinite(P)] = -1
        _, mapping = np.unique(np.cumsum(P >= 0), return_index=True)

        dZ = Z.compressed()
        uniq, idx = np.unique(dZ, return_inverse=True)
        segments = []
        for i, mean_cn in enumerate(uniq):
            if not np.isfinite(mean_cn):
                continue
            for rr in contiguous_regions(idx == i):
                segments.append((mean_cn, mapping[rr]))

        return segments
def function[annotate_segments, parameter[self, Z]]: constant[ Report the copy number and start-end segment ] variable[P] assign[=] call[name[Z].copy, parameter[]] call[name[P]][<ast.UnaryOp object at 0x7da1b09bf730>] assign[=] <ast.UnaryOp object at 0x7da1b09bf4c0> <ast.Tuple object at 0x7da1b09bda20> assign[=] call[name[np].unique, parameter[call[name[np].cumsum, parameter[compare[name[P] greater_or_equal[>=] constant[0]]]]]] variable[dZ] assign[=] call[name[Z].compressed, parameter[]] <ast.Tuple object at 0x7da1b09be860> assign[=] call[name[np].unique, parameter[name[dZ]]] variable[segments] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b09bf820>, <ast.Name object at 0x7da1b09bedd0>]]] in starred[call[name[enumerate], parameter[name[uniq]]]] begin[:] if <ast.UnaryOp object at 0x7da1b09bdae0> begin[:] continue for taget[name[rr]] in starred[call[name[contiguous_regions], parameter[compare[name[idx] equal[==] name[i]]]]] begin[:] call[name[segments].append, parameter[tuple[[<ast.Name object at 0x7da1b09bf760>, <ast.Subscript object at 0x7da1b09bec20>]]]] return[name[segments]]
keyword[def] identifier[annotate_segments] ( identifier[self] , identifier[Z] ): literal[string] identifier[P] = identifier[Z] . identifier[copy] () identifier[P] [~ identifier[np] . identifier[isfinite] ( identifier[P] )]=- literal[int] identifier[_] , identifier[mapping] = identifier[np] . identifier[unique] ( identifier[np] . identifier[cumsum] ( identifier[P] >= literal[int] ), identifier[return_index] = keyword[True] ) identifier[dZ] = identifier[Z] . identifier[compressed] () identifier[uniq] , identifier[idx] = identifier[np] . identifier[unique] ( identifier[dZ] , identifier[return_inverse] = keyword[True] ) identifier[segments] =[] keyword[for] identifier[i] , identifier[mean_cn] keyword[in] identifier[enumerate] ( identifier[uniq] ): keyword[if] keyword[not] identifier[np] . identifier[isfinite] ( identifier[mean_cn] ): keyword[continue] keyword[for] identifier[rr] keyword[in] identifier[contiguous_regions] ( identifier[idx] == identifier[i] ): identifier[segments] . identifier[append] (( identifier[mean_cn] , identifier[mapping] [ identifier[rr] ])) keyword[return] identifier[segments]
def annotate_segments(self, Z):
    """
    Report the copy number and start-end segment
    """
    # We need a way to go from compressed indices to original indices
    P = Z.copy()
    P[~np.isfinite(P)] = -1
    (_, mapping) = np.unique(np.cumsum(P >= 0), return_index=True)
    dZ = Z.compressed()
    (uniq, idx) = np.unique(dZ, return_inverse=True)
    segments = []
    for (i, mean_cn) in enumerate(uniq):
        if not np.isfinite(mean_cn):
            continue # depends on [control=['if'], data=[]]
        for rr in contiguous_regions(idx == i):
            segments.append((mean_cn, mapping[rr])) # depends on [control=['for'], data=['rr']] # depends on [control=['for'], data=[]]
    return segments
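`contiguous_regions` is an external helper not shown here; a plausible sketch (an assumption, not the project's implementation) that returns half-open `(start, stop)` index pairs for each run of True values, which is the shape the loop above indexes `mapping` with:

import numpy as np

def contiguous_regions(condition):
    # Hypothetical helper: pad with False so runs touching the edges still
    # produce transitions, then locate the 0->1 (start) and 1->0 (stop) edges.
    condition = np.asarray(condition, dtype=bool)
    d = np.diff(np.concatenate(([False], condition, [False])).astype(int))
    starts = np.flatnonzero(d == 1)
    stops = np.flatnonzero(d == -1)
    return np.column_stack((starts, stops))

# contiguous_regions([False, True, True, False, True]) -> [[1, 3], [4, 5]]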
def parse_cgmlst_alleles(cgmlst_fasta): """Parse cgMLST alleles from fasta file cgMLST FASTA file must have a header format of ">{marker name}|{allele name}" Args: cgmlst_fasta (str): cgMLST fasta file path Returns: dict of list: Marker name to list of allele sequences """ out = defaultdict(list) for header, seq in parse_fasta(cgmlst_fasta): if not '|' in header: raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header)) marker_name, allele_name = header.split('|') out[marker_name].append(seq) return out
def function[parse_cgmlst_alleles, parameter[cgmlst_fasta]]: constant[Parse cgMLST alleles from fasta file cgMLST FASTA file must have a header format of ">{marker name}|{allele name}" Args: cgmlst_fasta (str): cgMLST fasta file path Returns: dict of list: Marker name to list of allele sequences ] variable[out] assign[=] call[name[defaultdict], parameter[name[list]]] for taget[tuple[[<ast.Name object at 0x7da1b1a22080>, <ast.Name object at 0x7da1b1a21a80>]]] in starred[call[name[parse_fasta], parameter[name[cgmlst_fasta]]]] begin[:] if <ast.UnaryOp object at 0x7da1b1a217b0> begin[:] <ast.Raise object at 0x7da1b1a22170> <ast.Tuple object at 0x7da1b1a222f0> assign[=] call[name[header].split, parameter[constant[|]]] call[call[name[out]][name[marker_name]].append, parameter[name[seq]]] return[name[out]]
keyword[def] identifier[parse_cgmlst_alleles] ( identifier[cgmlst_fasta] ): literal[string] identifier[out] = identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[header] , identifier[seq] keyword[in] identifier[parse_fasta] ( identifier[cgmlst_fasta] ): keyword[if] keyword[not] literal[string] keyword[in] identifier[header] : keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[header] )) identifier[marker_name] , identifier[allele_name] = identifier[header] . identifier[split] ( literal[string] ) identifier[out] [ identifier[marker_name] ]. identifier[append] ( identifier[seq] ) keyword[return] identifier[out]
def parse_cgmlst_alleles(cgmlst_fasta): """Parse cgMLST alleles from fasta file cgMLST FASTA file must have a header format of ">{marker name}|{allele name}" Args: cgmlst_fasta (str): cgMLST fasta file path Returns: dict of list: Marker name to list of allele sequences """ out = defaultdict(list) for (header, seq) in parse_fasta(cgmlst_fasta): if not '|' in header: raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header)) # depends on [control=['if'], data=[]] (marker_name, allele_name) = header.split('|') out[marker_name].append(seq) # depends on [control=['for'], data=[]] return out
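A quick illustration of the `>{marker name}|{allele name}` header contract; `parse_fasta` is the project's own reader, so its output is mocked here with a literal list of `(header, sequence)` pairs:

from collections import defaultdict

records = [("wgMLST_1|1", "ATGC"), ("wgMLST_1|2", "ATGA")]  # mocked parse_fasta output
out = defaultdict(list)
for header, seq in records:
    marker_name, allele_name = header.split('|')
    out[marker_name].append(seq)
assert out["wgMLST_1"] == ["ATGC", "ATGA"]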
def as_posix(self): """Return the string representation of the path with forward (/) slashes.""" f = self._flavour return str(self).replace(f.sep, '/')
def function[as_posix, parameter[self]]: constant[Return the string representation of the path with forward (/) slashes.] variable[f] assign[=] name[self]._flavour return[call[call[name[str], parameter[name[self]]].replace, parameter[name[f].sep, constant[/]]]]
keyword[def] identifier[as_posix] ( identifier[self] ): literal[string] identifier[f] = identifier[self] . identifier[_flavour] keyword[return] identifier[str] ( identifier[self] ). identifier[replace] ( identifier[f] . identifier[sep] , literal[string] )
def as_posix(self): """Return the string representation of the path with forward (/) slashes.""" f = self._flavour return str(self).replace(f.sep, '/')
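For comparison, the standard-library `pathlib` equivalent behaves the same way:

from pathlib import PureWindowsPath

# Backslash separators are rewritten as forward slashes.
assert PureWindowsPath(r"c:\tmp\x").as_posix() == "c:/tmp/x"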
def probabilistic_collocation(order, dist, subset=.1):
    """
    Probabilistic collocation method.

    Args:
        order (int, numpy.ndarray) : Quadrature order along each axis.
        dist (Dist) : Distribution to generate samples from.
        subset (float) : Rate at which to remove samples.
    """
    abscissas, weights = chaospy.quad.collection.golub_welsch(order, dist)

    likelihood = dist.pdf(abscissas)
    alpha = numpy.random.random(len(weights))
    alpha = likelihood > alpha*subset*numpy.max(likelihood)
    abscissas = abscissas.T[alpha].T
    weights = weights[alpha]
    return abscissas, weights
def function[probabilistic_collocation, parameter[order, dist, subset]]: constant[ Probabilistic collocation method. Args: order (int, numpy.ndarray) : Quadrature order along each axis. dist (Dist) : Distribution to generate samples from. subset (float) : Rate at which to remove samples. ] <ast.Tuple object at 0x7da20c6aac20> assign[=] call[name[chaospy].quad.collection.golub_welsch, parameter[name[order], name[dist]]] variable[likelihood] assign[=] call[name[dist].pdf, parameter[name[abscissas]]] variable[alpha] assign[=] call[name[numpy].random.random, parameter[call[name[len], parameter[name[weights]]]]] variable[alpha] assign[=] compare[name[likelihood] greater[>] binary_operation[binary_operation[name[alpha] * name[subset]] * call[name[numpy].max, parameter[name[likelihood]]]]] variable[abscissas] assign[=] call[name[abscissas].T][name[alpha]].T variable[weights] assign[=] call[name[weights]][name[alpha]] return[tuple[[<ast.Name object at 0x7da204567fa0>, <ast.Name object at 0x7da204567d90>]]]
keyword[def] identifier[probabilistic_collocation] ( identifier[order] , identifier[dist] , identifier[subset] = literal[int] ): literal[string] identifier[abscissas] , identifier[weights] = identifier[chaospy] . identifier[quad] . identifier[collection] . identifier[golub_welsch] ( identifier[order] , identifier[dist] ) identifier[likelihood] = identifier[dist] . identifier[pdf] ( identifier[abscissas] ) identifier[alpha] = identifier[numpy] . identifier[random] . identifier[random] ( identifier[len] ( identifier[weights] )) identifier[alpha] = identifier[likelihood] > identifier[alpha] * identifier[subset] * identifier[numpy] . identifier[max] ( identifier[likelihood] ) identifier[abscissas] = identifier[abscissas] . identifier[T] [ identifier[alpha] ]. identifier[T] identifier[weights] = identifier[weights] [ identifier[alpha] ] keyword[return] identifier[abscissas] , identifier[weights]
def probabilistic_collocation(order, dist, subset=0.1):
    """
    Probabilistic collocation method.

    Args:
        order (int, numpy.ndarray) : Quadrature order along each axis.
        dist (Dist) : Distribution to generate samples from.
        subset (float) : Rate at which to remove samples.
    """
    (abscissas, weights) = chaospy.quad.collection.golub_welsch(order, dist)
    likelihood = dist.pdf(abscissas)
    alpha = numpy.random.random(len(weights))
    alpha = likelihood > alpha * subset * numpy.max(likelihood)
    abscissas = abscissas.T[alpha].T
    weights = weights[alpha]
    return (abscissas, weights)
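The rejection step in isolation, with the `chaospy` quadrature call replaced by hand-written nodes (the values below are illustrative assumptions, not real quadrature output):

import numpy as np

likelihood = np.array([0.05, 0.30, 0.40, 0.30, 0.05])  # pdf at the nodes
weights = np.full(5, 0.2)
subset = 0.1
alpha = np.random.random(len(weights))
keep = likelihood > alpha * subset * np.max(likelihood)
weights = weights[keep]  # low-likelihood nodes are rejected more often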
def get_wrapping_class(node): """Get the class that wraps the given node. We consider that a class wraps a node if the class is a parent for the said node. :returns: The class that wraps the given node :rtype: ClassDef or None """ klass = node.frame() while klass is not None and not isinstance(klass, ClassDef): if klass.parent is None: klass = None else: klass = klass.parent.frame() return klass
def function[get_wrapping_class, parameter[node]]: constant[Get the class that wraps the given node. We consider that a class wraps a node if the class is a parent for the said node. :returns: The class that wraps the given node :rtype: ClassDef or None ] variable[klass] assign[=] call[name[node].frame, parameter[]] while <ast.BoolOp object at 0x7da1b1ec1ab0> begin[:] if compare[name[klass].parent is constant[None]] begin[:] variable[klass] assign[=] constant[None] return[name[klass]]
keyword[def] identifier[get_wrapping_class] ( identifier[node] ): literal[string] identifier[klass] = identifier[node] . identifier[frame] () keyword[while] identifier[klass] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[klass] , identifier[ClassDef] ): keyword[if] identifier[klass] . identifier[parent] keyword[is] keyword[None] : identifier[klass] = keyword[None] keyword[else] : identifier[klass] = identifier[klass] . identifier[parent] . identifier[frame] () keyword[return] identifier[klass]
def get_wrapping_class(node): """Get the class that wraps the given node. We consider that a class wraps a node if the class is a parent for the said node. :returns: The class that wraps the given node :rtype: ClassDef or None """ klass = node.frame() while klass is not None and (not isinstance(klass, ClassDef)): if klass.parent is None: klass = None # depends on [control=['if'], data=[]] else: klass = klass.parent.frame() # depends on [control=['while'], data=[]] return klass
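A usage sketch assuming the `astroid` AST library, where `frame()` and `ClassDef` come from; the import path for `ClassDef` is an assumption about the installed version:

import astroid
from astroid.nodes import ClassDef  # needed by get_wrapping_class above

module = astroid.parse(
    "class Foo:\n"
    "    def method(self):\n"
    "        pass\n")
method = module.body[0].body[0]  # the FunctionDef node inside Foo
assert get_wrapping_class(method).name == "Foo"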
def FetchRequestsAndResponses(self, session_id, timestamp=None):
    """Fetches all outstanding requests and responses for this flow.

    We first cache all requests and responses for this flow in memory to
    prevent round trips.

    Args:
      session_id: The session_id to get the requests/responses for.
      timestamp: Tuple (start, end) with a time range. Fetched requests and
        responses will have timestamp in this range.

    Yields:
      a tuple (request protobufs, list of responses messages) in ascending
      order of request ids.

    Raises:
      MoreDataException: When there is more data available than read by the
        limited query.
    """
    if timestamp is None:
      timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now())

    num_requests = 0
    for request, responses in self.data_store.ReadRequestsAndResponses(
        session_id,
        timestamp=timestamp,
        request_limit=self.request_limit,
        response_limit=self.response_limit):
      yield (request, responses)
      num_requests += 1

    if num_requests >= self.request_limit:
      raise MoreDataException()
def function[FetchRequestsAndResponses, parameter[self, session_id, timestamp]]: constant[Fetches all outstanding requests and responses for this flow. We first cache all requests and responses for this flow in memory to prevent round trips. Args: session_id: The session_id to get the requests/responses for. timestamp: Tuple (start, end) with a time range. Fetched requests and responses will have timestamp in this range. Yields: a tuple (request protobufs, list of responses messages) in ascending order of request ids. Raises: MoreDataException: When there is more data available than read by the limited query. ] if compare[name[timestamp] is constant[None]] begin[:] variable[timestamp] assign[=] tuple[[<ast.Constant object at 0x7da1b1b47c40>, <ast.BoolOp object at 0x7da1b1b47670>]] variable[num_requests] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da1b1b47f40>, <ast.Name object at 0x7da1b1b47160>]]] in starred[call[name[self].data_store.ReadRequestsAndResponses, parameter[name[session_id]]]] begin[:] <ast.Yield object at 0x7da1b1b469e0> <ast.AugAssign object at 0x7da1b1b45120> if compare[name[num_requests] greater_or_equal[>=] name[self].request_limit] begin[:] <ast.Raise object at 0x7da1b1b44400>
keyword[def] identifier[FetchRequestsAndResponses] ( identifier[self] , identifier[session_id] , identifier[timestamp] = keyword[None] ): literal[string] keyword[if] identifier[timestamp] keyword[is] keyword[None] : identifier[timestamp] =( literal[int] , identifier[self] . identifier[frozen_timestamp] keyword[or] identifier[rdfvalue] . identifier[RDFDatetime] . identifier[Now] ()) identifier[num_requests] = literal[int] keyword[for] identifier[request] , identifier[responses] keyword[in] identifier[self] . identifier[data_store] . identifier[ReadRequestsAndResponses] ( identifier[session_id] , identifier[timestamp] = identifier[timestamp] , identifier[request_limit] = identifier[self] . identifier[request_limit] , identifier[response_limit] = identifier[self] . identifier[response_limit] ): keyword[yield] ( identifier[request] , identifier[responses] ) identifier[num_requests] += literal[int] keyword[if] identifier[num_requests] >= identifier[self] . identifier[request_limit] : keyword[raise] identifier[MoreDataException] ()
def FetchRequestsAndResponses(self, session_id, timestamp=None):
    """Fetches all outstanding requests and responses for this flow.

    We first cache all requests and responses for this flow in memory to
    prevent round trips.

    Args:
      session_id: The session_id to get the requests/responses for.
      timestamp: Tuple (start, end) with a time range. Fetched requests and
        responses will have timestamp in this range.

    Yields:
      a tuple (request protobufs, list of responses messages) in ascending
      order of request ids.

    Raises:
      MoreDataException: When there is more data available than read by the
        limited query.
    """
    if timestamp is None:
        timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now()) # depends on [control=['if'], data=['timestamp']]
    num_requests = 0
    for (request, responses) in self.data_store.ReadRequestsAndResponses(session_id, timestamp=timestamp, request_limit=self.request_limit, response_limit=self.response_limit):
        yield (request, responses)
        num_requests += 1 # depends on [control=['for'], data=[]]
    if num_requests >= self.request_limit:
        raise MoreDataException() # depends on [control=['if'], data=[]]
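A toy illustration (assumptions only; none of GRR's data store is involved) of the yield-then-raise pattern the method uses to cap how many requests it returns:

class MoreDataException(Exception):
    """Signals that the limited read left data behind."""

def fetch(items, limit):
    for count, item in enumerate(items, start=1):
        yield item
        if count >= limit:
            raise MoreDataException()

results = []
try:
    for item in fetch(range(10), limit=3):
        results.append(item)
except MoreDataException:
    pass  # the caller knows the read was truncated
assert results == [0, 1, 2]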
def new_completion_from_position(self, position): """ (Only for internal use!) Get a new completion by splitting this one. Used by `CommandLineInterface` when it needs to have a list of new completions after inserting the common prefix. """ assert isinstance(position, int) and position - self.start_position >= 0 return Completion( text=self.text[position - self.start_position:], display=self.display, display_meta=self._display_meta, get_display_meta=self._get_display_meta)
def function[new_completion_from_position, parameter[self, position]]: constant[ (Only for internal use!) Get a new completion by splitting this one. Used by `CommandLineInterface` when it needs to have a list of new completions after inserting the common prefix. ] assert[<ast.BoolOp object at 0x7da18bcc9a50>] return[call[name[Completion], parameter[]]]
keyword[def] identifier[new_completion_from_position] ( identifier[self] , identifier[position] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[position] , identifier[int] ) keyword[and] identifier[position] - identifier[self] . identifier[start_position] >= literal[int] keyword[return] identifier[Completion] ( identifier[text] = identifier[self] . identifier[text] [ identifier[position] - identifier[self] . identifier[start_position] :], identifier[display] = identifier[self] . identifier[display] , identifier[display_meta] = identifier[self] . identifier[_display_meta] , identifier[get_display_meta] = identifier[self] . identifier[_get_display_meta] )
def new_completion_from_position(self, position): """ (Only for internal use!) Get a new completion by splitting this one. Used by `CommandLineInterface` when it needs to have a list of new completions after inserting the common prefix. """ assert isinstance(position, int) and position - self.start_position >= 0 return Completion(text=self.text[position - self.start_position:], display=self.display, display_meta=self._display_meta, get_display_meta=self._get_display_meta)
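A usage sketch with `prompt_toolkit`'s `Completion` (the class this method belongs to); the arithmetic splits off the part of the text that has already been inserted:

from prompt_toolkit.completion import Completion

c = Completion("hello", start_position=-5)  # completes text 5 chars back
c2 = c.new_completion_from_position(-2)     # 3 of those chars were inserted
assert c2.text == "llo"                     # -2 - (-5) = 3, so text[3:]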
def set_server_key(self, zmq_socket, server_secret_key_path):
        '''Must be called before bind().'''
        load_and_set_key(zmq_socket, server_secret_key_path)
        zmq_socket.curve_server = True
def function[set_server_key, parameter[self, zmq_socket, server_secret_key_path]]: constant[Must be called before bind().] call[name[load_and_set_key], parameter[name[zmq_socket], name[server_secret_key_path]]] name[zmq_socket].curve_server assign[=] constant[True]
keyword[def] identifier[set_server_key] ( identifier[self] , identifier[zmq_socket] , identifier[server_secret_key_path] ): literal[string] identifier[load_and_set_key] ( identifier[zmq_socket] , identifier[server_secret_key_path] ) identifier[zmq_socket] . identifier[curve_server] = keyword[True]
def set_server_key(self, zmq_socket, server_secret_key_path):
    """Must be called before bind()."""
    load_and_set_key(zmq_socket, server_secret_key_path)
    zmq_socket.curve_server = True
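A minimal CURVE server sketch using `pyzmq` directly; `load_and_set_key` is the project's own helper, so `zmq.auth.load_certificate` and the key-file path stand in for it here as assumptions:

import zmq
import zmq.auth

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.REP)
public, secret = zmq.auth.load_certificate("server.key_secret")  # hypothetical path
sock.curve_publickey = public
sock.curve_secretkey = secret
sock.curve_server = True  # as the docstring says: set before bind()
sock.bind("tcp://127.0.0.1:5555")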
def auto_zoom(zoomx=True, zoomy=True, axes="gca", x_space=0.04, y_space=0.04, draw=True): """ Looks at the bounds of the plotted data and zooms accordingly, leaving some space around the data. """ # Disable auto-updating by default. _pylab.ioff() if axes=="gca": axes = _pylab.gca() # get the current bounds x10, x20 = axes.get_xlim() y10, y20 = axes.get_ylim() # Autoscale using pylab's technique (catches the error bars!) axes.autoscale(enable=True, tight=True) # Add padding if axes.get_xscale() == 'linear': x1, x2 = axes.get_xlim() xc = 0.5*(x1+x2) xs = 0.5*(1+x_space)*(x2-x1) axes.set_xlim(xc-xs, xc+xs) if axes.get_yscale() == 'linear': y1, y2 = axes.get_ylim() yc = 0.5*(y1+y2) ys = 0.5*(1+y_space)*(y2-y1) axes.set_ylim(yc-ys, yc+ys) # If we weren't supposed to zoom x or y, reset them if not zoomx: axes.set_xlim(x10, x20) if not zoomy: axes.set_ylim(y10, y20) if draw: _pylab.ion() _pylab.draw()
def function[auto_zoom, parameter[zoomx, zoomy, axes, x_space, y_space, draw]]: constant[ Looks at the bounds of the plotted data and zooms accordingly, leaving some space around the data. ] call[name[_pylab].ioff, parameter[]] if compare[name[axes] equal[==] constant[gca]] begin[:] variable[axes] assign[=] call[name[_pylab].gca, parameter[]] <ast.Tuple object at 0x7da20c6c4550> assign[=] call[name[axes].get_xlim, parameter[]] <ast.Tuple object at 0x7da20c6c53c0> assign[=] call[name[axes].get_ylim, parameter[]] call[name[axes].autoscale, parameter[]] if compare[call[name[axes].get_xscale, parameter[]] equal[==] constant[linear]] begin[:] <ast.Tuple object at 0x7da18ede59c0> assign[=] call[name[axes].get_xlim, parameter[]] variable[xc] assign[=] binary_operation[constant[0.5] * binary_operation[name[x1] + name[x2]]] variable[xs] assign[=] binary_operation[binary_operation[constant[0.5] * binary_operation[constant[1] + name[x_space]]] * binary_operation[name[x2] - name[x1]]] call[name[axes].set_xlim, parameter[binary_operation[name[xc] - name[xs]], binary_operation[name[xc] + name[xs]]]] if compare[call[name[axes].get_yscale, parameter[]] equal[==] constant[linear]] begin[:] <ast.Tuple object at 0x7da18ede5c90> assign[=] call[name[axes].get_ylim, parameter[]] variable[yc] assign[=] binary_operation[constant[0.5] * binary_operation[name[y1] + name[y2]]] variable[ys] assign[=] binary_operation[binary_operation[constant[0.5] * binary_operation[constant[1] + name[y_space]]] * binary_operation[name[y2] - name[y1]]] call[name[axes].set_ylim, parameter[binary_operation[name[yc] - name[ys]], binary_operation[name[yc] + name[ys]]]] if <ast.UnaryOp object at 0x7da18ede6d10> begin[:] call[name[axes].set_xlim, parameter[name[x10], name[x20]]] if <ast.UnaryOp object at 0x7da18ede5930> begin[:] call[name[axes].set_ylim, parameter[name[y10], name[y20]]] if name[draw] begin[:] call[name[_pylab].ion, parameter[]] call[name[_pylab].draw, parameter[]]
keyword[def] identifier[auto_zoom] ( identifier[zoomx] = keyword[True] , identifier[zoomy] = keyword[True] , identifier[axes] = literal[string] , identifier[x_space] = literal[int] , identifier[y_space] = literal[int] , identifier[draw] = keyword[True] ): literal[string] identifier[_pylab] . identifier[ioff] () keyword[if] identifier[axes] == literal[string] : identifier[axes] = identifier[_pylab] . identifier[gca] () identifier[x10] , identifier[x20] = identifier[axes] . identifier[get_xlim] () identifier[y10] , identifier[y20] = identifier[axes] . identifier[get_ylim] () identifier[axes] . identifier[autoscale] ( identifier[enable] = keyword[True] , identifier[tight] = keyword[True] ) keyword[if] identifier[axes] . identifier[get_xscale] ()== literal[string] : identifier[x1] , identifier[x2] = identifier[axes] . identifier[get_xlim] () identifier[xc] = literal[int] *( identifier[x1] + identifier[x2] ) identifier[xs] = literal[int] *( literal[int] + identifier[x_space] )*( identifier[x2] - identifier[x1] ) identifier[axes] . identifier[set_xlim] ( identifier[xc] - identifier[xs] , identifier[xc] + identifier[xs] ) keyword[if] identifier[axes] . identifier[get_yscale] ()== literal[string] : identifier[y1] , identifier[y2] = identifier[axes] . identifier[get_ylim] () identifier[yc] = literal[int] *( identifier[y1] + identifier[y2] ) identifier[ys] = literal[int] *( literal[int] + identifier[y_space] )*( identifier[y2] - identifier[y1] ) identifier[axes] . identifier[set_ylim] ( identifier[yc] - identifier[ys] , identifier[yc] + identifier[ys] ) keyword[if] keyword[not] identifier[zoomx] : identifier[axes] . identifier[set_xlim] ( identifier[x10] , identifier[x20] ) keyword[if] keyword[not] identifier[zoomy] : identifier[axes] . identifier[set_ylim] ( identifier[y10] , identifier[y20] ) keyword[if] identifier[draw] : identifier[_pylab] . identifier[ion] () identifier[_pylab] . identifier[draw] ()
def auto_zoom(zoomx=True, zoomy=True, axes='gca', x_space=0.04, y_space=0.04, draw=True): """ Looks at the bounds of the plotted data and zooms accordingly, leaving some space around the data. """ # Disable auto-updating by default. _pylab.ioff() if axes == 'gca': axes = _pylab.gca() # depends on [control=['if'], data=['axes']] # get the current bounds (x10, x20) = axes.get_xlim() (y10, y20) = axes.get_ylim() # Autoscale using pylab's technique (catches the error bars!) axes.autoscale(enable=True, tight=True) # Add padding if axes.get_xscale() == 'linear': (x1, x2) = axes.get_xlim() xc = 0.5 * (x1 + x2) xs = 0.5 * (1 + x_space) * (x2 - x1) axes.set_xlim(xc - xs, xc + xs) # depends on [control=['if'], data=[]] if axes.get_yscale() == 'linear': (y1, y2) = axes.get_ylim() yc = 0.5 * (y1 + y2) ys = 0.5 * (1 + y_space) * (y2 - y1) axes.set_ylim(yc - ys, yc + ys) # depends on [control=['if'], data=[]] # If we weren't supposed to zoom x or y, reset them if not zoomx: axes.set_xlim(x10, x20) # depends on [control=['if'], data=[]] if not zoomy: axes.set_ylim(y10, y20) # depends on [control=['if'], data=[]] if draw: _pylab.ion() _pylab.draw() # depends on [control=['if'], data=[]]
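A usage sketch, assuming `_pylab` is the `pylab` module the function body manipulates:

import pylab as _pylab  # the module name the function body assumes

_pylab.plot([0, 1, 2], [0, 1, 4], 'o')
auto_zoom(x_space=0.04, y_space=0.04)  # pad the autoscaled range by 4% total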
def HA1(realm, username, password, algorithm):
    """Create HA1 hash from realm, username, and password

    HA1 = md5(A1) = MD5(username:realm:password)
    """
    if not realm:
        realm = u''

    return H(b":".join([username.encode('utf-8'), realm.encode('utf-8'), password.encode('utf-8')]), algorithm)
def function[HA1, parameter[realm, username, password, algorithm]]: constant[Create HA1 hash from realm, username, and password HA1 = md5(A1) = MD5(username:realm:password) ] if <ast.UnaryOp object at 0x7da1b21bb070> begin[:] variable[realm] assign[=] constant[] return[call[name[H], parameter[call[constant[b':'].join, parameter[list[[<ast.Call object at 0x7da1b21b8d90>, <ast.Call object at 0x7da1b21b9c90>, <ast.Call object at 0x7da1b21b8a00>]]]], name[algorithm]]]]
keyword[def] identifier[HA1] ( identifier[realm] , identifier[username] , identifier[password] , identifier[algorithm] ): literal[string] keyword[if] keyword[not] identifier[realm] : identifier[realm] = literal[string] keyword[return] identifier[H] ( literal[string] . identifier[join] ([ identifier[username] . identifier[encode] ( literal[string] ), identifier[realm] . identifier[encode] ( literal[string] ), identifier[password] . identifier[encode] ( literal[string] )]), identifier[algorithm] )
def HA1(realm, username, password, algorithm):
    """Create HA1 hash from realm, username, and password

    HA1 = md5(A1) = MD5(username:realm:password)
    """
    if not realm:
        realm = u'' # depends on [control=['if'], data=[]]
    return H(b':'.join([username.encode('utf-8'), realm.encode('utf-8'), password.encode('utf-8')]), algorithm)
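A worked example using the credentials from RFC 2617's digest example; `H(data, algorithm)` is the module's own hex-digest helper, so `hashlib` stands in for it here as an assumption:

import hashlib

def H(data, algorithm="md5"):
    # Stand-in for the real H() helper (an assumption about its behavior).
    return hashlib.new(algorithm, data).hexdigest()

ha1 = H(b":".join([b"Mufasa",
                   b"testrealm@host.com",
                   b"Circle Of Life"]), "md5")
# ha1 is the MD5 hex digest of "Mufasa:testrealm@host.com:Circle Of Life"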
def cumulative_std(self):
        """
        Return the cumulative standard deviation of the elements in the SArray.

        Returns an SArray where each element in the output corresponds to the
        standard deviation of all the elements preceding and including it. The
        SArray is expected to be of numeric type, or a numeric vector type.

        Returns
        -------
        out : SArray[float]

        Notes
        -----
         - Missing values are ignored while performing the cumulative
           aggregate operation.

        Examples
        --------
        >>> sa = SArray([1, 2, 3, 4, 0])
        >>> sa.cumulative_std()
        dtype: float
        rows: 5
        [0.0, 0.5, 0.816496580927726, 1.118033988749895, 1.4142135623730951]
        """
        from .. import extensions
        agg_op = "__builtin__cum_std__"
        return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
def function[cumulative_std, parameter[self]]: constant[ Return the cumulative standard deviation of the elements in the SArray. Returns an SArray where each element in the output corresponds to the standard deviation of all the elements preceding and including it. The SArray is expected to be of numeric type, or a numeric vector type. Returns ------- out : SArray[float] Notes ----- - Missing values are ignored while performing the cumulative aggregate operation. Examples -------- >>> sa = SArray([1, 2, 3, 4, 0]) >>> sa.cumulative_std() dtype: float rows: 5 [0.0, 0.5, 0.816496580927726, 1.118033988749895, 1.4142135623730951] ] from relative_module[None] import module[extensions] variable[agg_op] assign[=] constant[__builtin__cum_std__] return[call[name[SArray], parameter[]]]
keyword[def] identifier[cumulative_std] ( identifier[self] ): literal[string] keyword[from] .. keyword[import] identifier[extensions] identifier[agg_op] = literal[string] keyword[return] identifier[SArray] ( identifier[_proxy] = identifier[self] . identifier[__proxy__] . identifier[builtin_cumulative_aggregate] ( identifier[agg_op] ))
def cumulative_std(self):
    """
    Return the cumulative standard deviation of the elements in the SArray.

    Returns an SArray where each element in the output corresponds to the
    standard deviation of all the elements preceding and including it. The
    SArray is expected to be of numeric type, or a numeric vector type.

    Returns
    -------
    out : SArray[float]

    Notes
    -----
     - Missing values are ignored while performing the cumulative
       aggregate operation.

    Examples
    --------
    >>> sa = SArray([1, 2, 3, 4, 0])
    >>> sa.cumulative_std()
    dtype: float
    rows: 5
    [0.0, 0.5, 0.816496580927726, 1.118033988749895, 1.4142135623730951]
    """
    from .. import extensions
    agg_op = '__builtin__cum_std__'
    return SArray(_proxy=self.__proxy__.builtin_cumulative_aggregate(agg_op))
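A NumPy cross-check of the docstring example; `np.std` defaults to the population standard deviation (`ddof=0`), which is what the values above reflect:

import numpy as np

x = np.array([1, 2, 3, 4, 0])
cum_std = [np.std(x[:i + 1]) for i in range(len(x))]
# -> [0.0, 0.5, 0.8164..., 1.1180..., 1.4142...], one value per prefix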
def adjustPhase(self, adjustment):
        """
        Adjust the accelerating phase of the cavity by the value of
        ``adjustment``.  The adjustment is additive, so a value of
        ``adjustment = 0.0`` will result in no change of the phase.
        """
        self.phase = self.phase._replace(val = self.phase.val + adjustment)
def function[adjustPhase, parameter[self, adjustment]]: constant[ Adjust the accelerating phase of the cavity by the value of ``adjustment``. The adjustment is additive, so a value of ``adjustment = 0.0`` will result in no change of the phase. ] name[self].phase assign[=] call[name[self].phase._replace, parameter[]]
keyword[def] identifier[adjustPhase] ( identifier[self] , identifier[adjustment] ): literal[string] identifier[self] . identifier[phase] = identifier[self] . identifier[phase] . identifier[_replace] ( identifier[val] = identifier[self] . identifier[phase] . identifier[val] + identifier[adjustment] )
def adjustPhase(self, adjustment):
    """
    Adjust the accelerating phase of the cavity by the value of
    ``adjustment``.  The adjustment is additive, so a value of
    ``adjustment = 0.0`` will result in no change of the phase.
    """
    self.phase = self.phase._replace(val=self.phase.val + adjustment)
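A sketch of the namedtuple pattern the method relies on: `self.phase` is assumed to be a namedtuple with a `val` field, so `_replace` returns an updated copy rather than mutating in place:

from collections import namedtuple

Param = namedtuple("Param", ["val", "units"])  # hypothetical field layout

phase = Param(val=30.0, units="deg")
phase = phase._replace(val=phase.val + 5.0)  # additive, like adjustPhase
assert phase.val == 35.0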