code: string, lengths 75 to 104k
docstring: string, lengths 1 to 46.9k
def get_reports(self): """ Retrieve all reports submitted for this Sample. :return: A list of :class:`.Report` """ url = '{}reports/'.format(self.url) return Report._get_list_from_url(url, append_base_url=False)
Retrieve all reports submitted for this Sample. :return: A list of :class:`.Report`
def norm(self, valu): ''' Normalize the value for a given type. Args: valu (obj): The value to normalize. Returns: ((obj,dict)): The normalized valu, info tuple. Notes: The info dictionary uses the following key conventions: subs (dict): The normalized sub-fields as name: valu entries. ''' func = self._type_norms.get(type(valu)) if func is None: raise s_exc.NoSuchFunc(name=self.name, mesg='no norm for type: %r' % (type(valu),)) return func(valu)
Normalize the value for a given type. Args: valu (obj): The value to normalize. Returns: ((obj,dict)): The normalized valu, info tuple. Notes: The info dictionary uses the following key conventions: subs (dict): The normalized sub-fields as name: valu entries.
def getFloat(self, name, default=0.0, parent_search=False, multikeys_search=False): """ Retrieve a configuration element as a float. """ try: value = self.get(name, default, parent_search, multikeys_search) return float(value) except Exception: # no configuration found, or conversion impossible? return default
Retrieve a configuration element as a float.
def circleconvert(amount, currentformat, newformat): """ Convert a circle measurement. :type amount: number :param amount: The number to convert. :type currentformat: string :param currentformat: The format of the provided value. :type newformat: string :param newformat: The intended format of the value. >>> circleconvert(45, "radius", "diameter") 90 """ # If the same format was provided if currentformat.lower() == newformat.lower(): # Return the provided value return amount # If the lowercase version of the current format is 'radius' if currentformat.lower() == 'radius': # If the lowercase version of the new format is 'diameter' if newformat.lower() == 'diameter': # Return the converted value return amount * 2 # If the lowercase version of the new format is 'circumference' elif newformat.lower() == 'circumference': # Return the converted value return amount * 2 * math.pi # Raise an error raise ValueError("Invalid new format provided.") # If the lowercase version of the current format is 'diameter' elif currentformat.lower() == 'diameter': # If the lowercase version of the new format is 'radius' if newformat.lower() == 'radius': # Return the converted value return amount / 2 # If the lowercase version of the new format is 'circumference' elif newformat.lower() == 'circumference': # Return the converted value return amount * math.pi # Raise an error raise ValueError("Invalid new format provided.") # If the lowercase version of the current format is 'circumference' elif currentformat.lower() == 'circumference': # If the lowercase version of the new format is 'radius' if newformat.lower() == 'radius': # Return the converted value return amount / math.pi / 2 # If the lowercase version of the new format is 'diameter' elif newformat.lower() == 'diameter': # Return the converted value return amount / math.pi # Raise an error raise ValueError("Invalid new format provided.") # Raise an error for an unknown current format raise ValueError("Invalid current format provided.")
Convert a circle measurement. :type amount: number :param amount: The number to convert. :type currentformat: string :param currentformat: The format of the provided value. :type newformat: string :param newformat: The intended format of the value. >>> circleconvert(45, "radius", "diameter") 90
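A quick usage sketch for the entry above (assumes the surrounding module imports math and that circleconvert is in scope):

import math

print(circleconvert(45, "radius", "diameter"))                # 90
print(circleconvert(10, "diameter", "circumference"))         # 10 * pi, about 31.4159
print(circleconvert(2 * math.pi, "circumference", "radius"))  # 1.0
print(circleconvert(7, "radius", "radius"))                   # 7, same format is returned unchanged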
def _ftp_pwd(self): """Variant of `self.ftp.pwd()` that supports encoding-fallback. Returns: Current working directory as native string. """ try: return self.ftp.pwd() except UnicodeEncodeError: if compat.PY2 or self.ftp.encoding != "utf-8": raise # should not happen, since Py2 does not try to encode # TODO: this is NOT THREAD-SAFE! prev_encoding = self.ftp.encoding try: write("ftp.pwd() failed with utf-8: trying Cp1252...", warning=True) self.ftp.encoding = "cp1252" return self.ftp.pwd() finally: self.ftp.encoding = prev_encoding
Variant of `self.ftp.pwd()` that supports encoding-fallback. Returns: Current working directory as native string.
def name(self): """ :return: A unicode string of the field name of the chosen alternative """ if not self._name: self._name = self._alternatives[self._choice][0] return self._name
:return: A unicode string of the field name of the chosen alternative
def dir2fn(ofn, ifn, suffix) -> Union[None, Path]: """ ofn = filename or output directory, to create filename based on ifn ifn = input filename (don't overwrite!) suffix = desired file extension e.g. .h5 """ if not ofn: # no output file desired return None ofn = Path(ofn).expanduser() ifn = Path(ifn).expanduser() assert ifn.is_file() if ofn.suffix == suffix: # must already be a filename pass else: # must be a directory assert ofn.is_dir(), f'create directory {ofn}' ofn = ofn / ifn.with_suffix(suffix).name try: assert not ofn.samefile(ifn), f'do not overwrite input file! {ifn}' except FileNotFoundError: # a good thing, the output file doesn't exist and hence it's not the input file pass return ofn
ofn = filename or output directory, to create filename based on ifn ifn = input filename (don't overwrite!) suffix = desired file extension e.g. .h5
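A minimal usage sketch for dir2fn (hypothetical temporary paths; the only requirements are an existing input file and an existing output directory):

import tempfile
from pathlib import Path

tmp = Path(tempfile.mkdtemp())
ifn = tmp / 'capture.dat'
ifn.touch()                       # the input file must exist
outdir = tmp / 'converted'
outdir.mkdir()

print(dir2fn(outdir, ifn, '.h5'))   # .../converted/capture.h5
print(dir2fn(None, ifn, '.h5'))     # None, meaning no output file is desired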
def get_static(root=None): ''' .. versionadded:: 2015.8.5 Return a list of all static services root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.get_static ''' ret = set() # Get static systemd units. Can't use --state=static here because it's # not present until systemd 216. out = __salt__['cmd.run']( _systemctl_cmd('--full --no-legend --no-pager list-unit-files', root=root), python_shell=False, ignore_retcode=True) for line in salt.utils.itertools.split(out, '\n'): try: fullname, unit_state = line.strip().split(None, 1) except ValueError: continue else: if unit_state != 'static': continue try: unit_name, unit_type = fullname.rsplit('.', 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == 'service' else fullname) # sysvinit services cannot be static return sorted(ret)
.. versionadded:: 2015.8.5 Return a list of all static services root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.get_static
def write_vtu(Verts, Cells, pdata=None, pvdata=None, cdata=None, cvdata=None, fname='output.vtk'): """Write a .vtu file in xml format. Parameters ---------- fname : {string} file to be written, e.g. 'mymesh.vtu' Verts : {array} Ndof x 3 (if 2, then expanded by 0) list of (x,y,z) point coordinates Cells : {dictionary} Dictionary of with the keys pdata : {array} Ndof x Nfields array of scalar values for the vertices pvdata : {array} Nfields*3 x Ndof array of vector values for the vertices cdata : {dictionary} scalar valued cell data cvdata : {dictionary} vector valued cell data Returns ------- writes a .vtu file for use in Paraview Notes ----- - Poly data not supported - Non-Poly data is stored in Numpy array: Ncell x vtk_cell_info - Each I1 must be >=3 - pdata = Ndof x Nfields - pvdata = 3*Ndof x Nfields - cdata,cvdata = list of dictionaries in the form of Cells ===== =================== ============= === keys type n points dim ===== =================== ============= === 1 VTK_VERTEX: 1 point 2d 2 VTK_POLY_VERTEX: n points 2d 3 VTK_LINE: 2 points 2d 4 VTK_POLY_LINE: n+1 points 2d 5 VTK_TRIANGLE: 3 points 2d 6 VTK_TRIANGLE_STRIP: n+2 points 2d 7 VTK_POLYGON: n points 2d 8 VTK_PIXEL: 4 points 2d 9 VTK_QUAD: 4 points 2d 10 VTK_TETRA: 4 points 3d 11 VTK_VOXEL: 8 points 3d 12 VTK_HEXAHEDRON: 8 points 3d 13 VTK_WEDGE: 6 points 3d 14 VTK_PYRAMID: 5 points 3d ===== =================== ============= === Examples -------- >>> from pyamg.vis import write_vtu >>> import numpy as np >>> Verts = np.array([[0.0,0.0], ... [1.0,0.0], ... [2.0,0.0], ... [0.0,1.0], ... [1.0,1.0], ... [2.0,1.0], ... [0.0,2.0], ... [1.0,2.0], ... [2.0,2.0], ... [0.0,3.0], ... [1.0,3.0], ... [2.0,3.0]]) >>> E2V = np.array([[0,4,3], ... [0,1,4], ... [1,5,4], ... [1,2,5], ... [3,7,6], ... [3,4,7], ... [4,8,7], ... [4,5,8], ... [6,10,9], ... [6,7,10], ... [7,11,10], ... 
[7,8,11]]) >>> E2edge = np.array([[0,1]]) >>> E2point = np.array([2,3,4,5]) >>> Cells = {5:E2V,3:E2edge,1:E2point} >>> pdata=np.ones((12,2)) >>> pvdata=np.ones((12*3,2)) >>> cdata={5:np.ones((12,2)),3:np.ones((1,2)),1:np.ones((4,2))} >>> cvdata={5:np.ones((3*12,2)),3:np.ones((3*1,2)), 1:np.ones((3*4,2))} >>> write_vtu(Verts=Verts, Cells=Cells, fname='test.vtu') See Also -------- write_mesh """ # number of indices per cell for each cell type vtk_cell_info = [-1, 1, None, 2, None, 3, None, None, 4, 4, 4, 8, 8, 6, 5] # check fname if isinstance(fname, str): try: fname = open(fname, 'w') except IOError as e: print(".vtu error (%s): %s" % (e.errno, e.strerror)) else: raise ValueError('fname is assumed to be a string') # check Verts # get dimension and verify that it's 3d data Ndof, dim = Verts.shape if dim == 2: # always use 3d coordinates (x,y) -> (x,y,0) Verts = np.hstack((Verts, np.zeros((Ndof, 1)))) # check Cells # keys must ve valid (integer and not "None" in vtk_cell_info) # Cell data can't be empty for a non empty key for key in Cells: if ((not isinstance(key, int)) or (key not in list(range(1, 15)))): raise ValueError('cell array must have positive integer keys\ in [1,14]') if (vtk_cell_info[key] is None) and (Cells[key] is not None): # Poly data raise NotImplementedError('Poly Data not implemented yet') if Cells[key] is None: raise ValueError('cell array cannot be empty for\ key %d' % (key)) if np.ndim(Cells[key]) != 2: Cells[key] = Cells[key].reshape((Cells[key].size, 1)) if vtk_cell_info[key] != Cells[key].shape[1]: raise ValueError('cell array has %d columns, expected %d' % (Cells[key].shape[1], vtk_cell_info[key])) # check pdata # must be Ndof x n_pdata n_pdata = 0 if pdata is not None: if np.ndim(pdata) > 1: n_pdata = pdata.shape[1] else: n_pdata = 1 pdata = pdata.reshape((pdata.size, 1)) if pdata.shape[0] != Ndof: raise ValueError('pdata array should be length %d (it is %d)' % (Ndof, pdata.shape[0])) # check pvdata # must be 3*Ndof x n_pvdata n_pvdata = 0 if pvdata is not None: if np.ndim(pvdata) > 1: n_pvdata = pvdata.shape[1] else: n_pvdata = 1 pvdata = pvdata.reshape((pvdata.size, 1)) if pvdata.shape[0] != 3*Ndof: raise ValueError('pvdata array should be of size %d (or multiples)\ (it is now %d)' % (Ndof*3, pvdata.shape[0])) # check cdata # must be NCells x n_cdata for each key n_cdata = 0 if cdata is not None: for key in Cells: # all valid now if np.ndim(cdata[key]) > 1: if n_cdata == 0: n_cdata = cdata[key].shape[1] elif n_cdata != cdata[key].shape[1]: raise ValueError('cdata dimension problem') else: n_cdata = 1 cdata[key] = cdata[key].reshape((cdata[key].size, 1)) if cdata[key].shape[0] != Cells[key].shape[0]: raise ValueError('size mismatch with cdata %d and Cells %d' % (cdata[key].shape[0], Cells[key].shape[0])) if cdata[key] is None: raise ValueError('cdata array cannot be empty for key %d' % (key)) # check cvdata # must be NCells*3 x n_cdata for each key n_cvdata = 0 if cvdata is not None: for key in Cells: # all valid now if np.ndim(cvdata[key]) > 1: if n_cvdata == 0: n_cvdata = cvdata[key].shape[1] elif n_cvdata != cvdata[key].shape[1]: raise ValueError('cvdata dimension problem') else: n_cvdata = 1 cvdata[key] = cvdata[key].reshape((cvdata[key].size, 1)) if cvdata[key].shape[0] != 3*Cells[key].shape[0]: raise ValueError('size mismatch with cvdata and Cells') if cvdata[key] is None: raise ValueError('cvdata array cannot be empty for key %d' % (key)) Ncells = 0 cell_ind = [] cell_offset = [] # np.zeros((Ncells,1),dtype=uint8) # zero indexed cell_type = [] # 
np.zeros((Ncells,1),dtype=uint8) cdata_all = None cvdata_all = None for key in Cells: # non-Poly data sz = Cells[key].shape[0] offset = Cells[key].shape[1] Ncells += sz uu = np.ones((sz,), dtype='uint8') cell_ind = np.hstack((cell_ind, Cells[key].ravel())) cell_offset = np.hstack((cell_offset, offset*uu)) cell_type = np.hstack((cell_type, key*uu)) if cdata is not None: if cdata_all is None: cdata_all = cdata[key] else: cdata_all = np.vstack((cdata_all, cdata[key])) if cvdata is not None: if cvdata_all is None: cvdata_all = cvdata[key] else: cvdata_all = np.vstack((cvdata_all, cvdata[key])) # doc element doc = xml.dom.minidom.Document() # vtk element root = doc.createElementNS('VTK', 'VTKFile') d = {'type': 'UnstructuredGrid', 'version': '0.1', 'byte_order': 'LittleEndian'} set_attributes(d, root) # unstructured element grid = doc.createElementNS('VTK', 'UnstructuredGrid') # piece element piece = doc.createElementNS('VTK', 'Piece') d = {'NumberOfPoints': str(Ndof), 'NumberOfCells': str(Ncells)} set_attributes(d, piece) # POINTS # points element points = doc.createElementNS('VTK', 'Points') # data element points_data = doc.createElementNS('VTK', 'DataArray') d = {'type': 'Float32', 'Name': 'vertices', 'NumberOfComponents': '3', 'format': 'ascii'} set_attributes(d, points_data) # string for data element points_data_str = doc.createTextNode(a2s(Verts)) # CELLS # points element cells = doc.createElementNS('VTK', 'Cells') # data element cells_data = doc.createElementNS('VTK', 'DataArray') d = {'type': 'Int32', 'Name': 'connectivity', 'format': 'ascii'} set_attributes(d, cells_data) # string for data element cells_data_str = doc.createTextNode(a2s(cell_ind)) # offset data element cells_offset_data = doc.createElementNS('VTK', 'DataArray') d = {'type': 'Int32', 'Name': 'offsets', 'format': 'ascii'} set_attributes(d, cells_offset_data) # string for data element cells_offset_data_str = doc.createTextNode(a2s(cell_offset.cumsum())) # offset data element cells_type_data = doc.createElementNS('VTK', 'DataArray') d = {'type': 'UInt8', 'Name': 'types', 'format': 'ascii'} set_attributes(d, cells_type_data) # string for data element cells_type_data_str = doc.createTextNode(a2s(cell_type)) # POINT DATA pointdata = doc.createElementNS('VTK', 'PointData') # pdata pdata_obj = [] pdata_str = [] for i in range(0, n_pdata): pdata_obj.append(doc.createElementNS('VTK', 'DataArray')) d = {'type': 'Float32', 'Name': 'pdata %d' % (i), 'NumberOfComponents': '1', 'format': 'ascii'} set_attributes(d, pdata_obj[i]) pdata_str.append(doc.createTextNode(a2s(pdata[:, i]))) # pvdata pvdata_obj = [] pvdata_str = [] for i in range(0, n_pvdata): pvdata_obj.append(doc.createElementNS('VTK', 'DataArray')) d = {'type': 'Float32', 'Name': 'pvdata %d' % (i), 'NumberOfComponents': '3', 'format': 'ascii'} set_attributes(d, pvdata_obj[i]) pvdata_str.append(doc.createTextNode(a2s(pvdata[:, i]))) # CELL DATA celldata = doc.createElementNS('VTK', 'CellData') # cdata cdata_obj = [] cdata_str = [] for i in range(0, n_cdata): cdata_obj.append(doc.createElementNS('VTK', 'DataArray')) d = {'type': 'Float32', 'Name': 'cdata %d' % (i), 'NumberOfComponents': '1', 'format': 'ascii'} set_attributes(d, cdata_obj[i]) cdata_str.append(doc.createTextNode(a2s(cdata_all[:, i]))) # cvdata cvdata_obj = [] cvdata_str = [] for i in range(0, n_cvdata): cvdata_obj.append(doc.createElementNS('VTK', 'DataArray')) d = {'type': 'Float32', 'Name': 'cvdata %d' % (i), 'NumberOfComponents': '3', 'format': 'ascii'} set_attributes(d, cvdata_obj[i]) 
cvdata_str.append(doc.createTextNode(a2s(cvdata_all[:, i]))) doc.appendChild(root) root.appendChild(grid) grid.appendChild(piece) piece.appendChild(points) points.appendChild(points_data) points_data.appendChild(points_data_str) piece.appendChild(cells) cells.appendChild(cells_data) cells.appendChild(cells_offset_data) cells.appendChild(cells_type_data) cells_data.appendChild(cells_data_str) cells_offset_data.appendChild(cells_offset_data_str) cells_type_data.appendChild(cells_type_data_str) piece.appendChild(pointdata) for i in range(0, n_pdata): pointdata.appendChild(pdata_obj[i]) pdata_obj[i].appendChild(pdata_str[i]) for i in range(0, n_pvdata): pointdata.appendChild(pvdata_obj[i]) pvdata_obj[i].appendChild(pvdata_str[i]) piece.appendChild(celldata) for i in range(0, n_cdata): celldata.appendChild(cdata_obj[i]) cdata_obj[i].appendChild(cdata_str[i]) for i in range(0, n_cvdata): celldata.appendChild(cvdata_obj[i]) cvdata_obj[i].appendChild(cvdata_str[i]) doc.writexml(fname, newl='\n') fname.close()
Write a .vtu file in xml format. Parameters ---------- fname : {string} file to be written, e.g. 'mymesh.vtu' Verts : {array} Ndof x 3 (if 2, then expanded by 0) list of (x,y,z) point coordinates Cells : {dictionary} Dictionary of with the keys pdata : {array} Ndof x Nfields array of scalar values for the vertices pvdata : {array} Nfields*3 x Ndof array of vector values for the vertices cdata : {dictionary} scalar valued cell data cvdata : {dictionary} vector valued cell data Returns ------- writes a .vtu file for use in Paraview Notes ----- - Poly data not supported - Non-Poly data is stored in Numpy array: Ncell x vtk_cell_info - Each I1 must be >=3 - pdata = Ndof x Nfields - pvdata = 3*Ndof x Nfields - cdata,cvdata = list of dictionaries in the form of Cells ===== =================== ============= === keys type n points dim ===== =================== ============= === 1 VTK_VERTEX: 1 point 2d 2 VTK_POLY_VERTEX: n points 2d 3 VTK_LINE: 2 points 2d 4 VTK_POLY_LINE: n+1 points 2d 5 VTK_TRIANGLE: 3 points 2d 6 VTK_TRIANGLE_STRIP: n+2 points 2d 7 VTK_POLYGON: n points 2d 8 VTK_PIXEL: 4 points 2d 9 VTK_QUAD: 4 points 2d 10 VTK_TETRA: 4 points 3d 11 VTK_VOXEL: 8 points 3d 12 VTK_HEXAHEDRON: 8 points 3d 13 VTK_WEDGE: 6 points 3d 14 VTK_PYRAMID: 5 points 3d ===== =================== ============= === Examples -------- >>> from pyamg.vis import write_vtu >>> import numpy as np >>> Verts = np.array([[0.0,0.0], ... [1.0,0.0], ... [2.0,0.0], ... [0.0,1.0], ... [1.0,1.0], ... [2.0,1.0], ... [0.0,2.0], ... [1.0,2.0], ... [2.0,2.0], ... [0.0,3.0], ... [1.0,3.0], ... [2.0,3.0]]) >>> E2V = np.array([[0,4,3], ... [0,1,4], ... [1,5,4], ... [1,2,5], ... [3,7,6], ... [3,4,7], ... [4,8,7], ... [4,5,8], ... [6,10,9], ... [6,7,10], ... [7,11,10], ... [7,8,11]]) >>> E2edge = np.array([[0,1]]) >>> E2point = np.array([2,3,4,5]) >>> Cells = {5:E2V,3:E2edge,1:E2point} >>> pdata=np.ones((12,2)) >>> pvdata=np.ones((12*3,2)) >>> cdata={5:np.ones((12,2)),3:np.ones((1,2)),1:np.ones((4,2))} >>> cvdata={5:np.ones((3*12,2)),3:np.ones((3*1,2)), 1:np.ones((3*4,2))} >>> write_vtu(Verts=Verts, Cells=Cells, fname='test.vtu') See Also -------- write_mesh
def get_object(self, url, month_format='%b', day_format='%d'): """ Parses the date from a url and uses it in the query. For objects which are unique for date. """ params = self.get_params(url) try: year = params[self._meta.year_part] month = params[self._meta.month_part] day = params[self._meta.day_part] except KeyError: try: # named lookups failed, so try to get the date using the first # three parameters year, month, day = params['_0'], params['_1'], params['_2'] except KeyError: raise OEmbedException('Error extracting date from url parameters') try: tt = time.strptime('%s-%s-%s' % (year, month, day), '%s-%s-%s' % ('%Y', month_format, day_format)) date = datetime.date(*tt[:3]) except ValueError: raise OEmbedException('Error parsing date from: %s' % url) # apply the date-specific lookups if isinstance(self._meta.model._meta.get_field(self._meta.date_field), DateTimeField): min_date = datetime.datetime.combine(date, datetime.time.min) max_date = datetime.datetime.combine(date, datetime.time.max) query = {'%s__range' % self._meta.date_field: (min_date, max_date)} else: query = {self._meta.date_field: date} # apply the regular search lookups for key, value in self._meta.fields_to_match.iteritems(): try: query[value] = params[key] except KeyError: raise OEmbedException('%s was not found in the urlpattern parameters. Valid names are: %s' % (key, ', '.join(params.keys()))) try: obj = self.get_queryset().get(**query) except self._meta.model.DoesNotExist: raise OEmbedException('Requested object not found') return obj
Parses the date from a url and uses it in the query. For objects which are unique for date.
def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info): """Called when an exception has been raised in the code run by ZeroRPC""" # Hide the zerorpc internal frames for readability, for a REQ/REP or # REQ/STREAM server the frames to hide are: # - core.ServerBase._async_task # - core.Pattern*.process_call # - core.DecoratorBase.__call__ # # For a PUSH/PULL or PUB/SUB server the frame to hide is: # - core.Puller._receiver if self._hide_zerorpc_frames: traceback = exc_info[2] while traceback: zerorpc_frame = traceback.tb_frame zerorpc_frame.f_locals["__traceback_hide__"] = True frame_info = inspect.getframeinfo(zerorpc_frame) # Is there a better way than this (or looking up the filenames # or hardcoding the number of frames to skip) to know when we # are out of zerorpc? if frame_info.function == "__call__" or frame_info.function == "_receiver": break traceback = traceback.tb_next self._elasticapm_client.capture_exception(exc_info, extra=task_ctx, handled=False)
Called when an exception has been raised in the code run by ZeroRPC
def close(self): """Close this change stream. Stops any "async for" loops using this change stream. """ if self.delegate: return self._close() # Never started. future = self._framework.get_future(self.get_io_loop()) future.set_result(None) return future
Close this change stream. Stops any "async for" loops using this change stream.
def get_location(dom, location): """ Get the node at the specified location in the dom. Location is a sequence of child indices, starting at the children of the root element. If there is no node at this location, raise a ValueError. """ node = dom.documentElement for i in location: node = get_child(node, i) if not node: raise ValueError('Node at location %s does not exist.' % location) #TODO: line not covered return node
Get the node at the specified location in the dom. Location is a sequence of child indices, starting at the children of the root element. If there is no node at this location, raise a ValueError.
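To illustrate the child-index addressing, a small sketch; get_child is not shown in this entry, so the helper below is a hypothetical stand-in that indexes element children only:

import xml.dom.minidom

def get_child(node, i):
    # hypothetical helper: return the i-th element child, or None if out of range
    elements = [c for c in node.childNodes if c.nodeType == c.ELEMENT_NODE]
    return elements[i] if i < len(elements) else None

dom = xml.dom.minidom.parseString('<root><a><b/></a><c/></root>')
print(get_location(dom, [0, 0]).tagName)   # 'b'
print(get_location(dom, [1]).tagName)      # 'c'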
def allele_reads_from_locus_reads(locus_reads, n_ref): """ Given a collection of LocusRead objects, generates AlleleRead objects (which are split into prefix/allele/suffix nucleotide strings). Parameters ---------- locus_reads : sequence of LocusRead records n_ref : int Number of reference nucleotides affected by the variant. Yields AlleleRead objects. """ for locus_read in locus_reads: allele_read = AlleleRead.from_locus_read(locus_read, n_ref) if allele_read is not None: yield allele_read
Given a collection of LocusRead objects, generates AlleleRead objects (which are split into prefix/allele/suffix nucleotide strings). Parameters ---------- locus_reads : sequence of LocusRead records n_ref : int Number of reference nucleotides affected by the variant. Yields AlleleRead objects.
def _compensate_pressure(self, adc_p): """Compensate pressure. Formula from datasheet Bosch BME280 Environmental sensor. 8.1 Compensation formulas in double precision floating point Edition BST-BME280-DS001-10 | Revision 1.1 | May 2015. """ var_1 = (self._temp_fine / 2.0) - 64000.0 var_2 = ((var_1 / 4.0) * (var_1 / 4.0)) / 2048 var_2 *= self._calibration_p[5] var_2 += ((var_1 * self._calibration_p[4]) * 2.0) var_2 = (var_2 / 4.0) + (self._calibration_p[3] * 65536.0) var_1 = (((self._calibration_p[2] * (((var_1 / 4.0) * (var_1 / 4.0)) / 8192)) / 8) + ((self._calibration_p[1] * var_1) / 2.0)) var_1 /= 262144 var_1 = ((32768 + var_1) * self._calibration_p[0]) / 32768 if var_1 == 0: return 0 pressure = ((1048576 - adc_p) - (var_2 / 4096)) * 3125 if pressure < 0x80000000: pressure = (pressure * 2.0) / var_1 else: pressure = (pressure / var_1) * 2 var_1 = (self._calibration_p[8] * (((pressure / 8.0) * (pressure / 8.0)) / 8192.0)) / 4096 var_2 = ((pressure / 4.0) * self._calibration_p[7]) / 8192.0 pressure += ((var_1 + var_2 + self._calibration_p[6]) / 16.0) return pressure / 100
Compensate pressure. Formula from datasheet Bosch BME280 Environmental sensor. 8.1 Compensation formulas in double precision floating point Edition BST-BME280-DS001-10 | Revision 1.1 | May 2015.
def split_sentences(tokens): """Split sentences (based on tokenised data); yields each sentence as a list of tokens""" begin = 0 for i, token in enumerate(tokens): if is_end_of_sentence(tokens, i): yield tokens[begin:i+1] begin = i+1 if begin <= len(tokens)-1: yield tokens[begin:]
Split sentences (based on tokenised data); yields each sentence as a list of tokens
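is_end_of_sentence is defined elsewhere in that module; a toy stand-in is enough to see the generator in action:

def is_end_of_sentence(tokens, i):
    # hypothetical rule: a sentence ends at '.', '!' or '?'
    return tokens[i] in {'.', '!', '?'}

tokens = ['Hello', 'world', '.', 'How', 'are', 'you', '?']
print(list(split_sentences(tokens)))
# [['Hello', 'world', '.'], ['How', 'are', 'you', '?']]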
def editors(self): """ Returns the editors that are associated with this edit. :return [<XLineEdit>, ..] """ lay = self.layout() return [lay.itemAt(i).widget() for i in range(lay.count())]
Returns the editors that are associated with this edit. :return [<XLineEdit>, ..]
def action_spatial(self, action): """Given an Action, return the right spatial action.""" if self.surf.surf_type & SurfType.FEATURE: return action.action_feature_layer elif self.surf.surf_type & SurfType.RGB: return action.action_render else: assert self.surf.surf_type & (SurfType.RGB | SurfType.FEATURE)
Given an Action, return the right spatial action.
def connect(self): """Connects and subscribes to the WebSocket Feed.""" if not self.connected(): self._ws = create_connection(self.WS_URI) message = { 'type': self.WS_TYPE, 'product_id': self.WS_PRODUCT_ID } self._ws.send(dumps(message)) # There will be only one keep alive thread per client instance with self._lock: if not self._thread: self._thread = Thread(target=self._keep_alive_thread, args=[]) self._thread.start()
Connects and subscribes to the WebSocket Feed.
def add_value(self, value, index_point): """Add a new value at the provided index point, if that index does not already exist.""" if index_point not in self.index: self.values.append(value) self.index.append(index_point)
Add a new value at the provided index point, if that index does not already exist.
def ensure(assertion, message=None): """ Checks an assertion argument for truth-ness. Will return ``True`` or explicitly raise ``AssertionError``. This is to deal with environments using ``python -O`` or ``PYTHONOPTIMIZE=``. :param assertion: some value to evaluate for truth-ness :param message: optional message used for raising AssertionError """ message = message or assertion if not assertion: raise AssertionError(message) return True
Checks an assertion argument for truth-ness. Will return ``True`` or explicitly raise ``AssertionError``. This is to deal with environments using ``python -O`` or ``PYTHONOPTIMIZE=``. :param assertion: some value to evaluate for truth-ness :param message: optional message used for raising AssertionError
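Usage is straightforward; unlike a bare assert, the check still runs under python -O:

ensure(isinstance(42, int))                   # True
ensure(1 + 1 == 2, 'arithmetic is broken')    # True
try:
    ensure([], 'expected a non-empty list')
except AssertionError as exc:
    print(exc)                                # expected a non-empty list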
def bulk_overwrite(self, entities_and_kinds): """ Update the group to the given entities and sub-entity groups. After this operation, the only members of this EntityGroup will be the given entities, and sub-entity groups. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to set to the EntityGroup. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind. """ EntityGroupMembership.objects.filter(entity_group=self).delete() return self.bulk_add_entities(entities_and_kinds)
Update the group to the given entities and sub-entity groups. After this operation, the only members of this EntityGroup will be the given entities, and sub-entity groups. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to set to the EntityGroup. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind.
def setCodecPreferences(self, codecs): """ Override the default codec preferences. See :meth:`RTCRtpSender.getCapabilities` and :meth:`RTCRtpReceiver.getCapabilities` for the supported codecs. :param codecs: A list of :class:`RTCRtpCodecCapability`, in decreasing order of preference. If empty, restores the default preferences. """ if not codecs: self._preferred_codecs = [] capabilities = get_capabilities(self.kind).codecs unique = [] for codec in reversed(codecs): if codec not in capabilities: raise ValueError('Codec is not in capabilities') if codec not in unique: unique.insert(0, codec) self._preferred_codecs = unique
Override the default codec preferences. See :meth:`RTCRtpSender.getCapabilities` and :meth:`RTCRtpReceiver.getCapabilities` for the supported codecs. :param codecs: A list of :class:`RTCRtpCodecCapability`, in decreasing order of preference. If empty, restores the default preferences.
def compute_acf(cls, filename, start_index=None, end_index=None, per_walker=False, walkers=None, parameters=None, temps=None): """Computes the autocorrleation function of the model params in the given file. By default, parameter values are averaged over all walkers at each iteration. The ACF is then calculated over the averaged chain for each temperature. An ACF per-walker will be returned instead if ``per_walker=True``. Parameters ----------- filename : str Name of a samples file to compute ACFs for. start_index : {None, int} The start index to compute the acl from. If None, will try to use the number of burn-in iterations in the file; otherwise, will start at the first sample. end_index : {None, int} The end index to compute the acl to. If None, will go to the end of the current iteration. per_walker : optional, bool Return the ACF for each walker separately. Default is False. walkers : optional, int or array Calculate the ACF using only the given walkers. If None (the default) all walkers will be used. parameters : optional, str or array Calculate the ACF for only the given parameters. If None (the default) will calculate the ACF for all of the model params. temps : optional, (list of) int or 'all' The temperature index (or list of indices) to retrieve. If None (the default), the ACF will only be computed for the coldest (= 0) temperature chain. To compute an ACF for all temperates pass 'all', or a list of all of the temperatures. Returns ------- dict : Dictionary of arrays giving the ACFs for each parameter. If ``per-walker`` is True, the arrays will have shape ``ntemps x nwalkers x niterations``. Otherwise, the returned array will have shape ``ntemps x niterations``. """ acfs = {} with cls._io(filename, 'r') as fp: if parameters is None: parameters = fp.variable_params if isinstance(parameters, str) or isinstance(parameters, unicode): parameters = [parameters] if isinstance(temps, int): temps = [temps] elif temps == 'all': temps = numpy.arange(fp.ntemps) elif temps is None: temps = [0] for param in parameters: subacfs = [] for tk in temps: if per_walker: # just call myself with a single walker if walkers is None: walkers = numpy.arange(fp.nwalkers) arrays = [cls.compute_acfs(filename, start_index=start_index, end_index=end_index, per_walker=False, walkers=ii, parameters=param, temps=tk)[param][0, :] for ii in walkers] # we'll stack all of the walker arrays to make a single # nwalkers x niterations array; when these are stacked # below, we'll get a ntemps x nwalkers x niterations # array subacfs.append(numpy.vstack(arrays)) else: samples = fp.read_raw_samples( param, thin_start=start_index, thin_interval=1, thin_end=end_index, walkers=walkers, temps=tk, flatten=False)[param] # contract the walker dimension using the mean, and # flatten the (length 1) temp dimension samples = samples.mean(axis=1)[0, :] thisacf = autocorrelation.calculate_acf( samples).numpy() subacfs.append(thisacf) # stack the temperatures acfs[param] = numpy.stack(subacfs) return acfs
Computes the autocorrleation function of the model params in the given file. By default, parameter values are averaged over all walkers at each iteration. The ACF is then calculated over the averaged chain for each temperature. An ACF per-walker will be returned instead if ``per_walker=True``. Parameters ----------- filename : str Name of a samples file to compute ACFs for. start_index : {None, int} The start index to compute the acl from. If None, will try to use the number of burn-in iterations in the file; otherwise, will start at the first sample. end_index : {None, int} The end index to compute the acl to. If None, will go to the end of the current iteration. per_walker : optional, bool Return the ACF for each walker separately. Default is False. walkers : optional, int or array Calculate the ACF using only the given walkers. If None (the default) all walkers will be used. parameters : optional, str or array Calculate the ACF for only the given parameters. If None (the default) will calculate the ACF for all of the model params. temps : optional, (list of) int or 'all' The temperature index (or list of indices) to retrieve. If None (the default), the ACF will only be computed for the coldest (= 0) temperature chain. To compute an ACF for all temperates pass 'all', or a list of all of the temperatures. Returns ------- dict : Dictionary of arrays giving the ACFs for each parameter. If ``per-walker`` is True, the arrays will have shape ``ntemps x nwalkers x niterations``. Otherwise, the returned array will have shape ``ntemps x niterations``.
def _perturbation(self): """ Internal function for parameter initialization Returns Gaussian perturbation """ if self.P>1: scales = [] for term_i in range(self.n_randEffs): _scales = sp.randn(self.diag[term_i].shape[0]) if self.jitter[term_i]>0: _scales = sp.concatenate((_scales,sp.zeros(1))) scales.append(_scales) scales = sp.concatenate(scales) else: scales = sp.randn(self.vd.getNumberScales()) return scales
Internal function for parameter initialization Returns Gaussian perturbation
def _get_webapi_requests(self): """Update headers of webapi for Requests.""" headers = { 'Accept': '*/*', 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4', 'Connection': 'keep-alive', 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': 'http://music.163.com', 'Host': 'music.163.com', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36' } NCloudBot.req.headers.update(headers) return NCloudBot.req
Update headers of webapi for Requests.
def sim( self, src, tar, qval=1, mode='winkler', long_strings=False, boost_threshold=0.7, scaling_factor=0.1, ): """Return the Jaro or Jaro-Winkler similarity of two strings. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison qval : int The length of each q-gram (defaults to 1: character-wise matching) mode : str Indicates which variant of this distance metric to compute: - ``winkler`` -- computes the Jaro-Winkler distance (default) which increases the score for matches near the start of the word - ``jaro`` -- computes the Jaro distance long_strings : bool Set to True to "Increase the probability of a match when the number of matched characters is large. This option allows for a little more tolerance when the strings are large. It is not an appropriate test when comparing fixed length fields such as phone and social security numbers." (Used in 'winkler' mode only.) boost_threshold : float A value between 0 and 1, below which the Winkler boost is not applied (defaults to 0.7). (Used in 'winkler' mode only.) scaling_factor : float A value between 0 and 0.25, indicating by how much to boost scores for matching prefixes (defaults to 0.1). (Used in 'winkler' mode only.) Returns ------- float Jaro or Jaro-Winkler similarity Raises ------ ValueError Unsupported boost_threshold assignment; boost_threshold must be between 0 and 1. ValueError Unsupported scaling_factor assignment; scaling_factor must be between 0 and 0.25.' Examples -------- >>> round(sim_jaro_winkler('cat', 'hat'), 12) 0.777777777778 >>> round(sim_jaro_winkler('Niall', 'Neil'), 12) 0.805 >>> round(sim_jaro_winkler('aluminum', 'Catalan'), 12) 0.60119047619 >>> round(sim_jaro_winkler('ATCG', 'TAGC'), 12) 0.833333333333 >>> round(sim_jaro_winkler('cat', 'hat', mode='jaro'), 12) 0.777777777778 >>> round(sim_jaro_winkler('Niall', 'Neil', mode='jaro'), 12) 0.783333333333 >>> round(sim_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12) 0.60119047619 >>> round(sim_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12) 0.833333333333 """ if mode == 'winkler': if boost_threshold > 1 or boost_threshold < 0: raise ValueError( 'Unsupported boost_threshold assignment; ' + 'boost_threshold must be between 0 and 1.' ) if scaling_factor > 0.25 or scaling_factor < 0: raise ValueError( 'Unsupported scaling_factor assignment; ' + 'scaling_factor must be between 0 and 0.25.' ) if src == tar: return 1.0 src = QGrams(src.strip(), qval)._ordered_list tar = QGrams(tar.strip(), qval)._ordered_list lens = len(src) lent = len(tar) # If either string is blank - return - added in Version 2 if lens == 0 or lent == 0: return 0.0 if lens > lent: search_range = lens minv = lent else: search_range = lent minv = lens # Zero out the flags src_flag = [0] * search_range tar_flag = [0] * search_range search_range = max(0, search_range // 2 - 1) # Looking only within the search range, # count and flag the matched pairs. 
num_com = 0 yl1 = lent - 1 for i in range(lens): low_lim = (i - search_range) if (i >= search_range) else 0 hi_lim = (i + search_range) if ((i + search_range) <= yl1) else yl1 for j in range(low_lim, hi_lim + 1): if (tar_flag[j] == 0) and (tar[j] == src[i]): tar_flag[j] = 1 src_flag[i] = 1 num_com += 1 break # If no characters in common - return if num_com == 0: return 0.0 # Count the number of transpositions k = n_trans = 0 for i in range(lens): if src_flag[i] != 0: j = 0 for j in range(k, lent): # pragma: no branch if tar_flag[j] != 0: k = j + 1 break if src[i] != tar[j]: n_trans += 1 n_trans //= 2 # Main weight computation for Jaro distance weight = ( num_com / lens + num_com / lent + (num_com - n_trans) / num_com ) weight /= 3.0 # Continue to boost the weight if the strings are similar # This is the Winkler portion of Jaro-Winkler distance if mode == 'winkler' and weight > boost_threshold: # Adjust for having up to the first 4 characters in common j = 4 if (minv >= 4) else minv i = 0 while (i < j) and (src[i] == tar[i]): i += 1 weight += i * scaling_factor * (1.0 - weight) # Optionally adjust for long strings. # After agreeing beginning chars, at least two more must agree and # the agreeing characters must be > .5 of remaining characters. if ( long_strings and (minv > 4) and (num_com > i + 1) and (2 * num_com >= minv + i) ): weight += (1.0 - weight) * ( (num_com - i - 1) / (lens + lent - i * 2 + 2) ) return weight
Return the Jaro or Jaro-Winkler similarity of two strings. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison qval : int The length of each q-gram (defaults to 1: character-wise matching) mode : str Indicates which variant of this distance metric to compute: - ``winkler`` -- computes the Jaro-Winkler distance (default) which increases the score for matches near the start of the word - ``jaro`` -- computes the Jaro distance long_strings : bool Set to True to "Increase the probability of a match when the number of matched characters is large. This option allows for a little more tolerance when the strings are large. It is not an appropriate test when comparing fixed length fields such as phone and social security numbers." (Used in 'winkler' mode only.) boost_threshold : float A value between 0 and 1, below which the Winkler boost is not applied (defaults to 0.7). (Used in 'winkler' mode only.) scaling_factor : float A value between 0 and 0.25, indicating by how much to boost scores for matching prefixes (defaults to 0.1). (Used in 'winkler' mode only.) Returns ------- float Jaro or Jaro-Winkler similarity Raises ------ ValueError Unsupported boost_threshold assignment; boost_threshold must be between 0 and 1. ValueError Unsupported scaling_factor assignment; scaling_factor must be between 0 and 0.25.' Examples -------- >>> round(sim_jaro_winkler('cat', 'hat'), 12) 0.777777777778 >>> round(sim_jaro_winkler('Niall', 'Neil'), 12) 0.805 >>> round(sim_jaro_winkler('aluminum', 'Catalan'), 12) 0.60119047619 >>> round(sim_jaro_winkler('ATCG', 'TAGC'), 12) 0.833333333333 >>> round(sim_jaro_winkler('cat', 'hat', mode='jaro'), 12) 0.777777777778 >>> round(sim_jaro_winkler('Niall', 'Neil', mode='jaro'), 12) 0.783333333333 >>> round(sim_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12) 0.60119047619 >>> round(sim_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12) 0.833333333333
def make_filter(self, fieldname, query_func, expct_value): ''' makes a filter that will be applied to an object's property based on query_func ''' def actual_filter(item): value = getattr(item, fieldname) if query_func in NULL_AFFECTED_FILTERS and value is None: return False if query_func == 'eq': return value == expct_value elif query_func == 'ne': return value != expct_value elif query_func == 'lt': return value < expct_value elif query_func == 'lte': return value <= expct_value elif query_func == 'gt': return value > expct_value elif query_func == 'gte': return value >= expct_value elif query_func == 'startswith': return value.startswith(expct_value) elif query_func == 'endswith': return value.endswith(expct_value) actual_filter.__doc__ = '{} {} {}'.format('val', query_func, expct_value) return actual_filter
makes a filter that will be applied to an object's property based on query_func
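A usage sketch with a stand-in record type (qs stands for whatever object defines make_filter; NULL_AFFECTED_FILTERS is a module constant not shown in this entry):

from collections import namedtuple

Book = namedtuple('Book', 'title year')
books = [Book('Dune', 1965), Book('Neuromancer', 1984), Book('Untitled', None)]

is_1984 = qs.make_filter('year', 'eq', 1984)   # qs: hypothetical instance
print([b.title for b in books if is_1984(b)])  # ['Neuromancer']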
def translate(s, table, deletions=""): """translate(s,table [,deletions]) -> string Return a copy of the string s, where all characters occurring in the optional argument deletions are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. The deletions argument is not allowed for Unicode strings. """ if deletions or table is None: return s.translate(table, deletions) else: # Add s[:0] so that if s is Unicode and table is an 8-bit string, # table is converted to Unicode. This means that table *cannot* # be a dictionary -- for that feature, use u.translate() directly. return s.translate(table + s[:0])
translate(s,table [,deletions]) -> string Return a copy of the string s, where all characters occurring in the optional argument deletions are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256. The deletions argument is not allowed for Unicode strings.
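This helper targets the Python 2 string module; for reference, the equivalent mapping-plus-deletion in Python 3 goes through str.maketrans (a sketch, not part of the original module):

# map 'abc' to 'xyz' and delete the vowels 'eiou'
table = str.maketrans('abc', 'xyz', 'eiou')
print('abacus cab'.translate(table))   # 'xyxzs zxy'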
def get(aadb: str): """ Retrieves a value from config """ if (aadb): cfg = Config() value = cfg.get(ConfigKeys.asset_allocation_database_path) click.echo(value) if not aadb: click.echo("Use --help for more information.")
Retrieves a value from config
def tpu_estimator_model_fn(model_type, transformer_model, model_dir, use_tpu, mesh_shape, layout_rules, batch_size, sequence_length, autostack, metric_names): """Create a TPUEstimator model function. Args: model_type: a string transformer_model: a transformer.Unitransformer or transformer.Bitransformer model_dir: a string use_tpu: a boolean mesh_shape: a mtf.Shape layout_rules: a mtf.LayoutRules batch_size: an integer sequence_length: an integer autostack: a boolean metric_names: list of strings giving the metric names. If None, then computes padded_neg_log_perplexity Returns: a function to be passed to TPUEstimator """ def my_model_fn(features, labels, mode, params=None, config=None): """Estimator model function. Args: features: input features dictionary labels: ignored mode: a tf.estimator.ModeKeys params: something config: something Returns: something """ del labels, config global_step = tf.train.get_global_step() if use_tpu: ctx = params["context"] num_hosts = ctx.num_hosts host_placement_fn = ctx.tpu_host_placement_function device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)] # TODO(ylc): Better estimation of replica cache size? replica_cache_size = 300 * 1000000 # 300M per replica # Worker 0 caches all the TPU binaries. worker0_mem = replica_cache_size * ctx.num_replicas devices_memeory_usage = [worker0_mem] + [0] * (num_hosts - 1) var_placer = mtf.utils.BalancedVariablePlacer(device_list, devices_memeory_usage) mesh_devices = [""] * mesh_shape.size mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl( mesh_shape, layout_rules, mesh_devices, ctx.device_assignment) else: var_placer = None mesh_devices = [""] * mesh_shape.size mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl( mesh_shape, layout_rules, mesh_devices) graph = mtf.Graph() mesh = mtf.Mesh(graph, "my_mesh", var_placer) def _import_feature(key, allow_missing=False): """Import a feature from the features dictionary into a mtf.Tensor. 
Args: key: a string allow_missing: a boolean Returns: a mtf.Tensor with dtype int32 and shape [batch_dim, length_dim] """ batch_dim = mtf.Dimension("batch", batch_size) length_dim = mtf.Dimension("length", sequence_length) mtf_shape = mtf.Shape([batch_dim, length_dim]) if key not in features: if allow_missing: return None else: raise ValueError( "feature not found %s - features %s = " % (key, features)) tf.logging.info("Import feature %s: %s" % (key, features[key])) x = tf.to_int32(features[key]) if not use_tpu: x = tf.Print( x, [x], "import feature %s" % key, summarize=1000, first_n=1) return mtf.import_fully_replicated(mesh, x, mtf_shape, name=key) if mode == tf.estimator.ModeKeys.PREDICT: inputs = _import_feature("inputs") if isinstance(transformer_model, transformer.Unitransformer): mtf_samples = transformer_model.sample_autoregressive( inputs, variable_dtype=get_variable_dtype()) elif isinstance(transformer_model, transformer.Bitransformer): mtf_samples = transformer_model.decode( inputs, variable_dtype=get_variable_dtype()) else: raise ValueError("unrecognized class") mtf_samples = mtf.anonymize(mtf_samples) lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack) outputs = lowering.export_to_tf_tensor(mtf_samples) predictions = {"outputs": outputs} return tpu_estimator.TPUEstimatorSpec( mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions, prediction_hooks=[mtf.MtfRestoreHook(lowering)]) targets = _import_feature("targets") anon_targets = mtf.anonymize(targets) if model_type == "lm": _, length_dim = targets.shape inputs = mtf.shift(targets, offset=1, dim=length_dim, wrap=False) else: inputs = _import_feature("inputs") if mode == tf.estimator.ModeKeys.EVAL: if isinstance(transformer_model, transformer.Unitransformer): mtf_samples = transformer_model.sample_autoregressive( inputs, variable_dtype=get_variable_dtype()) elif isinstance(transformer_model, transformer.Bitransformer): mtf_samples = transformer_model.decode( inputs, variable_dtype=get_variable_dtype()) else: raise ValueError("unrecognized class") mtf_samples = mtf.anonymize(mtf_samples) lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack) outputs = lowering.export_to_tf_tensor(mtf_samples) labels = lowering.export_to_tf_tensor(anon_targets) restore_hook = mtf.MtfRestoreHook(lowering) # metric_names becomes locally scoped if we simply assign # ["padded_neg_log_perplexity"] to it conditioned on if it's None. local_metric_names = metric_names or ["token_accuracy"] def metric_fn(labels, outputs): return metric_utils.get_metric_fns( local_metric_names, labels, outputs ) eval_metrics = (metric_fn, [labels, outputs]) return tpu_estimator.TPUEstimatorSpec( tf.estimator.ModeKeys.EVAL, # Unfortunately TPUEstimatorSpec requires us to provide a value for # loss when in EVAL mode. Since we are sampling or decoding from the # model, we don't have a loss to report. 
loss=tf.constant(0.), evaluation_hooks=[restore_hook], eval_metrics=eval_metrics) if isinstance(transformer_model, transformer.Unitransformer): position_kwargs = dict( sequence_id=_import_feature("targets_segmentation", True), position=_import_feature("targets_position", True), ) elif isinstance(transformer_model, transformer.Bitransformer): position_kwargs = dict( encoder_sequence_id=_import_feature("inputs_segmentation", True), decoder_sequence_id=_import_feature("targets_segmentation", True), encoder_position=_import_feature("inputs_position", True), decoder_position=_import_feature("targets_position", True), ) else: raise ValueError("unrecognized class") logits, loss = transformer_model.call_simple( inputs=inputs, targets=targets, compute_loss=True, mode=mode, variable_dtype=get_variable_dtype(), **position_kwargs) if use_tpu and logits is not None: logits = mtf.anonymize(logits) # TRAIN mode if mode == tf.estimator.ModeKeys.TRAIN: var_grads = mtf.gradients( [loss], [v.outputs[0] for v in graph.trainable_variables]) optimizer = mtf.optimize.AdafactorOptimizer() update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables) lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack) tf_loss = lowering.export_to_tf_tensor(loss) tf_loss = tf.to_float(tf_loss) if not use_tpu: tf_loss = tf.Print(tf_loss, [tf_loss, tf.train.get_global_step()], "step, tf_loss") if mode == tf.estimator.ModeKeys.TRAIN: tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] tf_update_ops.append(tf.assign_add(global_step, 1)) train_op = tf.group(tf_update_ops) with mtf.utils.outside_all_rewrites(): # Copy master variables to slices. Must be called first. restore_hook = mtf.MtfRestoreHook(lowering) saver = tf.train.Saver( tf.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False, save_relative_paths=True) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) saver_listener = mtf.MtfCheckpointSaverListener(lowering) saver_hook = tf.train.CheckpointSaverHook( model_dir, save_steps=1000, saver=saver, listeners=[saver_listener]) gin_config_saver_hook = gin.tf.GinConfigSaverHook( model_dir, summarize_config=True) if mode == tf.estimator.ModeKeys.TRAIN: if use_tpu: return tpu_estimator.TPUEstimatorSpec( mode=tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, training_hooks=[ restore_hook, saver_hook, gin_config_saver_hook, ]) else: return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, training_chief_hooks=[ restore_hook, saver_hook, gin_config_saver_hook, ]) return my_model_fn
Create a TPUEstimator model function. Args: model_type: a string transformer_model: a transformer.Unitransformer or transformer.Bitransformer model_dir: a string use_tpu: a boolean mesh_shape: a mtf.Shape layout_rules: a mtf.LayoutRules batch_size: an integer sequence_length: an integer autostack: a boolean metric_names: list of strings giving the metric names. If None, then computes padded_neg_log_perplexity Returns: a function to be passed to TPUEstimator
def _get_logical_raid_levels(self): """Gets the different raid levels configured on a server. :returns a dictionary of logical_raid_levels set to true. Example if raid level 1+0 and 6 are configured, it returns {'logical_raid_level_10': 'true', 'logical_raid_level_6': 'true'} """ logical_drive_details = self._get_logical_drive_resource() raid_level = {} if logical_drive_details: for item in logical_drive_details: if 'Raid' in item: raid_level_var = "logical_raid_level_" + item['Raid'] raid_level.update({raid_level_var: 'true'}) return raid_level if len(raid_level.keys()) > 0 else None
Gets the different raid levels configured on a server. :returns a dictionary of logical_raid_levels set to true. Example if raid level 1+0 and 6 are configured, it returns {'logical_raid_level_10': 'true', 'logical_raid_level_6': 'true'}
def clickmap(parser, token): """ Clickmap tracker template tag. Renders Javascript code to track page visits. You must supply your clickmap tracker ID (as a string) in the ``CLICKMAP_TRACKER_ID`` setting. """ bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return ClickmapNode()
Clickmap tracker template tag. Renders Javascript code to track page visits. You must supply your clickmap tracker ID (as a string) in the ``CLICKMAP_TRACKER_ID`` setting.
def _send(self): """ Send data to statsd. Fire and forget. Cross fingers and it'll arrive. """ if not statsd: return for metric in self.metrics: # Split the path into a prefix and a name # to work with the statsd module's view of the world. # It will get re-joined by the python-statsd module. # # For the statsd module, you specify prefix in the constructor # so we just use the full metric path. (prefix, name) = metric.path.rsplit(".", 1) logging.debug("Sending %s %s|g", name, metric.value) if metric.metric_type == 'GAUGE': if hasattr(statsd, 'StatsClient'): self.connection.gauge(metric.path, metric.value) else: statsd.Gauge(prefix, self.connection).send( name, metric.value) else: # To send a counter, we need to just send the delta # but without any time delta changes value = metric.raw_value if metric.path in self.old_values: value = value - self.old_values[metric.path] self.old_values[metric.path] = metric.raw_value if hasattr(statsd, 'StatsClient'): self.connection.incr(metric.path, value) else: statsd.Counter(prefix, self.connection).increment( name, value) if hasattr(statsd, 'StatsClient'): self.connection.send() self.metrics = []
Send data to statsd. Fire and forget. Cross fingers and it'll arrive.
def jit_load(self): """ Import and instantiate this JIT object Returns ------- """ try: model = importlib.import_module('.' + self.model, 'andes.models') device = getattr(model, self.device) self.system.__dict__[self.name] = device(self.system, self.name) g = self.system.__dict__[self.name]._group self.system.group_add(g) self.system.__dict__[g].register_model(self.name) # register device after loading self.system.devman.register_device(self.name) self.loaded = 1 logger.debug('Imported model <{:s}.{:s}>.'.format( self.model, self.device)) except ImportError: logger.error( 'non-JIT model <{:s}.{:s}> import error' .format(self.model, self.device)) except AttributeError: logger.error( 'model <{:s}.{:s}> does not exist. Check models/__init__.py' .format(self.model, self.device))
Import and instantiate this JIT object Returns -------
def get_attrs(self): """Get the global attributes from underlying data set.""" return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs())
Get the global attributes from underlying data set.
def delete_all(config=None): """ Deletes all hosts from ssh config. """ storm_ = get_storm_instance(config) try: storm_.delete_all_entries() print(get_formatted_message('all entries deleted.', 'success')) except Exception as error: print(get_formatted_message(str(error), 'error'), file=sys.stderr) sys.exit(1)
Deletes all hosts from ssh config.
def open_db(db, zipped=None, encoding=None, fieldnames_lower=True, case_sensitive=True): """Context manager. Allows reading DBF file (maybe even from zip). :param str|unicode|file db: .dbf file name or a file-like object. :param str|unicode zipped: .zip file path or a file-like object. :param str|unicode encoding: Encoding used by DB. This will be used if there's no encoding information in the DB itself. :param bool fieldnames_lower: Lowercase field names. :param bool case_sensitive: Whether DB filename is case sensitive. :rtype: Dbf """ kwargs = dict( encoding=encoding, fieldnames_lower=fieldnames_lower, case_sensitive=case_sensitive, ) if zipped: with Dbf.open_zip(db, zipped, **kwargs) as dbf: yield dbf else: with Dbf.open(db, **kwargs) as dbf: yield dbf
Context manager. Allows reading DBF file (maybe even from zip). :param str|unicode|file db: .dbf file name or a file-like object. :param str|unicode zipped: .zip file path or a file-like object. :param str|unicode encoding: Encoding used by DB. This will be used if there's no encoding information in the DB itself. :param bool fieldnames_lower: Lowercase field names. :param bool case_sensitive: Whether DB filename is case sensitive. :rtype: Dbf
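Typical use as a context manager (hypothetical file names; Dbf comes from the same package):

# a plain .dbf on disk, with an explicit fallback encoding
with open_db('clients.dbf', encoding='cp866') as dbf:
    print(dbf)

# the same table read out of a zip archive
with open_db('clients.dbf', zipped='backup.zip') as dbf:
    print(dbf)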
def distributions(self, _args): """Lists all distributions currently available (i.e. that have already been built).""" ctx = self.ctx dists = Distribution.get_distributions(ctx) if dists: print('{Style.BRIGHT}Distributions currently installed are:' '{Style.RESET_ALL}'.format(Style=Out_Style, Fore=Out_Fore)) pretty_log_dists(dists, print) else: print('{Style.BRIGHT}There are no dists currently built.' '{Style.RESET_ALL}'.format(Style=Out_Style))
Lists all distributions currently available (i.e. that have already been built).
def append_manage_data_op(self, data_name, data_value, source=None): """Append a :class:`ManageData <stellar_base.operation.ManageData>` operation to the list of operations. :param str data_name: String up to 64 bytes long. If this is a new Name it will add the given name/value pair to the account. If this Name is already present then the associated value will be modified. :param data_value: If not present then the existing Name will be deleted. If present then this value will be set in the DataEntry. Up to 64 bytes long. :type data_value: str, bytes, None :param str source: The source account on which data is being managed. :return: This builder instance. """ op = operation.ManageData(data_name, data_value, source) return self.append_op(op)
Append a :class:`ManageData <stellar_base.operation.ManageData>` operation to the list of operations. :param str data_name: String up to 64 bytes long. If this is a new Name it will add the given name/value pair to the account. If this Name is already present then the associated value will be modified. :param data_value: If not present then the existing Name will be deleted. If present then this value will be set in the DataEntry. Up to 64 bytes long. :type data_value: str, bytes, None :param str source: The source account on which data is being managed. :return: This builder instance.
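A hedged sketch of how this builder method is typically chained. The Builder construction arguments and the sign/submit calls below are assumptions about the surrounding stellar_base API, not taken from this snippet; the secret seed is a placeholder.

# Hypothetical usage of append_manage_data_op; values are placeholders.
builder = Builder(secret='S...PLACEHOLDER...SEED')
builder.append_manage_data_op('favourite_color', 'blue')   # add or update an entry
builder.append_manage_data_op('obsolete_key', None)        # a None value deletes the entry
builder.sign()
builder.submit()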
def dumps(self, script): "Return a compressed representation of script as a binary string." string = BytesIO() self._dump(script, string, self._protocol, self._version) return string.getvalue()
Return a compressed representation of script as a binary string.
def set_entries(self, entries: List[Tuple[str, str]], titles, resources): """ Provide the template with the data for the toc entries """ self.entries = [] for flag, pagename in entries: title = titles[pagename].children[0] resource = resources.get(pagename, None) if resource and hasattr(resource, 'is_published') and not \ resource.is_published: continue # Even if there is no resource for this tocentry, we can # use the toctree info self.entries.append(dict( title=title, href=pagename, resource=resource )) self.result_count = len(self.entries)
Provide the template with the data for the toc entries
def get_support_variables(polynomial): """Gets the support of a polynomial. """ support = [] if is_number_type(polynomial): return support for monomial in polynomial.expand().as_coefficients_dict(): mon, _ = __separate_scalar_factor(monomial) symbolic_support = flatten(split_commutative_parts(mon)) for s in symbolic_support: if isinstance(s, Pow): base = s.base if is_adjoint(base): base = base.adjoint() support.append(base) elif is_adjoint(s): support.append(s.adjoint()) elif isinstance(s, Operator): support.append(s) return support
Gets the support of a polynomial.
def create_ver_browser(self, layout): """Create a version browser and insert it into the given layout :param layout: the layout to insert the browser into :type layout: QLayout :returns: the created browser :rtype: :class:`jukeboxcore.gui.widgets.browser.ComboBoxBrowser` :raises: None """ brws = ComboBoxBrowser(1, headers=['Version:']) layout.insertWidget(1, brws) return brws
Create a version browser and insert it into the given layout :param layout: the layout to insert the browser into :type layout: QLayout :returns: the created browser :rtype: :class:`jukeboxcore.gui.widgets.browser.ComboBoxBrowser` :raises: None
def integers(start, count): '''Generates in sequence the integral numbers within a range. Note: This method uses deferred execution. Args: start: The first integer in the sequence. count: The number of sequential integers to generate. Returns: A Queryable over the specified range of integers. Raises: ValueError: If count is negative. ''' if count < 0: raise ValueError("integers() count cannot be negative") return query(irange(start, start + count))
Generates in sequence the integral numbers within a range. Note: This method uses deferred execution. Args: start: The first integer in the sequence. count: The number of sequential integers to generate. Returns: A Queryable over the specified range of integers. Raises: ValueError: If count is negative.
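A short usage sketch of integers(). The select/to_list operators chained below are assumed to exist on the returned Queryable; they are not defined in this snippet.

squares = integers(1, 5).select(lambda n: n * n).to_list()
print(squares)   # [1, 4, 9, 16, 25]

try:
    integers(0, -1)          # a negative count is rejected
except ValueError as exc:
    print(exc)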
def get_published(self, layer_id, expand=[]): """ Get the latest published version of this layer. :raises NotFound: if there is no published version. """ target_url = self.client.get_url('VERSION', 'GET', 'published', {'layer_id': layer_id}) return self._get(target_url, expand=expand)
Get the latest published version of this layer. :raises NotFound: if there is no published version.
def get_extra_functions(self) -> Dict[str, Callable]: """Get a list of additional features Returns: Dict[str, Callable]: A dict of methods marked as additional features. Method can be called with ``get_extra_functions()["methodName"]()``. """ if self.channel_type == ChannelType.Master: raise NameError("get_extra_functions is not available on master channels.") methods = {} for mName in dir(self): m = getattr(self, mName) if callable(m) and getattr(m, "extra_fn", False): methods[mName] = m return methods
Get a list of additional features Returns: Dict[str, Callable]: A dict of methods marked as additional features. Method can be called with ``get_extra_functions()["methodName"]()``.
def is_de_listed(self): """ Check whether the contract has been de-listed (expired). """ env = Environment.get_instance() instrument = env.get_instrument(self._order_book_id) current_date = env.trading_dt if instrument.de_listed_date is not None: if instrument.de_listed_date.date() > env.config.base.end_date: return False if current_date >= env.data_proxy.get_previous_trading_date(instrument.de_listed_date): return True return False
Check whether the contract has been de-listed (expired).
def _write_cache(self, lines, append=False): """Write virtualenv metadata to cache.""" mode = 'at' if append else 'wt' with open(self.filepath, mode, encoding='utf8') as fh: fh.writelines(line + '\n' for line in lines)
Write virtualenv metadata to cache.
def fit(self, train_x, train_y): """ Fit the regressor with more data. Args: train_x: A list of NetworkDescriptor. train_y: A list of metric values. """ if self.first_fitted: self.incremental_fit(train_x, train_y) else: self.first_fit(train_x, train_y)
Fit the regressor with more data. Args: train_x: A list of NetworkDescriptor. train_y: A list of metric values.
def _process_results(self, raw_results, *args, **kwargs): """ Naively translate between the 'aggregations' search result data structure returned by ElasticSearch 2+ in response to 'aggs' queries into a structure with 'facets'-like content that Haystack (2.6.1) can understand and process, then pass it on to Haystack's default result processing code. WARNING: Only 'terms' facet types are currently supported. An example result: { 'hits': <BLAH> 'aggregations': { 'type_exact': { 'doc_count_error_upper_bound': 0, 'sum_other_doc_count': 0, 'buckets': [ {'key': 'artwork', 'doc_count': 14145}, {'key': 'artist', 'doc_count': 3360}, {'key': 'event', 'doc_count': 2606}, {'key': 'exhibition', 'doc_count': 416}, {'key': 'essay', 'doc_count': 20}, {'key': 'publication', 'doc_count': 1} ] } } } Will be translated to look like: { 'hits': <BLAH> 'facets': { 'type_exact': { '_type': 'terms', 'terms': [ {'term': 'artwork', 'count': 14145}, {'term': 'artist', 'count': 3360}, {'term': 'event', 'count': 2606}, {'term': 'exhibition', 'count': 416}, {'term': 'essay', 'count': 20}, {'term': 'publication', 'count': 1} ] } } } NOTE: We don't bother cleaning up the data quite this much really, we just translate and duplicate item names and leave the old ones in place for a time when Haystack may support the real returned results. """ if 'aggregations' in raw_results: for agg_fieldname, agg_info in raw_results['aggregations'].items(): agg_info['_type'] = 'terms' for bucket_item in agg_info['buckets']: if 'doc_count' in bucket_item: bucket_item['term'] = bucket_item['key'] bucket_item['count'] = bucket_item['doc_count'] agg_info['terms'] = agg_info['buckets'] raw_results['facets'] = raw_results['aggregations'] return super(ICEkitConfigurableElasticBackend, self) \ ._process_results(raw_results, *args, **kwargs)
Naively translate between the 'aggregations' search result data structure returned by ElasticSearch 2+ in response to 'aggs' queries into a structure with 'facets'-like content that Haystack (2.6.1) can understand and process, then pass it on to Haystack's default result processing code. WARNING: Only 'terms' facet types are currently supported. An example result: { 'hits': <BLAH> 'aggregations': { 'type_exact': { 'doc_count_error_upper_bound': 0, 'sum_other_doc_count': 0, 'buckets': [ {'key': 'artwork', 'doc_count': 14145}, {'key': 'artist', 'doc_count': 3360}, {'key': 'event', 'doc_count': 2606}, {'key': 'exhibition', 'doc_count': 416}, {'key': 'essay', 'doc_count': 20}, {'key': 'publication', 'doc_count': 1} ] } } } Will be translated to look like: { 'hits': <BLAH> 'facets': { 'type_exact': { '_type': 'terms', 'terms': [ {'term': 'artwork', 'count': 14145}, {'term': 'artist', 'count': 3360}, {'term': 'event', 'count': 2606}, {'term': 'exhibition', 'count': 416}, {'term': 'essay', 'count': 20}, {'term': 'publication', 'count': 1} ] } } } NOTE: We don't bother cleaning up the data quite this much really, we just translate and duplicate item names and leave the old ones in place for a time when Haystack may support the real returned results.
def read_raw(data_path): """ Load pickled data from a file. Parameters ---------- data_path : str Path to the pickle file. Returns ------- data : object The unpickled data. """ with open(data_path, 'rb') as f: data = pickle.load(f) return data
Load pickled data from a file. Parameters ---------- data_path : str Path to the pickle file. Returns ------- data : object The unpickled data.
def get_trees(self, data, showerrors = False): # -> list: """ returns a list of trees with valid guesses """ if showerrors: raise NotImplementedError("This parser doesn't implement errors") self.data = data self.index = 0 try: return [self.__aux_parser(self._productionset.initialsymbol)] except (IndexError, ParseError): return []
returns a list of trees with valid guesses
def direct_messages(self, delegate, params={}, extra_args=None): """Get direct messages for the authenticating user. Search results are returned one message at a time as DirectMessage objects""" return self.__get('/direct_messages.xml', delegate, params, txml.Direct, extra_args=extra_args)
Get direct messages for the authenticating user. Search results are returned one message at a time as DirectMessage objects
def _on_group_stream_changed(self, data): """Handle group stream change.""" self._groups.get(data.get('id')).update_stream(data)
Handle group stream change.
def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. :param: cmd: str: The apt command to run. :param: fatal: bool: Whether the command's output should be checked and retried. """ # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. cmd_env = { 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} if fatal: _run_with_retries( cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), retry_message="Couldn't acquire DPKG lock") else: env = os.environ.copy() env.update(cmd_env) subprocess.call(cmd, env=env)
Run an apt command with optional retries. :param: cmd: str: The apt command to run. :param: fatal: bool: Whether the command's output should be checked and retried.
async def _on_trace_notification(self, trace_event): """Callback function called when a trace chunk is received. Args: trace_event (dict): The received trace chunk information """ conn_string = trace_event.get('connection_string') payload = trace_event.get('payload') await self.notify_event(conn_string, 'trace', payload)
Callback function called when a trace chunk is received. Args: trace_event (dict): The received trace chunk information
def on_quote_changed(self, tiny_quote): """Callback triggered when real-time quote or order book data changes.""" # TinyQuoteData data = tiny_quote str_log = "on_quote_changed symbol=%s open=%s high=%s close=%s low=%s" % (data.symbol, data.openPrice, data.highPrice, data.lastPrice, data.lowPrice) self.log(str_log)
Callback triggered when real-time quote or order book data changes.
def create(self, data): """ Create object from the given data. The given data may or may not have been validated prior to calling this function. This function will try its best in creating the object. If the resulting object cannot be produced, raises ``ValidationError``. The spec can affect how individual fields will be created by implementing ``clean()`` for the fields needing customization. :param data: the data as a dictionary. :return: instance of ``klass`` or dictionary. :raises: ``ValidationError`` if factory is unable to create object. """ # todo: copy-paste code from representation.validate -> refactor if data is None: return None prototype = {} errors = {} # create and populate the prototype for field_name, field_spec in self.spec.fields.items(): try: value = self._create_value(data, field_name, self.spec) except ValidationError, e: if field_name not in self.default_create_values: if hasattr(e, 'message_dict'): # prefix error keys with top level field name errors.update(dict(zip( [field_name + '.' + key for key in e.message_dict.keys()], e.message_dict.values()))) else: errors[field_name] = e.messages else: key_name = self.property_name_map[field_name] prototype[key_name] = value # check extra fields if self.prevent_extra_fields: extras = set(data.keys()) - set(self.property_name_map.keys()) if extras: errors[', '.join(extras)] = ['field(s) not allowed'] # if errors, raise ValidationError if errors: raise ValidationError(errors) # return dict or object based on the prototype _data = deepcopy(self.default_create_values) _data.update(prototype) if self.klass: instance = self.klass() instance.__dict__.update(prototype) return instance else: return prototype
Create object from the given data. The given data may or may not have been validated prior to calling this function. This function will try its best in creating the object. If the resulting object cannot be produced, raises ``ValidationError``. The spec can affect how individual fields will be created by implementing ``clean()`` for the fields needing customization. :param data: the data as a dictionary. :return: instance of ``klass`` or dictionary. :raises: ``ValidationError`` if factory is unable to create object.
def calculated_intervals(self, value): """ Set the calculated intervals This will be written to the stream_status collection if it's in the database channel :param value: The calculated intervals :type value: TimeIntervals, TimeInterval, list[TimeInterval] """ if not value: self._calculated_intervals = TimeIntervals() return if isinstance(value, TimeInterval): value = TimeIntervals([value]) elif isinstance(value, TimeIntervals): pass elif isinstance(value, list): value = TimeIntervals(value) else: raise TypeError("Expected list/TimeInterval/TimeIntervals, got {}".format(type(value))) for interval in value: if interval.end > utcnow(): raise ValueError("Calculated intervals should not be in the future") self._calculated_intervals = value
Set the calculated intervals This will be written to the stream_status collection if it's in the database channel :param value: The calculated intervals :type value: TimeIntervals, TimeInterval, list[TimeInterval]
def addExpectedFailure(self, test: unittest.case.TestCase, err: tuple) -> None: """ Transforms the test in a serializable version of it and sends it to a queue for further analysis :param test: the test to save :param err: tuple of the form (Exception class, Exception instance, traceback) """ # noinspection PyTypeChecker self.add_result(TestState.expected_failure, test, err)
Transforms the test in a serializable version of it and sends it to a queue for further analysis :param test: the test to save :param err: tuple of the form (Exception class, Exception instance, traceback)
def poll_event(self): """ Waits for an event to happen and returns a string related to the event. If the event is a normal (letter) key press, the letter is returned (case sensitive) :return: Event type """ # Flush all inputs before this one that were done since last poll curses.flushinp() ch = self.screen.getch() if ch == 27: return EVENT_ESC elif ch == -1 or ch == curses.KEY_RESIZE: return EVENT_RESIZE elif ch == 10 or ch == curses.KEY_ENTER: return EVENT_ENTER elif ch == 127 or ch == curses.KEY_BACKSPACE: return EVENT_BACKSPACE elif ch == curses.KEY_UP: return EVENT_UP elif ch == curses.KEY_DOWN: return EVENT_DOWN elif ch == curses.KEY_LEFT: return EVENT_LEFT elif ch == curses.KEY_RIGHT: return EVENT_RIGHT elif ch == 3: return EVENT_CTRL_C elif 0 <= ch < 256: return chr(ch) else: return EVENT_UNHANDLED
Waits for an event to happen and returns a string related to the event. If the event is a normal (letter) key press, the letter is returned (case sensitive) :return: Event type
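An illustrative input loop built on poll_event(). The `ui` object, redraw() and handle_key() below are placeholders standing in for the surrounding application, so this only shows the intended control flow, not a real API.

# Hypothetical event loop; ui, redraw() and handle_key() are placeholders.
while True:
    event = ui.poll_event()
    if event in (EVENT_ESC, EVENT_CTRL_C):
        break                      # leave the loop on Esc / Ctrl-C
    elif event == EVENT_RESIZE:
        redraw()                   # react to a terminal resize
    elif len(event) == 1:
        handle_key(event)          # a plain printable character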
def eigenvectors_nrev(T, right=True): r"""Compute eigenvectors of transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T right : bool, optional If right=True compute right eigenvectors, left eigenvectors otherwise Returns ------- eigvec : (d, d) ndarray The eigenvectors of T ordered with decreasing absolute value of the corresponding eigenvalue """ if right: val, R = eig(T, left=False, right=True) """ Sorted eigenvalues and left and right eigenvectors. """ perm = np.argsort(np.abs(val))[::-1] # eigval=val[perm] eigvec = R[:, perm] else: val, L = eig(T, left=True, right=False) """ Sorted eigenvalues and left and right eigenvectors. """ perm = np.argsort(np.abs(val))[::-1] # eigval=val[perm] eigvec = L[:, perm] return eigvec
r"""Compute eigenvectors of transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T right : bool, optional If right=True compute right eigenvectors, left eigenvectors otherwise Returns ------- eigvec : (d, d) ndarray The eigenvectors of T ordered with decreasing absolute value of the corresponding eigenvalue
def consume_network_packet_messages_from_redis(): """consume_network_packet_messages_from_redis Set up a ``celery_connectors.KombuSubscriber`` to consume messages from the ``FORWARD_BROKER_URL`` broker in the ``FORWARD_QUEUE`` queue. """ # end of recv_message # Initialize KombuSubscriber sub = KombuSubscriber( name, FORWARD_BROKER_URL, FORWARD_SSL_OPTIONS) # Now consume: seconds_to_consume = 10.0 heartbeat = 60 serializer = "application/json" queue = FORWARD_QUEUE sub.consume( callback=recv_msg, queue=queue, exchange=None, routing_key=None, serializer=serializer, heartbeat=heartbeat, time_to_wait=seconds_to_consume) log.info("end - {}".format(name))
consume_network_packet_messages_from_redis Set up a ``celery_connectors.KombuSubscriber`` to consume messages from the ``FORWARD_BROKER_URL`` broker in the ``FORWARD_QUEUE`` queue.
def get_imports(filename): """Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file) """ with open(filename, "rb") as f: src = f.read() finder = ImportFinder() finder.visit(ast.parse(src, filename=filename)) imports = [] for i in finder.imports: name, _, is_from, is_star = i imports.append(i + (resolve_import(name, is_from, is_star),)) return imports
Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file)
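A quick usage sketch: resolving the imports of a file on disk. ImportFinder and resolve_import come from the surrounding module, and the path is a placeholder, so this only illustrates the call pattern and tuple layout.

# Hypothetical call; 'some_module.py' is a placeholder path.
for name, alias, is_from, is_star, source_file in get_imports('some_module.py'):
    kind = 'from-import' if is_from else 'import'
    print(kind, name, '->', source_file)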
def get(self, file_id: str) -> typing.Tuple[typing.BinaryIO, str, datetime.datetime]: """Return the file identified by a file_id string, its file name and upload date.""" raise NotImplementedError("Downloading files from the FileStore has not been implemented yet.")
Return the file identified by a file_id string, its file name and upload date.
def fit(self, X, y=None, groups=None, **fit_params): """ Run fit on the estimator with parameters chosen sequentially by SigOpt. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ return self._fit(X, y=y, groups=groups, **fit_params)
Run fit on the estimator with parameters chosen sequentially by SigOpt. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning.
def entrypoint(cls): """Mark the decorated command as the intended entrypoint of the command module. """ if not isinstance(cls, type) or not issubclass(cls, Command): raise TypeError(f"inappropriate entrypoint instance of type {cls.__class__}") cls._argcmdr_entrypoint_ = True return cls
Mark the decorated command as the intended entrypoint of the command module.
def url_(client_id: str, redirect_uri: str, *, scope: str = None, state: str = None, secure: bool = True) -> str: """Construct a OAuth2 URL instead of an OAuth2 object.""" attrs = { 'client_id': client_id, 'redirect_uri': quote(redirect_uri) } if scope is not None: attrs['scope'] = quote(scope) if state is not None: attrs['state'] = state parameters = '&'.join('{0}={1}'.format(*item) for item in attrs.items()) return OAuth2._BASE.format(parameters=parameters)
Construct a OAuth2 URL instead of an OAuth2 object.
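A hedged example of building an authorisation URL with url_(). The client id, redirect URI, scope and state below are placeholders rather than real credentials, and the exact shape of the resulting URL depends on OAuth2._BASE, which is defined elsewhere.

auth_url = url_(
    client_id='123456789012345678',
    redirect_uri='https://example.com/oauth/callback',
    scope='identify guilds',
    state='opaque-anti-csrf-token',
)
print(auth_url)   # ...?client_id=...&redirect_uri=...&scope=...&state=...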
def crosslisting_feature(catalog, soup): """Parses all the crosslistings. These refer to the similar CRNs, such as a grad & undergrad level course. """ listing = {} for elem in soup.coursedb.findAll('crosslisting'): seats = int(elem['seats']) crns = [safeInt(crn.string) for crn in elem.findAll('crn')] # we want to refer to the same object to save space cl = CrossListing(crns, seats) for crn in crns: listing[crn] = cl catalog.crosslistings = FrozenDict(listing) logger.info('Catalog has %d course crosslistings' % len(catalog.crosslistings))
Parses all the crosslistings. These refer to the similar CRNs, such as a grad & undergrad level course.
def _get_value(data_structure, key): """Return the value of a data_structure given a path. :param data_structure: Dictionary, list or subscriptable object. :param key: Array with the defined path ordered. """ if len(key) == 0: raise KeyError() value = data_structure[key[0]] if len(key) > 1: return _get_value(value, key[1:]) return value
Return the value of a data_structure given a path. :param data_structure: Dictionary, list or subscriptable object. :param key: Array with the defined path ordered.
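A self-contained illustration of the recursive path lookup above, mixing dictionary keys and list indices.

data = {'user': {'emails': ['a@example.com', 'b@example.com']}}
print(_get_value(data, ['user', 'emails', 1]))   # b@example.com

try:
    _get_value(data, [])     # an empty path is rejected
except KeyError:
    print('empty path raises KeyError')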
def getSparseTensor(numNonzeros, inputSize, outputSize, onlyPositive=False, fixedRange=1.0/24): """ Return a random tensor that is initialized like a weight matrix Size is outputSize X inputSize, where numNonzeros entries of each row are non-zero """ # Initialize weights in the typical fashion. w = torch.Tensor(outputSize, inputSize, ) if onlyPositive: w.data.uniform_(0, fixedRange) else: w.data.uniform_(-fixedRange, fixedRange) # Zero out weights for sparse weight matrices if numNonzeros < inputSize: numZeros = inputSize - numNonzeros outputIndices = np.arange(outputSize) inputIndices = np.array([np.random.permutation(inputSize)[:numZeros] for _ in outputIndices], dtype=np.long) # Create tensor indices for all weights that will be zeroed out zeroIndices = np.empty((outputSize, numZeros, 2), dtype=np.long) zeroIndices[:, :, 0] = outputIndices[:, None] zeroIndices[:, :, 1] = inputIndices zeroIndices = torch.LongTensor(zeroIndices.reshape(-1, 2)) zeroWts = (zeroIndices[:, 0], zeroIndices[:, 1]) w.data[zeroWts] = 0.0 return w
Return a random tensor that is initialized like a weight matrix Size is outputSize X inputSize, where numNonzeros entries of each row are non-zero
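A small sanity check of the sparsity pattern produced above; it assumes torch and numpy are installed, which the function already requires. Note that the function relies on np.long, which recent numpy releases have removed, so this sketch presumes a numpy version where that alias still exists.

w = getSparseTensor(numNonzeros=3, inputSize=10, outputSize=4)
print(w.shape)                  # torch.Size([4, 10])
print((w != 0).sum(dim=1))      # 3 non-zero entries per row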
def step( self, local_inv=None, peer_table=None, peer_queue=None, con=None, path=None ): """ Execute one round of the peer discovery algorithm: * Add at most 10 new peers from the pending peer queue (but ping them first, and drop hosts if the pending queue gets to be too long). * Execute one step of the MHRWDA algorithm. Add any new peers from the neighbor sets discovered. * Remove at most 10 old, unresponsive peers from the peer DB. """ if path is None: path = self.atlasdb_path if self.max_neighbors is None: self.max_neighbors = atlas_max_neighbors() log.debug("%s: max neighbors is %s" % (self.my_hostport, self.max_neighbors)) current_peers = self.get_current_peers( peer_table=peer_table ) # add some new peers num_added = self.update_new_peers( 10, current_peers, peer_queue=peer_queue, peer_table=peer_table, con=con, path=path ) # use MHRWDA to walk the peer graph. # first, begin the walk if we haven't already if self.current_peer is None and len(current_peers) > 0: self.current_peer = current_peers[ random.randint(0,len(current_peers)-1) ] log.debug("%s: crawl %s" % (self.my_hostport, self.current_peer)) peer_neighbors = self.get_neighbors( self.current_peer, peer_table=peer_table, path=path, con=con ) if peer_neighbors is None or len(peer_neighbors) == 0: log.debug("%s: no peers from %s" % (self.my_hostport, self.current_peer)) # try again later self.random_walk_reset() else: # success! self.current_peer_neighbors = [self.canonical_peer(p) for p in peer_neighbors] # don't talk to myself if self.my_hostport in self.current_peer_neighbors: self.current_peer_neighbors.remove(self.my_hostport) log.debug("%s: neighbors of %s are (%s): %s" % (self.my_hostport, self.current_peer, len(self.current_peer_neighbors), ",".join(self.current_peer_neighbors))) # remember to contact these peers later self.new_peers = list(set( self.new_peers + peer_neighbors )) # can we walk now? if self.current_peer is not None: next_peer, next_peer_neighbors = self.random_walk_graph( self.prev_peer, self.prev_peer_degree, self.current_peer, self.current_peer_neighbors, con=con, path=path, peer_table=peer_table ) if next_peer is not None and next_peer_neighbors is not None: # success! self.prev_peer = self.current_peer self.prev_peer_degree = len(self.current_peer_neighbors) self.current_peer = next_peer self.current_peer_neighbors = next_peer_neighbors # crawl new peers self.new_peers = list(set(self.new_peers + self.current_peer_neighbors)) else: log.error("%s: failed to walk from %s" % (self.my_hostport, self.current_peer)) self.random_walk_reset() # update the existing peer info num_removed = self.update_existing_peers( 10, con=con, path=path, peer_table=peer_table ) return num_added, num_removed
Execute one round of the peer discovery algorithm: * Add at most 10 new peers from the pending peer queue (but ping them first, and drop hosts if the pending queue gets to be too long). * Execute one step of the MHRWDA algorithm. Add any new peers from the neighbor sets discovered. * Remove at most 10 old, unresponsive peers from the peer DB.
def namespace(self): """ Return the Namespace URI (if any) as a String for the current tag """ if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG): return u'' # No Namespace if self.m_namespaceUri == 0xFFFFFFFF: return u'' return self.sb[self.m_namespaceUri]
Return the Namespace URI (if any) as a String for the current tag
def get_repo(self, auth, username, repo_name): """ Returns the repository with name ``repo_name`` owned by the user with username ``username``. :param auth.Authentication auth: authentication object :param str username: username of owner of repository :param str repo_name: name of repository :return: a representation of the retrieved repository :rtype: GogsRepo :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced """ path = "/repos/{u}/{r}".format(u=username, r=repo_name) response = self.get(path, auth=auth) return GogsRepo.from_json(response.json())
Returns the repository with name ``repo_name`` owned by the user with username ``username``. :param auth.Authentication auth: authentication object :param str username: username of owner of repository :param str repo_name: name of repository :return: a representation of the retrieved repository :rtype: GogsRepo :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced
def eval(self, construct): """Evaluate an expression returning its value. The Python equivalent of the CLIPS eval command. """ data = clips.data.DataObject(self._env) if lib.EnvEval(self._env, construct.encode(), data.byref) != 1: raise CLIPSError(self._env) return data.value
Evaluate an expression returning its value. The Python equivalent of the CLIPS eval command.
def config_merge_text(source='running', merge_config=None, merge_path=None, saltenv='base'): ''' .. versionadded:: 2019.2.0 Return the merge result of the configuration from ``source`` with the merge configuration, as plain text (without loading the config on the device). source: ``running`` The configuration type to retrieve from the network device. Default: ``running``. Available options: ``running``, ``startup``, ``candidate``. merge_config The config to be merged into the initial config, sent as text. This argument is ignored when ``merge_path`` is set. merge_path Absolute or remote path from where to load the merge configuration text. This argument allows any URI supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``, ``https://``, ``s3://``, ``ftp:/``, etc. saltenv: ``base`` Salt fileserver environment from which to retrieve the file. Ignored if ``merge_path`` is not a ``salt://`` URL. CLI Example: .. code-block:: bash salt '*' napalm.config_merge_text merge_path=salt://path/to/merge.cfg ''' config_txt = __salt__['net.config'](source=source)['out'][source] return __salt__['iosconfig.merge_text'](initial_config=config_txt, merge_config=merge_config, merge_path=merge_path, saltenv=saltenv)
.. versionadded:: 2019.2.0 Return the merge result of the configuration from ``source`` with the merge configuration, as plain text (without loading the config on the device). source: ``running`` The configuration type to retrieve from the network device. Default: ``running``. Available options: ``running``, ``startup``, ``candidate``. merge_config The config to be merged into the initial config, sent as text. This argument is ignored when ``merge_path`` is set. merge_path Absolute or remote path from where to load the merge configuration text. This argument allows any URI supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``, ``https://``, ``s3://``, ``ftp:/``, etc. saltenv: ``base`` Salt fileserver environment from which to retrieve the file. Ignored if ``merge_path`` is not a ``salt://`` URL. CLI Example: .. code-block:: bash salt '*' napalm.config_merge_text merge_path=salt://path/to/merge.cfg
def delete_file(self, path, prefixed_path, source_storage): """ We don't need all the file_exists stuff because we have to override all files anyways. """ if self.faster: return True else: return super(Command, self).delete_file(path, prefixed_path, source_storage)
We don't need all the file_exists stuff because we have to override all files anyways.
def map(source = 'density', z = 0, x = 0, y = 0, format = '@1x.png', srs='EPSG:4326', bin=None, hexPerTile=None, style='classic.point', taxonKey=None, country=None, publishingCountry=None, publisher=None, datasetKey=None, year=None, basisOfRecord=None, **kwargs): ''' GBIF maps API :param source: [str] Either ``density`` for fast, precalculated tiles, or ``adhoc`` for any search :param z: [str] zoom level :param x: [str] longitude :param y: [str] latitude :param format: [str] format of returned data. One of: - ``.mvt`` - vector tile - ``@Hx.png`` - 256px raster tile (for legacy clients) - ``@1x.png`` - 512px raster tile, @2x.png for a 1024px raster tile - ``@2x.png`` - 1024px raster tile - ``@3x.png`` - 2048px raster tile - ``@4x.png`` - 4096px raster tile :param srs: [str] Spatial reference system. One of: - ``EPSG:3857`` (Web Mercator) - ``EPSG:4326`` (WGS84 plate caree) - ``EPSG:3575`` (Arctic LAEA) - ``EPSG:3031`` (Antarctic stereographic) :param bin: [str] square or hex to aggregate occurrence counts into squares or hexagons. Points by default. :param hexPerTile: [str] sets the size of the hexagons (the number horizontally across a tile) :param squareSize: [str] sets the size of the squares. Choose a factor of 4096 so they tessalate correctly: probably from 8, 16, 32, 64, 128, 256, 512. :param style: [str] for raster tiles, choose from the available styles. Defaults to classic.point. :param taxonKey: [int] A GBIF occurrence identifier :param datasetKey: [str] The occurrence dataset key (a uuid) :param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 :param basisOfRecord: [str] Basis of record, as defined in the BasisOfRecord enum http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are - ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen. - ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people. - ``LITERATURE`` An occurrence record based on literature alone. - ``LIVING_SPECIMEN`` An occurrence record describing a living specimen, e.g. - ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine. - ``OBSERVATION`` An occurrence record describing an observation. - ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen. - ``UNKNOWN`` Unknown basis for the record. :param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. 
:return: An object of class GbifMap For mvt format, see https://github.com/tilezen/mapbox-vector-tile to decode, and example below Usage:: from pygbif import maps out = maps.map(taxonKey = 2435098) out.response out.path out.img out.plot() out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1)) out.response out.path out.img out.plot() # srs maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857") # bin maps.map(taxonKey = 212, year = 1998, bin = "hex", hexPerTile = 30, style = "classic-noborder.poly") # style maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot() # basisOfRecord maps.map(taxonKey = 2480498, year = 2010, basisOfRecord = "HUMAN_OBSERVATION", bin = "hex", hexPerTile = 500).plot() maps.map(taxonKey = 2480498, year = 2010, basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"], hexPerTile = 500, bin = "hex").plot() # map vector tiles, gives back raw bytes from pygbif import maps x = maps.map(taxonKey = 2480498, year = 2010, format = ".mvt") x.response x.path x.img # None import mapbox_vector_tile mapbox_vector_tile.decode(x.response.content) ''' if format not in ['.mvt', '@Hx.png', '@1x.png', '@2x.png', '@3x.png', '@4x.png']: raise ValueError("'format' not in allowed set, see docs") if source not in ['density', 'adhoc']: raise ValueError("'source' not in allowed set, see docs") if srs not in ['EPSG:3857', 'EPSG:4326', 'EPSG:3575', 'EPSG:3031']: raise ValueError("'srs' not in allowed set, see docs") if bin is not None: if bin not in ['square', 'hex']: raise ValueError("'bin' not in allowed set, see docs") if style is not None: if style not in map_styles: raise ValueError("'style' not in allowed set, see docs") maps_baseurl = 'https://api.gbif.org' url = maps_baseurl + '/v2/map/occurrence/%s/%s/%s/%s%s' url = url % ( source, z, x, y, format ) year = __handle_year(year) basisOfRecord = __handle_bor(basisOfRecord) args = {'srs': srs, 'bin': bin, 'hexPerTile': hexPerTile, 'style': style, 'taxonKey': taxonKey, 'country': country, 'publishingCountry': publishingCountry, 'publisher': publisher, 'datasetKey': datasetKey, 'year': year, 'basisOfRecord': basisOfRecord} kw = {key: kwargs[key] for key in kwargs if key not in requests_argset} if kw is not None: xx = dict(zip( [ re.sub('_', '.', x) for x in kw.keys() ], kw.values() )) args.update(xx) kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset} ctype = 'image/png' if has(format, "png") else 'application/x-protobuf' out = gbif_GET_map(url, args, ctype, **kwargs) # return out return GbifMap(out)
GBIF maps API :param source: [str] Either ``density`` for fast, precalculated tiles, or ``adhoc`` for any search :param z: [str] zoom level :param x: [str] longitude :param y: [str] latitude :param format: [str] format of returned data. One of: - ``.mvt`` - vector tile - ``@Hx.png`` - 256px raster tile (for legacy clients) - ``@1x.png`` - 512px raster tile, @2x.png for a 1024px raster tile - ``@2x.png`` - 1024px raster tile - ``@3x.png`` - 2048px raster tile - ``@4x.png`` - 4096px raster tile :param srs: [str] Spatial reference system. One of: - ``EPSG:3857`` (Web Mercator) - ``EPSG:4326`` (WGS84 plate caree) - ``EPSG:3575`` (Arctic LAEA) - ``EPSG:3031`` (Antarctic stereographic) :param bin: [str] square or hex to aggregate occurrence counts into squares or hexagons. Points by default. :param hexPerTile: [str] sets the size of the hexagons (the number horizontally across a tile) :param squareSize: [str] sets the size of the squares. Choose a factor of 4096 so they tessalate correctly: probably from 8, 16, 32, 64, 128, 256, 512. :param style: [str] for raster tiles, choose from the available styles. Defaults to classic.point. :param taxonKey: [int] A GBIF occurrence identifier :param datasetKey: [str] The occurrence dataset key (a uuid) :param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 :param basisOfRecord: [str] Basis of record, as defined in the BasisOfRecord enum http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are - ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen. - ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people. - ``LITERATURE`` An occurrence record based on literature alone. - ``LIVING_SPECIMEN`` An occurrence record describing a living specimen, e.g. - ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine. - ``OBSERVATION`` An occurrence record describing an observation. - ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen. - ``UNKNOWN`` Unknown basis for the record. :param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. 
:return: An object of class GbifMap For mvt format, see https://github.com/tilezen/mapbox-vector-tile to decode, and example below Usage:: from pygbif import maps out = maps.map(taxonKey = 2435098) out.response out.path out.img out.plot() out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1)) out.response out.path out.img out.plot() # srs maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857") # bin maps.map(taxonKey = 212, year = 1998, bin = "hex", hexPerTile = 30, style = "classic-noborder.poly") # style maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot() # basisOfRecord maps.map(taxonKey = 2480498, year = 2010, basisOfRecord = "HUMAN_OBSERVATION", bin = "hex", hexPerTile = 500).plot() maps.map(taxonKey = 2480498, year = 2010, basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"], hexPerTile = 500, bin = "hex").plot() # map vector tiles, gives back raw bytes from pygbif import maps x = maps.map(taxonKey = 2480498, year = 2010, format = ".mvt") x.response x.path x.img # None import mapbox_vector_tile mapbox_vector_tile.decode(x.response.content)
def cli(*args, **kwargs): """ General-purpose automation tool. See `GitHub <https://github.com/littlemo/mohand>`_ for details. """ log.debug('cli: {} {}'.format(args, kwargs)) # Update the config values in env with the options passed in from the terminal env.update(kwargs)
General-purpose automation tool. See `GitHub <https://github.com/littlemo/mohand>`_ for details.
def database(self, database_id, ddl_statements=(), pool=None): """Factory to create a database within this instance. :type database_id: str :param database_id: The ID of the database. :type ddl_statements: list of string :param ddl_statements: (Optional) DDL statements, excluding the 'CREATE DATABASE' statement. :type pool: concrete subclass of :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. :param pool: (Optional) session pool to be used by database. :rtype: :class:`~google.cloud.spanner_v1.database.Database` :returns: a database owned by this instance. """ return Database(database_id, self, ddl_statements=ddl_statements, pool=pool)
Factory to create a database within this instance. :type database_id: str :param database_id: The ID of the database. :type ddl_statements: list of string :param ddl_statements: (Optional) DDL statements, excluding the 'CREATE DATABASE' statement. :type pool: concrete subclass of :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. :param pool: (Optional) session pool to be used by database. :rtype: :class:`~google.cloud.spanner_v1.database.Database` :returns: a database owned by this instance.
def numeric_function_clean_dataframe(self, axis): """Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager. """ result = None query_compiler = self # If no numeric columns and over columns, then return empty Series if not axis and len(self.index) == 0: result = pandas.Series(dtype=np.int64) nonnumeric = [ col for col, dtype in zip(self.columns, self.dtypes) if not is_numeric_dtype(dtype) ] if len(nonnumeric) == len(self.columns): # If over rows and no numeric columns, return this if axis: result = pandas.Series([np.nan for _ in self.index]) else: result = pandas.Series([0 for _ in self.index]) else: query_compiler = self.drop(columns=nonnumeric) return result, query_compiler
Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager.
def javascript_escape(s, quote_double_quotes=True): """ Escape characters for javascript strings. """ ustring_re = re.compile(u"([\u0080-\uffff])") def fix(match): return r"\u%04x" % ord(match.group(1)) if type(s) == str: s = s.decode('utf-8') elif type(s) != six.text_type: raise TypeError(s) s = s.replace('\\', '\\\\') s = s.replace('\r', '\\r') s = s.replace('\n', '\\n') s = s.replace('\t', '\\t') s = s.replace("'", "\\'") if quote_double_quotes: s = s.replace('"', '&quot;') return str(ustring_re.sub(fix, s))
Escape characters for javascript strings.
def is_eligible(self, timestamp, status, notif_number, in_notif_time, interval, escal_period): # pylint: disable=too-many-return-statements """Check if the escalation is eligible (notification is escalated or not) Escalation is NOT eligible in ONE of the following condition is fulfilled:: * escalation is not time based and notification number not in range [first_notification;last_notification] (if last_notif == 0, it's infinity) * escalation is time based and notification time not in range [first_notification_time;last_notification_time] (if last_notif_time == 0, it's infinity) * status does not matches escalation_options ('WARNING' <=> 'w' ...) * escalation_period is not legit for this time (now usually) :param timestamp: timestamp to check if timeperiod is valid :type timestamp: int :param status: item status (one of the small_states key) :type status: str :param notif_number: current notification number :type notif_number: int :param in_notif_time: current notification time :type in_notif_time: int :param interval: time interval length :type interval: int :return: True if no condition has been fulfilled, otherwise False :rtype: bool """ short_states = { u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c', u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's', u'DOWN': 'd', u'UNREACHABLE': 'x', u'OK': 'o', u'UP': 'o' } # If we are not time based, we check notification numbers: if not self.time_based: # Begin with the easy cases if notif_number < self.first_notification: return False # self.last_notification = 0 mean no end if self.last_notification and notif_number > self.last_notification: return False # Else we are time based, we must check for the good value else: # Begin with the easy cases if in_notif_time < self.first_notification_time * interval: return False if self.last_notification_time and \ in_notif_time > self.last_notification_time * interval: return False # If our status is not good, we bail out too if status in short_states and short_states[status] not in self.escalation_options: return False # Maybe the time is not in our escalation_period if escal_period is not None and not escal_period.is_time_valid(timestamp): return False # Ok, I do not see why not escalade. So it's True :) return True
Check if the escalation is eligible (notification is escalated or not) Escalation is NOT eligible in ONE of the following condition is fulfilled:: * escalation is not time based and notification number not in range [first_notification;last_notification] (if last_notif == 0, it's infinity) * escalation is time based and notification time not in range [first_notification_time;last_notification_time] (if last_notif_time == 0, it's infinity) * status does not matches escalation_options ('WARNING' <=> 'w' ...) * escalation_period is not legit for this time (now usually) :param timestamp: timestamp to check if timeperiod is valid :type timestamp: int :param status: item status (one of the small_states key) :type status: str :param notif_number: current notification number :type notif_number: int :param in_notif_time: current notification time :type in_notif_time: int :param interval: time interval length :type interval: int :return: True if no condition has been fulfilled, otherwise False :rtype: bool
def virtualenv_no_global(): """ Return True if in a venv and no system site packages. """ #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') if running_under_virtualenv() and os.path.isfile(no_global_file): return True
Return True if in a venv and no system site packages.
def set_passport_data_errors(self, user_id, errors): """ Informs a user that some of the Telegram Passport elements they provided contains errors. The user will not be able to re-submit their Passport to you until the errors are fixed (the contents of the field for which you returned the error must change). Returns True on success. Use this if the data submitted by the user doesn't satisfy the standards your service requires for any reason. For example, if a birthday date seems invalid, a submitted document is blurry, a scan shows evidence of tampering, etc. Supply some details in the error message to make sure the user knows how to correct the issues. https://core.telegram.org/bots/api#setpassportdataerrors Parameters: :param user_id: User identifier :type user_id: int :param errors: A JSON-serialized array describing the errors :type errors: list of pytgbot.api_types.sendable.passport.PassportElementError Returns: :return: Returns True on success :rtype: bool """ from pytgbot.api_types.sendable.passport import PassportElementError assert_type_or_raise(user_id, int, parameter_name="user_id") assert_type_or_raise(errors, list, parameter_name="errors") result = self.do("setPassportDataErrors", user_id=user_id, errors=errors) if self.return_python_objects: logger.debug("Trying to parse {data}".format(data=repr(result))) try: return from_array_list(bool, result, list_level=0, is_builtin=True) except TgApiParseException: logger.debug("Failed parsing as primitive bool", exc_info=True) # end try # no valid parsing so far raise TgApiParseException("Could not parse result.") # See debug log for details! # end if return_python_objects return result
Informs a user that some of the Telegram Passport elements they provided contains errors. The user will not be able to re-submit their Passport to you until the errors are fixed (the contents of the field for which you returned the error must change). Returns True on success. Use this if the data submitted by the user doesn't satisfy the standards your service requires for any reason. For example, if a birthday date seems invalid, a submitted document is blurry, a scan shows evidence of tampering, etc. Supply some details in the error message to make sure the user knows how to correct the issues. https://core.telegram.org/bots/api#setpassportdataerrors Parameters: :param user_id: User identifier :type user_id: int :param errors: A JSON-serialized array describing the errors :type errors: list of pytgbot.api_types.sendable.passport.PassportElementError Returns: :return: Returns True on success :rtype: bool
def add_neighbor(self, edge: "Edge") -> None: """ Adds a new neighbor to the node. Arguments: edge (Edge): The edge that would connect this node with its neighbor. """ if edge is None or (edge.source != self and edge.target != self): return if edge.source == self: other: Node = edge.target elif edge.target == self: other: Node = edge.source else: raise ValueError("Tried to add a neighbor with an invalid edge.") edge_key: Tuple[int, int] = edge.key # The graph is considered undirected, check neighbor existence accordingly. if self._neighbors.get(edge_key) or self._neighbors.get((edge_key[1], edge_key[0])): return # The neighbor is already added. self._neighbors[edge_key] = edge self.dispatch_event(NeighborAddedEvent(other))
Adds a new neighbor to the node. Arguments: edge (Edge): The edge that would connect this node with its neighbor.
def header(self, array): """Specify the header of the table """ self._check_row_size(array) self._header = list(map(obj2unicode, array)) return self
Specify the header of the table
def relations_used(self): """ Return list of all relations used to connect edges """ g = self.get_graph() types = set() for (x,y,d) in g.edges(data=True): types.add(d['pred']) return list(types)
Return list of all relations used to connect edges
def _build_environ(self) -> Dict[str, Optional[str]]: """ Build environment variables suitable for passing to the Model. """ d: Dict[str, Optional[str]] = {} if self.__config__.case_insensitive: env_vars = {k.lower(): v for k, v in os.environ.items()} else: env_vars = cast(Dict[str, str], os.environ) for field in self.__fields__.values(): if field.has_alias: env_name = field.alias else: env_name = self.__config__.env_prefix + field.name.upper() env_name_ = env_name.lower() if self.__config__.case_insensitive else env_name env_val = env_vars.get(env_name_, None) if env_val: if field.is_complex(): try: env_val = json.loads(env_val) except ValueError as e: raise SettingsError(f'error parsing JSON for "{env_name}"') from e d[field.alias] = env_val return d
Build environment variables suitable for passing to the Model.
def transfer_size(self): """Size of transfer in bytes (e.g.: 8, 4k, 2m, 1g)""" ts = self.attributes['transfer_size'] if isinstance(ts, six.string_types): ts = shlex.split(ts) ts = [str(e) for e in ts] return ts
Size of transfer in bytes (e.g.: 8, 4k, 2m, 1g)
def ask(question): ''' Infinite loop to get yes or no answer or quit the script. ''' while True: ans = input(question) al = ans.lower() if match('^y(es)?$', al): return True elif match('^n(o)?$', al): return False elif match('^q(uit)?$', al): stdout.write(CYAN) print("\nGoodbye.\n") stdout.write(RESET) quit() else: stdout.write(RED) print("%s is invalid. Enter (y)es, (n)o or (q)uit." % ans) stdout.write(RESET)
Infinite loop to get yes or no answer or quit the script.
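A short illustration of the control flow around ask(). The question text is arbitrary, and the input/colour handling comes from the surrounding script.

# Hypothetical call site for ask().
if ask('Overwrite the existing config? '):
    print('overwriting...')
else:
    print('keeping the current config')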
def find_all_files(glob): """ Finds all files in the django finders for a given glob, returns the file path, if available, and the django storage object. storage objects must implement the File storage API: https://docs.djangoproject.com/en/dev/ref/files/storage/ """ for finder in finders.get_finders(): for path, storage in finder.list([]): if fnmatch.fnmatchcase(os.path.join(getattr(storage, 'prefix', '') or '', path), glob): yield path, storage
Finds all files in the django finders for a given glob, returns the file path, if available, and the django storage object. storage objects must implement the File storage API: https://docs.djangoproject.com/en/dev/ref/files/storage/
def get(self, path, content=True, type=None, format=None, load_alternative_format=True): """ Takes a path for an entity and returns its model""" path = path.strip('/') ext = os.path.splitext(path)[1] # Not a notebook? if not self.exists(path) or (type != 'notebook' if type else ext not in self.all_nb_extensions()): return super(TextFileContentsManager, self).get(path, content, type, format) fmt = preferred_format(ext, self.preferred_jupytext_formats_read) if ext == '.ipynb': model = self._notebook_model(path, content=content) else: self.set_default_format_options(fmt, read=True) with mock.patch('nbformat.reads', _jupytext_reads(fmt)): model = self._notebook_model(path, content=content) if not load_alternative_format: return model if not content: # Modification time of a paired notebook, in this context - Jupyter is checking timestamp # before saving - is the most recent among all representations #118 if path not in self.paired_notebooks: return model fmt, formats = self.paired_notebooks.get(path) for alt_path, _ in paired_paths(path, fmt, formats): if alt_path != path and self.exists(alt_path): alt_model = self._notebook_model(alt_path, content=False) if alt_model['last_modified'] > model['last_modified']: model['last_modified'] = alt_model['last_modified'] return model # We will now read a second file if this is a paired notebooks. nbk = model['content'] jupytext_formats = nbk.metadata.get('jupytext', {}).get('formats') or self.default_formats(path) jupytext_formats = long_form_multiple_formats(jupytext_formats) # Compute paired notebooks from formats alt_paths = [(path, fmt)] if jupytext_formats: try: _, fmt = find_base_path_and_format(path, jupytext_formats) alt_paths = paired_paths(path, fmt, jupytext_formats) self.update_paired_notebooks(path, fmt, jupytext_formats) except InconsistentPath as err: self.log.info("Unable to read paired notebook: %s", str(err)) else: if path in self.paired_notebooks: fmt, formats = self.paired_notebooks.get(path) alt_paths = paired_paths(path, fmt, formats) if len(alt_paths) > 1 and ext == '.ipynb': # Apply default options (like saving and reloading would do) jupytext_metadata = model['content']['metadata'].get('jupytext', {}) self.set_default_format_options(jupytext_metadata, read=True) if jupytext_metadata: model['content']['metadata']['jupytext'] = jupytext_metadata org_model = model fmt_inputs = fmt path_inputs = path_outputs = path model_outputs = None # Source format is first non ipynb format found on disk if path.endswith('.ipynb'): for alt_path, alt_fmt in alt_paths: if not alt_path.endswith('.ipynb') and self.exists(alt_path): self.log.info(u'Reading SOURCE from {}'.format(alt_path)) path_inputs = alt_path fmt_inputs = alt_fmt model_outputs = model model = self.get(alt_path, content=content, type=type, format=format, load_alternative_format=False) break # Outputs taken from ipynb if in group, if file exists else: for alt_path, _ in alt_paths: if alt_path.endswith('.ipynb') and self.exists(alt_path): self.log.info(u'Reading OUTPUTS from {}'.format(alt_path)) path_outputs = alt_path model_outputs = self.get(alt_path, content=content, type=type, format=format, load_alternative_format=False) break try: check_file_version(model['content'], path_inputs, path_outputs) except Exception as err: raise HTTPError(400, str(err)) # Before we combine the two files, we make sure we're not overwriting ipynb cells # with an outdated text file try: if model_outputs and model_outputs['last_modified'] > model['last_modified'] + \ 
timedelta(seconds=self.outdated_text_notebook_margin): raise HTTPError( 400, '''{out} (last modified {out_last}) seems more recent than {src} (last modified {src_last}) Please either: - open {src} in a text editor, make sure it is up to date, and save it, - or delete {src} if not up to date, - or increase check margin by adding, say, c.ContentsManager.outdated_text_notebook_margin = 5 # in seconds # or float("inf") to your .jupyter/jupyter_notebook_config.py file '''.format(src=path_inputs, src_last=model['last_modified'], out=path_outputs, out_last=model_outputs['last_modified'])) except OverflowError: pass if model_outputs: combine_inputs_with_outputs(model['content'], model_outputs['content'], fmt_inputs) elif not path.endswith('.ipynb'): nbk = model['content'] language = nbk.metadata.get('jupytext', {}).get('main_language', 'python') if 'kernelspec' not in nbk.metadata and language != 'python': kernelspec = kernelspec_from_language(language) if kernelspec: nbk.metadata['kernelspec'] = kernelspec # Trust code cells when they have no output for cell in model['content'].cells: if cell.cell_type == 'code' and not cell.outputs and cell.metadata.get('trusted') is False: cell.metadata['trusted'] = True # Path and name of the notebook is the one of the original path model['path'] = org_model['path'] model['name'] = org_model['name'] return model
Takes a path for an entity and returns its model
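A hedged usage sketch, not the project's documented API surface: it assumes jupytext is installed, that TextFileContentsManager can be instantiated directly (the import path is taken from older jupytext releases), and that notebook.py is a made-up file name for a text notebook in the served directory; when a paired notebook.ipynb exists, its outputs are merged into the returned model as described above.

from jupytext import TextFileContentsManager   # import path assumed from older jupytext releases

cm = TextFileContentsManager()
cm.root_dir = "."                              # directory served by the contents manager

# Reads notebook.py through jupytext; if a paired notebook.ipynb exists on disk,
# its outputs (and, for content=False, its more recent timestamp) are folded in.
model = cm.get("notebook.py", content=True)
print(model["type"])                           # 'notebook'
print(len(model["content"].cells))             # cells parsed from the script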
def write_file(content, *path):
    """
    Simply write some content to a file, overwriting the file if necessary.
    """
    with open(os.path.join(*path), "w") as file:
        return file.write(content)
Simply write some content to a file, overwriting the file if necessary.
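For illustration, a quick call with made-up path segments; they are joined with os.path.join before writing, and the enclosing directory must already exist.

# Writes "hello" to build/greeting.txt, creating or overwriting the file.
chars_written = write_file("hello", "build", "greeting.txt")
print(chars_written)  # 5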
def clean_whitespace(statement):
    """
    Remove any consecutive whitespace characters from the statement text.
    """
    import re

    # Replace line breaks and tabs with spaces
    statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')

    # Remove any leading or trailing whitespace
    statement.text = statement.text.strip()

    # Remove consecutive spaces
    statement.text = re.sub(' +', ' ', statement.text)

    return statement
Remove any consecutive whitespace characters from the statement text.
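A small usage sketch; the statement object below is a minimal stand-in with a text attribute rather than any particular library's statement class.

from types import SimpleNamespace

# Stand-in statement; only the .text attribute is needed by clean_whitespace().
stmt = SimpleNamespace(text="  Hello\tthere,\n\n  how   are you?  ")
cleaned = clean_whitespace(stmt)
print(cleaned.text)  # "Hello there, how are you?"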
def post(self, res_path, data=None, files=None, timeout=10.): """ Post operation. :param str res_path: Resource path. :param list data: Request parameters for data. :param list files: Request parameters for files. :param float timeout: Timeout in seconds. :rtype: tuple :return: Tuple with status code and response body. """ resp = requests.post( self.__res_uri(res_path), data=data, files=files, headers=self.__headers(), verify=False, auth=self.__auth(), timeout=timeout ) return ( resp.status_code, json.loads(resp.text) )
Post operation. :param str res_path: Resource path. :param list data: Request parameters for data. :param list files: Request parameters for files. :param float timeout: Timeout in seconds. :rtype: tuple :return: Tuple with status code and response body.
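A hedged usage sketch; client is assumed to be an already-constructed instance of the class defining post(), and the resource path and payload are made-up examples. The return value is the HTTP status code paired with the JSON-decoded body.

# Hypothetical resource and payload; the server must answer with JSON,
# since post() runs json.loads() on the response text.
status, body = client.post(
    "devices",
    data={"name": "sensor-1", "enabled": True},
    timeout=5.0,
)
if status in (200, 201):
    print("created:", body)
else:
    print("request failed with status", status)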
def handle_get_token(self, req): """Handles the various `request for token and service end point(s)` calls. There are various formats to support the various auth servers in the past. Examples:: GET <auth-prefix>/v1/<act>/auth X-Auth-User: <act>:<usr> or X-Storage-User: <usr> X-Auth-Key: <key> or X-Storage-Pass: <key> GET <auth-prefix>/auth X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr> X-Auth-Key: <key> or X-Storage-Pass: <key> GET <auth-prefix>/v1.0 X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr> X-Auth-Key: <key> or X-Storage-Pass: <key> Values should be url encoded, "act%3Ausr" instead of "act:usr" for example; however, for backwards compatibility the colon may be included unencoded. On successful authentication, the response will have X-Auth-Token and X-Storage-Token set to the token to use with Swift and X-Storage-URL set to the URL to the default Swift cluster to use. The response body will be set to the account's services JSON object as described here:: {"storage": { # Represents the Swift storage service end points "default": "cluster1", # Indicates which cluster is the default "cluster1": "<URL to use with Swift>", # A Swift cluster that can be used with this account, # "cluster1" is the name of the cluster which is usually a # location indicator (like "dfw" for a datacenter region). "cluster2": "<URL to use with Swift>" # Another Swift cluster that can be used with this account, # there will always be at least one Swift cluster to use or # this whole "storage" dict won't be included at all. }, "servers": { # Represents the Nova server service end points # Expected to be similar to the "storage" dict, but not # implemented yet. }, # Possibly other service dicts, not implemented yet. } One can also include an "X-Auth-New-Token: true" header to force issuing a new token and revoking any old token, even if it hasn't expired yet. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with data set as explained above. 
""" # Validate the request info try: pathsegs = split_path(req.path_info, minsegs=1, maxsegs=3, rest_with_last=True) except ValueError: return HTTPNotFound(request=req) if pathsegs[0] == 'v1' and pathsegs[2] == 'auth': account = pathsegs[1] user = req.headers.get('x-storage-user') if not user: user = unquote(req.headers.get('x-auth-user', '')) if not user or ':' not in user: return HTTPUnauthorized(request=req) account2, user = user.split(':', 1) if account != account2: return HTTPUnauthorized(request=req) key = req.headers.get('x-storage-pass') if not key: key = unquote(req.headers.get('x-auth-key', '')) elif pathsegs[0] in ('auth', 'v1.0'): user = unquote(req.headers.get('x-auth-user', '')) if not user: user = req.headers.get('x-storage-user') if not user or ':' not in user: return HTTPUnauthorized(request=req) account, user = user.split(':', 1) key = unquote(req.headers.get('x-auth-key', '')) if not key: key = req.headers.get('x-storage-pass') else: return HTTPBadRequest(request=req) if not all((account, user, key)): return HTTPUnauthorized(request=req) if user == '.super_admin' and self.super_admin_key and \ key == self.super_admin_key: token = self.get_itoken(req.environ) url = '%s/%s.auth' % (self.dsc_url, self.reseller_prefix) return Response( request=req, content_type=CONTENT_TYPE_JSON, body=json.dumps({'storage': {'default': 'local', 'local': url}}), headers={'x-auth-token': token, 'x-storage-token': token, 'x-storage-url': url}) # Authenticate user path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user)) resp = self.make_pre_authed_request( req.environ, 'GET', path).get_response(self.app) if resp.status_int == 404: return HTTPUnauthorized(request=req) if resp.status_int // 100 != 2: raise Exception('Could not obtain user details: %s %s' % (path, resp.status)) user_detail = json.loads(resp.body) if not self.credentials_match(user_detail, key): return HTTPUnauthorized(request=req) # See if a token already exists and hasn't expired token = None expires = None candidate_token = resp.headers.get('x-object-meta-auth-token') if candidate_token: object_name = self._get_concealed_token(candidate_token) path = quote('/v1/%s/.token_%s/%s' % (self.auth_account, object_name[-1], object_name)) delete_token = False try: if req.headers.get('x-auth-new-token', 'false').lower() in \ TRUE_VALUES: delete_token = True else: resp = self.make_pre_authed_request( req.environ, 'GET', path).get_response(self.app) if resp.status_int // 100 == 2: token_detail = json.loads(resp.body) if token_detail['expires'] > time(): token = candidate_token expires = token_detail['expires'] else: delete_token = True elif resp.status_int != 404: raise Exception( 'Could not detect whether a token already exists: ' '%s %s' % (path, resp.status)) finally: if delete_token: self.make_pre_authed_request( req.environ, 'DELETE', path).get_response(self.app) memcache_client = cache_from_env(req.environ) if memcache_client: memcache_key = '%s/auth/%s' % (self.reseller_prefix, candidate_token) memcache_client.delete(memcache_key) # Create a new token if one didn't exist if not token: # Retrieve account id, we'll save this in the token path = quote('/v1/%s/%s' % (self.auth_account, account)) resp = self.make_pre_authed_request( req.environ, 'HEAD', path).get_response(self.app) if resp.status_int // 100 != 2: raise Exception('Could not retrieve account id value: ' '%s %s' % (path, resp.status)) account_id = \ resp.headers['x-container-meta-account-id'] # Generate new token token = '%stk%s' % (self.reseller_prefix, uuid4().hex) 
# Save token info object_name = self._get_concealed_token(token) path = quote('/v1/%s/.token_%s/%s' % (self.auth_account, object_name[-1], object_name)) try: token_life = min( int(req.headers.get('x-auth-token-lifetime', self.token_life)), self.max_token_life) except ValueError: token_life = self.token_life expires = int(time() + token_life) resp = self.make_pre_authed_request( req.environ, 'PUT', path, json.dumps({'account': account, 'user': user, 'account_id': account_id, 'groups': user_detail['groups'], 'expires': expires})).get_response(self.app) if resp.status_int // 100 != 2: raise Exception('Could not create new token: %s %s' % (path, resp.status)) # Record the token with the user info for future use. path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user)) resp = self.make_pre_authed_request( req.environ, 'POST', path, headers={'X-Object-Meta-Auth-Token': token} ).get_response(self.app) if resp.status_int // 100 != 2: raise Exception('Could not save new token: %s %s' % (path, resp.status)) # Get the services information path = quote('/v1/%s/%s/.services' % (self.auth_account, account)) resp = self.make_pre_authed_request( req.environ, 'GET', path).get_response(self.app) if resp.status_int // 100 != 2: raise Exception('Could not obtain services info: %s %s' % (path, resp.status)) detail = json.loads(resp.body) url = detail['storage'][detail['storage']['default']] return Response( request=req, body=resp.body, content_type=CONTENT_TYPE_JSON, headers={'x-auth-token': token, 'x-storage-token': token, 'x-auth-token-expires': str(int(expires - time())), 'x-storage-url': url})
Handles the various `request for token and service end point(s)` calls. There are various formats to support the various auth servers in the past. Examples:: GET <auth-prefix>/v1/<act>/auth X-Auth-User: <act>:<usr> or X-Storage-User: <usr> X-Auth-Key: <key> or X-Storage-Pass: <key> GET <auth-prefix>/auth X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr> X-Auth-Key: <key> or X-Storage-Pass: <key> GET <auth-prefix>/v1.0 X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr> X-Auth-Key: <key> or X-Storage-Pass: <key> Values should be url encoded, "act%3Ausr" instead of "act:usr" for example; however, for backwards compatibility the colon may be included unencoded. On successful authentication, the response will have X-Auth-Token and X-Storage-Token set to the token to use with Swift and X-Storage-URL set to the URL to the default Swift cluster to use. The response body will be set to the account's services JSON object as described here:: {"storage": { # Represents the Swift storage service end points "default": "cluster1", # Indicates which cluster is the default "cluster1": "<URL to use with Swift>", # A Swift cluster that can be used with this account, # "cluster1" is the name of the cluster which is usually a # location indicator (like "dfw" for a datacenter region). "cluster2": "<URL to use with Swift>" # Another Swift cluster that can be used with this account, # there will always be at least one Swift cluster to use or # this whole "storage" dict won't be included at all. }, "servers": { # Represents the Nova server service end points # Expected to be similar to the "storage" dict, but not # implemented yet. }, # Possibly other service dicts, not implemented yet. } One can also include an "X-Auth-New-Token: true" header to force issuing a new token and revoking any old token, even if it hasn't expired yet. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with data set as explained above.
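From the client side, the exchange documented above can be exercised with a plain HTTP GET; a hedged sketch using the requests library, where the host, account, user and key are placeholders and the auth prefix is assumed to be /auth/.

import requests

resp = requests.get(
    "http://swift.example.com/auth/v1.0",          # placeholder endpoint
    headers={"X-Auth-User": "myaccount:myuser",    # account:user
             "X-Auth-Key": "mykey"},
)
resp.raise_for_status()

token = resp.headers["X-Auth-Token"]        # same value as X-Storage-Token
storage_url = resp.headers["X-Storage-URL"]
services = resp.json()                      # the account's services JSON object
print(storage_url, services["storage"]["default"])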
def browse(self, ml_item=None, start=0, max_items=100, full_album_art_uri=False, search_term=None, subcategories=None): """Browse (get sub-elements from) a music library item. Args: ml_item (`DidlItem`): the item to browse, if left out or `None`, items at the root level will be searched. start (int): the starting index of the results. max_items (int): the maximum number of items to return. full_album_art_uri (bool): whether the album art URI should be fully qualified with the relevant IP address. search_term (str): A string that will be used to perform a fuzzy search among the search results. If used in combination with subcategories, the fuzzy search will be performed on the subcategory. Note: Searching will not work if ``ml_item`` is `None`. subcategories (list): A list of strings that indicate one or more subcategories to descend into. Note: Providing sub categories will not work if ``ml_item`` is `None`. Returns: A `SearchResult` instance. Raises: AttributeError: if ``ml_item`` has no ``item_id`` attribute. SoCoUPnPException: with ``error_code='701'`` if the item cannot be browsed. """ if ml_item is None: search = 'A:' else: search = ml_item.item_id # Add sub categories if subcategories is not None: for category in subcategories: search += '/' + url_escape_path(really_unicode(category)) # Add fuzzy search if search_term is not None: search += ':' + url_escape_path(really_unicode(search_term)) try: response, metadata = \ self._music_lib_search(search, start, max_items) except SoCoUPnPException as exception: # 'No such object' UPnP errors if exception.error_code == '701': return SearchResult([], 'browse', 0, 0, None) else: raise exception metadata['search_type'] = 'browse' # Parse the results containers = from_didl_string(response['Result']) item_list = [] for container in containers: # Check if the album art URI should be fully qualified if full_album_art_uri: self._update_album_art_to_full_uri(container) item_list.append(container) # pylint: disable=star-args return SearchResult(item_list, **metadata)
Browse (get sub-elements from) a music library item. Args: ml_item (`DidlItem`): the item to browse, if left out or `None`, items at the root level will be searched. start (int): the starting index of the results. max_items (int): the maximum number of items to return. full_album_art_uri (bool): whether the album art URI should be fully qualified with the relevant IP address. search_term (str): A string that will be used to perform a fuzzy search among the search results. If used in combination with subcategories, the fuzzy search will be performed on the subcategory. Note: Searching will not work if ``ml_item`` is `None`. subcategories (list): A list of strings that indicate one or more subcategories to descend into. Note: Providing sub categories will not work if ``ml_item`` is `None`. Returns: A `SearchResult` instance. Raises: AttributeError: if ``ml_item`` has no ``item_id`` attribute. SoCoUPnPException: with ``error_code='701'`` if the item cannot be browsed.
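A brief usage sketch with SoCo, assuming a recent SoCo release where browse() lives on music_library and a speaker reachable at a placeholder IP address; calling it without arguments lists the root-level categories.

from soco import SoCo

speaker = SoCo("192.168.1.68")             # placeholder IP address
results = speaker.music_library.browse()   # root-level containers ('A:')
for item in results:
    print(item.title)

# Descend into the first returned container and fetch up to ten children.
children = speaker.music_library.browse(ml_item=results[0], max_items=10)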
def _run_paired(paired):
    """Run GATK4 CNV somatic copy number calling on a tumor/normal pair.
    """
    from bcbio.structural import titancna
    work_dir = _sv_workdir(paired.tumor_data)
    seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data),
                               work_dir, paired)
    call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data)
    out = []
    if paired.normal_data:
        out.append(paired.normal_data)
    if "sv" not in paired.tumor_data:
        paired.tumor_data["sv"] = []
    paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv",
                                    "call_file": call_file,
                                    "vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header,
                                                                _seg_to_vcf, paired.tumor_data),
                                    "seg": seg_files["seg"],
                                    "plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)})
    out.append(paired.tumor_data)
    return out
Run GATK4 CNV somatic copy number calling on a tumor/normal pair.
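A minimal sketch of how the records attached by this step might be inspected downstream; out stands for the list returned above, and only the keys the function itself sets are assumed.

# 'out' is the list returned by _run_paired(paired); the tumor sample dict
# carries the GATK4 CNV call under data["sv"].
for data in out:
    for sv_call in data.get("sv", []):
        if sv_call.get("variantcaller") == "gatk-cnv":
            print(sv_call["call_file"], sv_call["vrn_file"], sv_call["seg"])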