Columns: code (string, lengths 75 to 104k) and docstring (string, lengths 1 to 46.9k)
def getmap(self, path, query=None): """ Performs a GET request where the response content type is required to be "application/json" and the content is a JSON-encoded data structure. The decoded structure is returned. """ code, data, ctype = self.get(path, query) if ctype != 'application/json': self.log.error("Expecting JSON from GET of '%s', got '%s'", self.lastpath, ctype) raise HttpError(code=400, content_type='text/plain', content='Remote returned invalid content type: '+ctype) try: result = json.loads(data) except Exception as e: # pragma: no cover self.log.error("Could not load JSON content from GET %r -- %s", self.lastpath, e) raise HttpError(code=400, content_type='text/plain', content='Could not load JSON content') return result
Performs a GET request where the response content type is required to be "application/json" and the content is a JSON-encoded data structure. The decoded structure is returned.
def iterfiles(self): """Yield all WinFile objects. """ try: for path in self.order: yield self.files[path] except: for winfile in self.files.values(): yield winfile
Yield all WinFile objects.
def fov_for_height_and_distance(height, distance): """Calculate the FOV needed to get a given frustum height at a given distance. """ vfov_deg = np.degrees(2.0 * np.arctan(height * 0.5 / distance)) return vfov_deg
Calculate the FOV needed to get a given frustum height at a given distance.
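A quick worked example of the formula above (illustrative numbers only, reusing the function as defined): a frustum 2 units tall viewed from 10 units away needs roughly an 11.4 degree vertical FOV.

import numpy as np

def fov_for_height_and_distance(height, distance):
    # vertical FOV (degrees) whose frustum is `height` tall at `distance`
    return np.degrees(2.0 * np.arctan(height * 0.5 / distance))

print(round(fov_for_height_and_distance(2.0, 10.0), 1))  # 11.4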
def data(self, **query): """Query for Data object annotation.""" data = self.gencloud.project_data(self.id) query['case_ids__contains'] = self.id ids = set(d['id'] for d in self.gencloud.api.dataid.get(**query)['objects']) return [d for d in data if d.id in ids]
Query for Data object annotation.
def __create_channel_run(self, channel, username, token): """Sends a post request to create the channel run.""" data = { 'channel_id': channel.get_node_id().hex, 'chef_name': self.__get_chef_name(), 'ricecooker_version': __version__, 'started_by_user': username, 'started_by_user_token': token, 'content_server': config.DOMAIN, } try: response = requests.post( config.sushi_bar_channel_runs_url(), data=data, auth=AUTH) response.raise_for_status() return response.json()['run_id'] except Exception as e: config.LOGGER.error('Error channel run: %s' % e) return None
Sends a post request to create the channel run.
def download_extract(url): """download and extract file.""" logger.info("Downloading %s", url) request = urllib2.Request(url) request.add_header('User-Agent', 'caelum/0.1 +https://github.com/nrcharles/caelum') opener = urllib2.build_opener() with tempfile.TemporaryFile(suffix='.zip', dir=env.WEATHER_DATA_PATH) \ as local_file: logger.debug('Saving to temporary file %s', local_file.name) local_file.write(opener.open(request).read()) compressed_file = zipfile.ZipFile(local_file, 'r') logger.debug('Extracting %s', compressed_file) compressed_file.extractall(env.WEATHER_DATA_PATH) local_file.close()
download and extract file.
def save(self): """Save data.""" with open(self.filename, 'wb') as file: self.prune() self.data['version'] = self.version json.dump(self.data, file, sort_keys=True, indent=2)
Save data.
def _get_range(book, range_, sheet): """Return a range as nested dict of openpyxl cells.""" filename = None if isinstance(book, str): filename = book book = opxl.load_workbook(book, data_only=True) elif isinstance(book, opxl.Workbook): pass else: raise TypeError if _is_range_address(range_): sheet_names = [name.upper() for name in book.sheetnames] index = sheet_names.index(sheet.upper()) data = book.worksheets[index][range_] else: data = _get_namedrange(book, range_, sheet) if data is None: raise ValueError( "Named range '%s' not found in %s" % (range_, filename or book) ) return data
Return a range as nested dict of openpyxl cells.
def forwards(apps, schema_editor): """ Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition. """ Event = apps.get_model('spectator_events', 'Event') Work = apps.get_model('spectator_events', 'Work') WorkRole = apps.get_model('spectator_events', 'WorkRole') WorkSelection = apps.get_model('spectator_events', 'WorkSelection') for event in Event.objects.filter(kind='museum'): # Create a new Work based on this Event's details. work = Work.objects.create( kind='exhibition', title=event.title, title_sort=event.title_sort ) # This doesn't generate the slug field automatically because Django. # So we'll have to do it manually. Graarhhh. work.slug = generate_slug(work.pk) work.save() # Associate the new Work with the Event. WorkSelection.objects.create( event=event, work=work ) # Associate any Creators on the Event with the new Work. for role in event.roles.all(): WorkRole.objects.create( creator=role.creator, work=work, role_name=role.role_name, role_order=role.role_order ) # Remove Creators from the Event. role.delete()
Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition.
def extract_transformers_from_source(source): '''Scan a source for lines of the form from __experimental__ import transformer1 [,...] identifying transformers to be used. Such line is passed to the add_transformer function, after which it is removed from the code to be executed. ''' lines = source.split('\n') linenumbers = [] for number, line in enumerate(lines): if FROM_EXPERIMENTAL.match(line): add_transformers(line) linenumbers.insert(0, number) # drop the "fake" import from the source code for number in linenumbers: del lines[number] return '\n'.join(lines)
Scan a source for lines of the form from __experimental__ import transformer1 [,...] identifying transformers to be used. Such line is passed to the add_transformer function, after which it is removed from the code to be executed.
def save_related(self, request, form, formsets, change): """ Rebuilds the tree after saving items related to parent. """ super(MenuItemAdmin, self).save_related(request, form, formsets, change) self.model.objects.rebuild()
Rebuilds the tree after saving items related to parent.
def _remove_redundancy_routers(self, context, router_ids, ports, delete_ha_groups=False): """Deletes all interfaces of the specified redundancy routers and then the redundancy routers themselves. """ subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']} for port in ports] for r_id in router_ids: for i in range(len(subnets_info)): self.remove_router_interface(context, r_id, subnets_info[i]) LOG.debug("Removed interface on %(s_id)s to redundancy router " "with %(r_id)s", {'s_id': ports[i]['network_id'], 'r_id': r_id}) # There is only one ha group per network so only delete once if delete_ha_groups and r_id == router_ids[0]: self._delete_ha_group(context, ports[i]['id']) self.delete_router(context, r_id) LOG.debug("Deleted redundancy router %s", r_id)
Deletes all interfaces of the specified redundancy routers and then the redundancy routers themselves.
def do_rename(argdict): '''Rename a page.''' site = make_site_obj(argdict) slug = argdict['slug'] newtitle = argdict['newtitle'] try: site.rename_page(slug, newtitle) print "Renamed page." except ValueError: # pragma: no cover print "Cannot rename. A page with the given slug does not exist." sys.exit()
Rename a page.
def define(self, value, lineno, namespace=None): """ Defines label value. It can be anything. Even an AST """ if self.defined: error(lineno, "label '%s' already defined at line %i" % (self.name, self.lineno)) self.value = value self.lineno = lineno self.namespace = NAMESPACE if namespace is None else namespace
Defines label value. It can be anything. Even an AST
def _genA(self): """ Generate the matrix A in the Bartlett decomposition A is a lower triangular matrix, with A(i, j) ~ sqrt of Chisq(df - i + 1) when i == j ~ Normal() when i > j """ p, df = self._p, self.df A = np.zeros((p, p)) for i in range(p): A[i, i] = sqrt(st.chi2.rvs(df - i)) for j in range(p-1): for i in range(j+1, p): A[i, j] = np.random.randn() return A
Generate the matrix A in the Bartlett decomposition A is a lower triangular matrix, with A(i, j) ~ sqrt of Chisq(df - i + 1) when i == j ~ Normal() when i > j
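A self-contained sketch of the same Bartlett construction, assuming numpy and scipy.stats are available; the instance attributes self._p and self.df from the source are passed as plain arguments here.

import numpy as np
from scipy import stats as st

def bartlett_A(p, df, rng=None):
    # lower-triangular A: sqrt of chi-square draws on the diagonal, standard normals below it
    rng = rng or np.random.default_rng()
    A = np.zeros((p, p))
    for i in range(p):
        A[i, i] = np.sqrt(st.chi2.rvs(df - i, random_state=rng))
    for j in range(p - 1):
        for i in range(j + 1, p):
            A[i, j] = rng.standard_normal()
    return A

print(bartlett_A(3, df=5))  # one random 3x3 lower-triangular draw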
def load_data(path, fmt=None, bg_data=None, bg_fmt=None, meta_data={}, holo_kw={}, as_type="float32"): """Load experimental data Parameters ---------- path: str Path to experimental data file or folder fmt: str The file format to use (see `file_formats.formats`). If set to `None`, the file format is guessed. bg_data: str Path to background data file or `qpimage.QPImage` bg_fmt: str The file format to use (see `file_formats.formats`) for the background. If set to `None`, the file format is be guessed. meta_data: dict Meta data (see `qpimage.meta.DATA_KEYS`) as_type: str Defines the data type that the input data is casted to. The default is "float32" which saves memory. If high numerical accuracy is required (does not apply for a simple 2D phase analysis), set this to double precision ("float64"). Returns ------- dataobj: SeriesData or SingleData Object that gives lazy access to the experimental data. """ path = pathlib.Path(path).resolve() # sanity checks for kk in meta_data: if kk not in qpimage.meta.DATA_KEYS: msg = "Meta data key not allowed: {}".format(kk) raise ValueError(msg) # ignore None or nan values in meta_data for kk in list(meta_data.keys()): if meta_data[kk] in [np.nan, None]: meta_data.pop(kk) if fmt is None: fmt = guess_format(path) else: if not formats_dict[fmt].verify(path): msg = "Wrong file format '{}' for '{}'!".format(fmt, path) raise WrongFileFormatError(msg) dataobj = formats_dict[fmt](path=path, meta_data=meta_data, holo_kw=holo_kw, as_type=as_type) if bg_data is not None: if isinstance(bg_data, qpimage.QPImage): # qpimage instance dataobj.set_bg(bg_data) else: # actual data on disk bg_path = pathlib.Path(bg_data).resolve() if bg_fmt is None: bg_fmt = guess_format(bg_path) bgobj = formats_dict[bg_fmt](path=bg_path, meta_data=meta_data, holo_kw=holo_kw, as_type=as_type) dataobj.set_bg(bgobj) return dataobj
Load experimental data Parameters ---------- path: str Path to experimental data file or folder fmt: str The file format to use (see `file_formats.formats`). If set to `None`, the file format is guessed. bg_data: str Path to background data file or `qpimage.QPImage` bg_fmt: str The file format to use (see `file_formats.formats`) for the background. If set to `None`, the file format is guessed. meta_data: dict Meta data (see `qpimage.meta.DATA_KEYS`) as_type: str Defines the data type that the input data is cast to. The default is "float32" which saves memory. If high numerical accuracy is required (does not apply for a simple 2D phase analysis), set this to double precision ("float64"). Returns ------- dataobj: SeriesData or SingleData Object that gives lazy access to the experimental data.
def format(self, record): """Overridden method that applies SGR codes to log messages.""" # XXX: idea, colorize message arguments s = super(ANSIFormatter, self).format(record) if hasattr(self.context, 'ansi'): s = self.context.ansi(s, **self.get_sgr(record)) return s
Overridden method that applies SGR codes to log messages.
def _register_view(self, app, resource, *urls, **kwargs): """Bind resources to the app. :param app: an actual :class:`flask.Flask` app :param resource: :param urls: :param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower` Can be used to reference this route in :meth:`flask.url_for` :type endpoint: str Additional keyword arguments not specified above will be passed as-is to :meth:`flask.Flask.add_url_rule`. SIDE EFFECT Implements the one mentioned in add_resource """ endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower() self.endpoints.add(endpoint) if endpoint in getattr(app, 'view_class', {}): existing_view_class = app.view_functions[endpoint].__dict__['view_class'] # if you override the endpoint with a different class, avoid the collision by raising an exception if existing_view_class != resource: raise ValueError('Endpoint {!r} is already set to {!r}.' .format(endpoint, existing_view_class.__name__)) if not hasattr(resource, 'endpoint'): # Don't replace existing endpoint resource.endpoint = endpoint resource_func = self.output(resource.as_view(endpoint)) for decorator in chain(kwargs.pop('decorators', ()), self.decorators): resource_func = decorator(resource_func) for url in urls: rule = self._make_url(url, self.blueprint.url_prefix if self.blueprint else None) # If this Api has a blueprint if self.blueprint: # And this Api has been setup if self.blueprint_setup: # Set the rule to a string directly, as the blueprint # is already set up. self.blueprint_setup.add_url_rule(self._make_url(url, None), view_func=resource_func, **kwargs) continue else: # Set the rule to a function that expects the blueprint # prefix to construct the final url. Allows deferment # of url finalization in the case that the Blueprint # has not yet been registered to an application, so we # can wait for the registration prefix rule = partial(self._make_url, url) else: # If we've got no Blueprint, just build a url with no prefix rule = self._make_url(url, None) # Add the url to the application or blueprint app.add_url_rule(rule, view_func=resource_func, **kwargs)
Bind resources to the app. :param app: an actual :class:`flask.Flask` app :param resource: :param urls: :param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower` Can be used to reference this route in :meth:`flask.url_for` :type endpoint: str Additional keyword arguments not specified above will be passed as-is to :meth:`flask.Flask.add_url_rule`. SIDE EFFECT Implements the one mentioned in add_resource
def rm(self, container_alias): ''' a method to remove an active container :param container_alias: string with name or id of container :return: string with container id ''' title = '%s.rm' % self.__class__.__name__ # validate inputs input_fields = { 'container_alias': container_alias } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # run remove command sys_cmd = 'docker rm -f %s' % container_alias output_lines = self.command(sys_cmd).split('\n') return output_lines[0]
a method to remove an active container :param container_alias: string with name or id of container :return: string with container id
def _decode_embedded_list(src): ''' Convert embedded bytes to strings if possible. List helper. ''' output = [] for elem in src: if isinstance(elem, dict): elem = _decode_embedded_dict(elem) elif isinstance(elem, list): elem = _decode_embedded_list(elem) # pylint: disable=redefined-variable-type elif isinstance(elem, bytes): try: elem = elem.decode() except UnicodeError: pass output.append(elem) return output
Convert embedded bytes to strings if possible. List helper.
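A simplified, self-contained variant for illustration: the dict helper _decode_embedded_dict from the source is omitted, so only nested lists and bytes are handled here.

def decode_embedded_list(src):
    # recursively decode bytes elements of (possibly nested) lists where possible
    output = []
    for elem in src:
        if isinstance(elem, list):
            elem = decode_embedded_list(elem)
        elif isinstance(elem, bytes):
            try:
                elem = elem.decode()
            except UnicodeError:
                pass
        output.append(elem)
    return output

print(decode_embedded_list([b"salt", [b"minion", 42]]))  # ['salt', ['minion', 42]]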
def update_item(self, payload, last_modified=None): """ Update an existing item Accepts one argument, a dict containing Item data """ to_send = self.check_items([payload])[0] if last_modified is None: modified = payload["version"] else: modified = last_modified ident = payload["key"] headers = {"If-Unmodified-Since-Version": str(modified)} headers.update(self.default_headers()) req = requests.patch( url=self.endpoint + "/{t}/{u}/items/{id}".format( t=self.library_type, u=self.library_id, id=ident ), headers=headers, data=json.dumps(to_send), ) self.request = req try: req.raise_for_status() except requests.exceptions.HTTPError: error_handler(req) return True
Update an existing item Accepts one argument, a dict containing Item data
def from_local_name(acs, attr, name_format): """ :param acs: List of AttributeConverter instances :param attr: attribute name as string :param name_format: Which name-format it should be translated to :return: An Attribute instance """ for aconv in acs: #print(ac.format, name_format) if aconv.name_format == name_format: #print("Found a name_form converter") return aconv.to_format(attr) return attr
:param acs: List of AttributeConverter instances :param attr: attribute name as string :param name_format: Which name-format it should be translated to :return: An Attribute instance
def getFilename(name): """Get a filename from given name without dangerous or incompatible characters.""" # first replace all illegal chars name = re.sub(r"[^0-9a-zA-Z_\-\.]", "_", name) # then remove double dots and underscores while ".." in name: name = name.replace('..', '.') while "__" in name: name = name.replace('__', '_') # remove a leading dot or minus if name.startswith((".", "-")): name = name[1:] return name
Get a filename from given name without dangerous or incompatible characters.
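A usage sketch of the sanitizer above (same body, with re imported so it runs on its own; the sample inputs are made up).

import re

def getFilename(name):
    # replace illegal characters, collapse repeated dots/underscores, strip a leading dot or minus
    name = re.sub(r"[^0-9a-zA-Z_\-\.]", "_", name)
    while ".." in name:
        name = name.replace('..', '.')
    while "__" in name:
        name = name.replace('__', '_')
    if name.startswith((".", "-")):
        name = name[1:]
    return name

print(getFilename("..secret file.txt"))  # secret_file.txt
print(getFilename("répörts?.csv"))       # r_p_rts_.csv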
def _get_config_value(profile, config_name): ''' Helper function that returns a profile's configuration value based on the supplied configuration name. profile The profile name that contains configuration information. config_name The configuration item's name to use to return configuration values. ''' config = __salt__['config.option'](profile) if not config: raise CommandExecutionError( 'Authentication information could not be found for the ' '\'{0}\' profile.'.format(profile) ) config_value = config.get(config_name) if config_value is None: raise CommandExecutionError( 'The \'{0}\' parameter was not found in the \'{1}\' ' 'profile.'.format( config_name, profile ) ) return config_value
Helper function that returns a profile's configuration value based on the supplied configuration name. profile The profile name that contains configuration information. config_name The configuration item's name to use to return configuration values.
def _get_record_attrs(out_keys): """Check for records, a single key plus output attributes. """ if len(out_keys) == 1: attr = list(out_keys.keys())[0] if out_keys[attr]: return attr, out_keys[attr] return None, None
Check for records, a single key plus output attributes.
def AddClient(self, client): """Adds a client to the index. Args: client: A VFSGRRClient record to add or update. """ client_id, keywords = self.AnalyzeClient(client) self.AddKeywordsForName(client_id, keywords)
Adds a client to the index. Args: client: A VFSGRRClient record to add or update.
def pin(package, version, checks, marker, resolving, lazy, quiet): """Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to `none` will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments. """ root = get_root() package_name = package.lower() version = version.lower() for check_name in sorted(os.listdir(root)): pinned_reqs_file = os.path.join(root, check_name, 'requirements.in') resolved_reqs_file = os.path.join(root, check_name, 'requirements.txt') if os.path.isfile(pinned_reqs_file): pinned_packages = {package.name: package for package in read_packages(pinned_reqs_file)} if package not in pinned_packages and check_name not in checks: continue if resolving: pre_packages = list(read_packages(resolved_reqs_file)) else: pre_packages = list(itervalues(pinned_packages)) if not quiet: echo_info('Check `{}`:'.format(check_name)) if version == 'none': del pinned_packages[package_name] else: pinned_packages[package_name] = Package(package_name, version, marker) package_list = sorted(itervalues(pinned_packages)) write_file_lines(pinned_reqs_file, ('{}\n'.format(package) for package in package_list)) if not quiet: echo_waiting(' Resolving dependencies...') if resolving: result = resolve_requirements(pinned_reqs_file, resolved_reqs_file, lazy=lazy) if result.code: abort(result.stdout + result.stderr) if not quiet: post_packages = read_packages(resolved_reqs_file if resolving else pinned_reqs_file) display_package_changes(pre_packages, post_packages, indent=' ')
Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to `none` will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments.
def response_hook(self, r, **kwargs): """The actual hook handler.""" if r.status_code == 401: # Handle server auth. www_authenticate = r.headers.get('www-authenticate', '').lower() auth_type = _auth_type_from_header(www_authenticate) if auth_type is not None: return self.retry_using_http_NTLM_auth( 'www-authenticate', 'Authorization', r, auth_type, kwargs ) elif r.status_code == 407: # If we didn't have server auth, do proxy auth. proxy_authenticate = r.headers.get( 'proxy-authenticate', '' ).lower() auth_type = _auth_type_from_header(proxy_authenticate) if auth_type is not None: return self.retry_using_http_NTLM_auth( 'proxy-authenticate', 'Proxy-authorization', r, auth_type, kwargs ) return r
The actual hook handler.
def disable_multicolor(self): """ swap from the multicolor image to the single color image """ # disable the multicolor image for color in ['red', 'green', 'blue']: self.multicolorscales[color].config(state=tk.DISABLED, bg='grey') self.multicolorframes[color].config(bg='grey') self.multicolorlabels[color].config(bg='grey') self.multicolordropdowns[color].config(bg='grey', state=tk.DISABLED) self.multicolorminscale[color].config(bg='grey', state=tk.DISABLED) self.multicolormaxscale[color].config(bg='grey', state=tk.DISABLED) # enable the single color self.singlecolorscale.config(state=tk.NORMAL, bg=self.single_color_theme) self.singlecolorframe.config(bg=self.single_color_theme) self.singlecolorlabel.config(bg=self.single_color_theme) self.singlecolordropdown.config(bg=self.single_color_theme, state=tk.NORMAL) self.singlecolorminscale.config(bg=self.single_color_theme, state=tk.NORMAL) self.singlecolormaxscale.config(bg=self.single_color_theme, state=tk.NORMAL)
swap from the multicolor image to the single color image
def AssignTasksToClient(self, client_id): """Examines our rules and starts up flows based on the client. Args: client_id: Client id of the client for tasks to be assigned. Returns: Number of assigned tasks. """ rules = self.Get(self.Schema.RULES) if not rules: return 0 if data_store.RelationalDBEnabled(): last_foreman_run = self._GetLastForemanRunTimeRelational(client_id) else: last_foreman_run = self._GetLastForemanRunTime(client_id) latest_rule = max(rule.created for rule in rules) if latest_rule <= last_foreman_run: return 0 # Update the latest checked rule on the client. if data_store.RelationalDBEnabled(): try: self._SetLastForemanRunTimeRelational(client_id, latest_rule) except db.UnknownClientError: pass # If the relational db is used for reads, we don't have to update the # aff4 object. if not data_store.RelationalDBEnabled(): self._SetLastForemanRunTime(client_id, latest_rule) relevant_rules = [] expired_rules = False now = time.time() * 1e6 for rule in rules: if rule.expires < now: expired_rules = True continue if rule.created <= int(last_foreman_run): continue relevant_rules.append(rule) if data_store.RelationalDBEnabled(): client_data = data_store.REL_DB.ReadClientFullInfo(client_id) if client_data is None: return else: client_data = aff4.FACTORY.Open(client_id, mode="rw", token=self.token) actions_count = 0 for rule in relevant_rules: if self._EvaluateRules(rule, client_data): actions_count += self._RunActions(rule, client_id) if expired_rules: self.ExpireRules() return actions_count
Examines our rules and starts up flows based on the client. Args: client_id: Client id of the client for tasks to be assigned. Returns: Number of assigned tasks.
def update_hacluster_dns_ha(service, relation_data, crm_ocf='ocf:maas:dns'): """ Configure DNS-HA resources based on provided configuration @param service: Name of the service being configured @param relation_data: Pointer to dictionary of relation data. @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA """ # Validate the charm environment for DNS HA assert_charm_supports_dns_ha() settings = ['os-admin-hostname', 'os-internal-hostname', 'os-public-hostname', 'os-access-hostname'] # Check which DNS settings are set and update dictionaries hostname_group = [] for setting in settings: hostname = config(setting) if hostname is None: log('DNS HA: Hostname setting {} is None. Ignoring.' ''.format(setting), DEBUG) continue m = re.search('os-(.+?)-hostname', setting) if m: endpoint_type = m.group(1) # resolve_address's ADDRESS_MAP uses 'int' not 'internal' if endpoint_type == 'internal': endpoint_type = 'int' else: msg = ('Unexpected DNS hostname setting: {}. ' 'Cannot determine endpoint_type name' ''.format(setting)) status_set('blocked', msg) raise DNSHAException(msg) hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) if hostname_key in hostname_group: log('DNS HA: Resource {}: {} already exists in ' 'hostname group - skipping'.format(hostname_key, hostname), DEBUG) continue hostname_group.append(hostname_key) relation_data['resources'][hostname_key] = crm_ocf relation_data['resource_params'][hostname_key] = ( 'params fqdn="{}" ip_address="{}"' .format(hostname, resolve_address(endpoint_type=endpoint_type, override=False))) if len(hostname_group) >= 1: log('DNS HA: Hostname group is set with {} as members. ' 'Informing the ha relation'.format(' '.join(hostname_group)), DEBUG) relation_data['groups'] = { DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) } else: msg = 'DNS HA: Hostname group has no members.' status_set('blocked', msg) raise DNSHAException(msg)
Configure DNS-HA resources based on provided configuration @param service: Name of the service being configured @param relation_data: Pointer to dictionary of relation data. @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA
def binned_bitsets_from_list( list=[] ): """Read a list into a dictionary of bitsets""" last_chrom = None last_bitset = None bitsets = dict() for l in list: chrom = l[0] if chrom != last_chrom: if chrom not in bitsets: bitsets[chrom] = BinnedBitSet(MAX) last_chrom = chrom last_bitset = bitsets[chrom] start, end = int( l[1] ), int( l[2] ) last_bitset.set_range( start, end - start ) return bitsets
Read a list into a dictionary of bitsets
def RemoveDevice(self, object_path): '''Remove (forget) a device ''' adapter = mockobject.objects[self.path] adapter.EmitSignal(ADAPTER_IFACE, 'DeviceRemoved', 'o', [object_path])
Remove (forget) a device
def is_contextfree(self): """Returns True iff the grammar is context-free.""" for lhs, rhs in self.rules: if len(lhs) != 1: return False if lhs[0] not in self.nonterminals: return False return True
Returns True iff the grammar is context-free.
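A minimal standalone sketch of the same check (not the library's Grammar class); rules are assumed to be (lhs, rhs) tuples of symbol tuples.

def is_contextfree(rules, nonterminals):
    # context-free iff every left-hand side is exactly one nonterminal
    return all(len(lhs) == 1 and lhs[0] in nonterminals for lhs, _rhs in rules)

print(is_contextfree([(("S",), ("a", "S", "b")), (("S",), ())], {"S"}))  # True
print(is_contextfree([(("a", "S"), ("b",))], {"S"}))  # False: two symbols on the left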
def unregister(self, name): """Unregister function by name. """ try: name = name.name except AttributeError: pass return self.pop(name,None)
Unregister function by name.
def check_load(grid, mode): """ Checks for over-loading of branches and transformers for MV or LV grid. Parameters ---------- grid : GridDing0 Grid identifier. mode : str Kind of grid ('MV' or 'LV'). Returns ------- :obj:`dict` Dict of critical branches with max. relative overloading, and the following format:: { branch_1: rel_overloading_1, ..., branch_n: rel_overloading_n } :any:`list` of :obj:`GridDing0` List of critical transformers with the following format:: [trafo_1, ..., trafo_m] Notes ----- Lines'/cables' max. capacity (load case and feed-in case) are taken from [#]_. References ---------- .. [#] dena VNS See Also -------- ding0.flexopt.reinforce_measures.reinforce_branches_current : ding0.flexopt.reinforce_measures.reinforce_branches_voltage : """ crit_branches = {} crit_stations = [] if mode == 'MV': # load load factors (conditions) for cables, lines and trafos for load- and feedin case # load_factor_mv_trans_lc_normal = float(cfg_ding0.get('assumptions', # 'load_factor_mv_trans_lc_normal')) load_factor_mv_line_lc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_line_lc_normal')) load_factor_mv_cable_lc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_cable_lc_normal')) #load_factor_mv_trans_fc_normal = float(cfg_ding0.get('assumptions', # 'load_factor_mv_trans_fc_normal')) load_factor_mv_line_fc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_line_fc_normal')) load_factor_mv_cable_fc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_cable_fc_normal')) mw2kw = 1e3 kw2mw = 1e-3 # STEP 1: check branches' loads for branch in grid.graph_edges(): s_max_th = 3**0.5 * branch['branch'].type['U_n'] * branch['branch'].type['I_max_th'] if branch['branch'].kind == 'line': s_max_th_lcfc = [s_max_th * load_factor_mv_line_lc_normal, s_max_th * load_factor_mv_line_fc_normal] elif branch['branch'].kind == 'cable': s_max_th_lcfc = [s_max_th * load_factor_mv_cable_lc_normal, s_max_th * load_factor_mv_cable_fc_normal] else: raise ValueError('Branch kind is invalid!') # check loads only for non-aggregated Load Areas (aggregated ones are skipped raising except) try: # check if s_res exceeds allowed values for laod and feedin case # CAUTION: The order of values is fix! (1. load case, 2. feedin case) if any([s_res * mw2kw > _ for s_res, _ in zip(branch['branch'].s_res, s_max_th_lcfc)]): # save max. relative overloading crit_branches[branch] = max(branch['branch'].s_res) * mw2kw / s_max_th except: pass # STEP 2: check HV-MV station's load # NOTE: HV-MV station reinforcement is not required for status-quo # scenario since HV-MV trafos already sufficient for load+generation # case as done in MVStationDing0.choose_transformers() # OLD snippet: # cum_peak_load = grid.grid_district.peak_load # cum_peak_generation = grid.station().peak_generation(mode='MVLV') # # # reinforcement necessary only if generation > load # if cum_peak_generation > cum_peak_load: # grid.station().choose_transformers # # cum_trafo_capacity = sum((_.s_max_a for _ in grid.station().transformers())) # # max_trafo = max((_.s_max_a for _ in grid.station().transformers())) # # # determine number and size of required transformers # kw2mw = 1e-3 # residual_apparent_power = cum_generation_sum * kw2mw - \ # cum_trafo_capacity elif mode == 'LV': raise NotImplementedError if crit_branches: logger.info('==> {} branches have load issues.'.format( len(crit_branches))) if crit_stations: logger.info('==> {} stations have load issues.'.format( len(crit_stations))) return crit_branches, crit_stations
Checks for over-loading of branches and transformers for MV or LV grid. Parameters ---------- grid : GridDing0 Grid identifier. mode : str Kind of grid ('MV' or 'LV'). Returns ------- :obj:`dict` Dict of critical branches with max. relative overloading, and the following format:: { branch_1: rel_overloading_1, ..., branch_n: rel_overloading_n } :any:`list` of :obj:`GridDing0` List of critical transformers with the following format:: [trafo_1, ..., trafo_m] Notes ----- Lines'/cables' max. capacity (load case and feed-in case) are taken from [#]_. References ---------- .. [#] dena VNS See Also -------- ding0.flexopt.reinforce_measures.reinforce_branches_current : ding0.flexopt.reinforce_measures.reinforce_branches_voltage :
def compile_relative_distances(self, sympy_accesses=None): """ Return load and store distances between accesses. :param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses() e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1] returned is a dict of list of sympy expressions, for each variable """ if sympy_accesses is None: sympy_accesses = self.compile_sympy_accesses() sympy_distances = defaultdict(list) for var_name, accesses in sympy_accesses.items(): for i in range(1, len(accesses)): sympy_distances[var_name].append((accesses[i-1]-accesses[i]).simplify()) return sympy_distances
Return load and store distances between accesses. :param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses() e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1] returned is a dict of list of sympy expressions, for each variable
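The docstring's own example worked through directly with sympy (standalone; the accesses list is assumed input rather than the output of compile_sympy_accesses()).

import sympy as sp

N = sp.Symbol('N')
accesses = [N, sp.Integer(1), sp.Integer(-1), -N]
# pairwise differences between consecutive accesses, as in the method above
distances = [(accesses[i - 1] - accesses[i]).simplify() for i in range(1, len(accesses))]
print(distances)  # [N - 1, 2, N - 1]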
def get_pending_withdrawals(self, currency=None): """ Used to view your pending withdrawals Endpoint: 1.1 NO EQUIVALENT 2.0 /key/balance/getpendingwithdrawals :param currency: String literal for the currency (ie. BTC) :type currency: str :return: pending withdrawals in JSON :rtype : list """ return self._api_query(path_dict={ API_V2_0: '/key/balance/getpendingwithdrawals' }, options={'currencyname': currency} if currency else None, protection=PROTECTION_PRV)
Used to view your pending withdrawals Endpoint: 1.1 NO EQUIVALENT 2.0 /key/balance/getpendingwithdrawals :param currency: String literal for the currency (ie. BTC) :type currency: str :return: pending withdrawals in JSON :rtype : list
def new(cls, settings, *args, **kwargs): """ Create a new Cloud instance based on the Settings """ logger.debug('Initializing new "%s" Instance object' % settings['CLOUD']) cloud = settings['CLOUD'] if cloud == 'bare': self = BareInstance(settings=settings, *args, **kwargs) elif cloud == 'aws': self = AWSInstance(settings=settings, *args, **kwargs) elif cloud == 'gcp': self = GCPInstance(settings=settings, *args, **kwargs) else: raise DSBException('Cloud "%s" not supported' % cloud) return self
Create a new Cloud instance based on the Settings
def compile(self, source, name=None, filename=None, raw=False, defer_init=False): """Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. the `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is use internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added. """ source_hint = None try: if isinstance(source, string_types): source_hint = source source = self._parse(source, name, filename) source = self._generate(source, name, filename, defer_init=defer_init) if raw: return source if filename is None: filename = '<template>' else: filename = encode_filename(filename) return self._compile(source, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source_hint)
Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. The `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is used internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added.
def decorator_handle(tokens): """Process decorators.""" defs = [] decorates = [] for i, tok in enumerate(tokens): if "simple" in tok and len(tok) == 1: decorates.append("@" + tok[0]) elif "test" in tok and len(tok) == 1: varname = decorator_var + "_" + str(i) defs.append(varname + " = " + tok[0]) decorates.append("@" + varname) else: raise CoconutInternalException("invalid decorator tokens", tok) return "\n".join(defs + decorates) + "\n"
Process decorators.
def _extract_coeffs(self, imt): """ Extract dictionaries of coefficients specific to required intensity measure type. """ C_HR = self.COEFFS_HARD_ROCK[imt] C_BC = self.COEFFS_BC[imt] C_SR = self.COEFFS_SOIL_RESPONSE[imt] SC = self.COEFFS_STRESS[imt] return C_HR, C_BC, C_SR, SC
Extract dictionaries of coefficients specific to required intensity measure type.
def jtype(c): """ Return a string with the data type of a value, for JSON data """ ct = c['type'] return ct if ct != 'literal' else '{}, {}'.format(ct, c.get('xml:lang'))
Return a string with the data type of a value, for JSON data
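Example with two hypothetical SPARQL-JSON bindings, using the function exactly as defined above.

def jtype(c):
    # literal values are reported together with their language tag
    ct = c['type']
    return ct if ct != 'literal' else '{}, {}'.format(ct, c.get('xml:lang'))

print(jtype({'type': 'uri', 'value': 'http://example.org/x'}))  # uri
print(jtype({'type': 'literal', 'xml:lang': 'en', 'value': 'hello'}))  # literal, en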
def _bool_segments(array, start=0, delta=1, minlen=1): """Yield segments of consecutive `True` values in a boolean array Parameters ---------- array : `iterable` An iterable of boolean-castable values. start : `float` The value of the first sample on the indexed axis (e.g.the GPS start time of the array). delta : `float` The step size on the indexed axis (e.g. sample duration). minlen : `int`, optional The minimum number of consecutive `True` values for a segment. Yields ------ segment : `tuple` ``(start + i * delta, start + (i + n) * delta)`` for a sequence of ``n`` consecutive True values starting at position ``i``. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3. The datatype of the values returned will be the larger of the types of ``start`` and ``delta``. Examples -------- >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1])) [(1, 2), (5, 8), (9, 10)] >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1] ... start=100., delta=0.1)) [(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)] """ array = iter(array) i = 0 while True: try: # get next value val = next(array) except StopIteration: # end of array return if val: # start of new segment n = 1 # count consecutive True try: while next(array): # run until segment will end n += 1 except StopIteration: # have reached the end return # stop finally: # yield segment (including at StopIteration) if n >= minlen: # ... if long enough yield (start + i * delta, start + (i + n) * delta) i += n i += 1
Yield segments of consecutive `True` values in a boolean array Parameters ---------- array : `iterable` An iterable of boolean-castable values. start : `float` The value of the first sample on the indexed axis (e.g.the GPS start time of the array). delta : `float` The step size on the indexed axis (e.g. sample duration). minlen : `int`, optional The minimum number of consecutive `True` values for a segment. Yields ------ segment : `tuple` ``(start + i * delta, start + (i + n) * delta)`` for a sequence of ``n`` consecutive True values starting at position ``i``. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3. The datatype of the values returned will be the larger of the types of ``start`` and ``delta``. Examples -------- >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1])) [(1, 2), (5, 8), (9, 10)] >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1] ... start=100., delta=0.1)) [(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)]
def sticker_templates(): """ It returns the registered stickers in the system. :return: a DisplayList object """ voc = DisplayList() stickers = getStickerTemplates() for sticker in stickers: voc.add(sticker.get('id'), sticker.get('title')) if voc.index == 0: logger.warning('Sampletype: getStickerTemplates is empty!') return voc
It returns the registered stickers in the system. :return: a DisplayList object
def exclude(source, keys, *, transform=None): """Returns a dictionary excluding keys from a source dictionary. :source: a dictionary :keys: a set of keys, or a predicate function that accepts a key :transform: a function that transforms the values """ check = keys if callable(keys) else lambda key: key in keys return {key: transform(source[key]) if transform else source[key] for key in source if not check(key)}
Returns a dictionary excluding keys from a source dictionary. :source: a dictionary :keys: a set of keys, or a predicate function that accepts a key :transform: a function that transforms the values
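A usage sketch with a made-up dictionary, using the function as defined above.

def exclude(source, keys, *, transform=None):
    # keep keys that fail the membership test / predicate, optionally transforming values
    check = keys if callable(keys) else lambda key: key in keys
    return {key: transform(source[key]) if transform else source[key]
            for key in source if not check(key)}

print(exclude({'a': 1, 'b': 2, 'c': 3}, {'b'}))  # {'a': 1, 'c': 3}
print(exclude({'a': 1, '_b': 2}, lambda k: k.startswith('_'), transform=str))  # {'a': '1'}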
def coerce(self, values): """Convert an iterable of literals to an iterable of options. Args: values (iterable or string): An iterable of raw values to convert into options. If the value is a string it is assumed to be a comma separated list and will be split before processing. Returns: iterable: An iterable of option values initialized with the raw values from `values`. Raises: TypeError: If `values` is not iterable or string. TypeError: If the underlying option raises a TypeError. ValueError: If the underlying option raises a ValueError. """ if isinstance(values, compat.basestring): values = tuple(value.strip() for value in values.split(',')) # Create a list of options to store each value. opt_iter = tuple(copy.deepcopy(self._option) for value in values) for opt_obj, val in compat.zip(opt_iter, values): opt_obj.__set__(None, val) return opt_iter
Convert an iterable of literals to an iterable of options. Args: values (iterable or string): An iterable of raw values to convert into options. If the value is a string it is assumed to be a comma separated list and will be split before processing. Returns: iterable: An iterable of option values initialized with the raw values from `values`. Raises: TypeError: If `values` is not iterable or string. TypeError: If the underlying option raises a TypeError. ValueError: If the underlying option raises a ValueError.
def params_of_mean(value=array([-.005, 1.]), tau=.1, rate=4.): """ Intercept and slope of rate stochastic of poisson distribution Rate stochastic must be positive for t in [0,T] p(intercept, slope|tau,rate) = N(slope|0,tau) Exp(intercept|rate) 1(intercept>0) 1(intercept + slope * T>0) """ def logp(value, tau, rate): if value[1] > 0 and value[1] + value[0] * 110 > 0: return normal_like(value[0], 0., tau) + \ exponential_like(value[1], rate) else: return -Inf def random(tau, rate): val = zeros(2) val[0] = rnormal(0., tau) val[1] = rexponential(rate) while val[1] < 0 or val[1] + val[0] * 110 <= 0: val[0] = rnormal(0., tau) val[1] = rexponential(rate) return val
Intercept and slope of rate stochastic of poisson distribution Rate stochastic must be positive for t in [0,T] p(intercept, slope|tau,rate) = N(slope|0,tau) Exp(intercept|rate) 1(intercept>0) 1(intercept + slope * T>0)
def _get_color(self, r, g, b): """Convert red, green and blue values specified in floats with range 0-1 to whatever the native widget color object is. """ clr = (r, g, b) return clr
Convert red, green and blue values specified in floats with range 0-1 to whatever the native widget color object is.
def wrap(msg, indent, indent_first=True): """ Helper function that wraps msg to 120-chars page width. All lines (except maybe 1st) will be prefixed with string {indent}. First line is prefixed only if {indent_first} is True. :param msg: string to indent :param indent: string that will be used for indentation :param indent_first: if True then the first line will be indented as well, otherwise not """ wrapper.width = 120 wrapper.initial_indent = indent wrapper.subsequent_indent = indent msg = wrapper.fill(msg) return msg if indent_first else msg[len(indent):]
Helper function that wraps msg to 120-chars page width. All lines (except maybe 1st) will be prefixed with string {indent}. First line is prefixed only if {indent_first} is True. :param msg: string to indent :param indent: string that will be used for indentation :param indent_first: if True then the first line will be indented as well, otherwise not
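A self-contained version for illustration, assuming wrapper is a module-level textwrap.TextWrapper instance as the source suggests.

import textwrap

wrapper = textwrap.TextWrapper()

def wrap(msg, indent, indent_first=True):
    # wrap to 120 columns, prefixing every line (optionally except the first) with `indent`
    wrapper.width = 120
    wrapper.initial_indent = indent
    wrapper.subsequent_indent = indent
    msg = wrapper.fill(msg)
    return msg if indent_first else msg[len(indent):]

print(wrap("a fairly long help message " * 8, indent="    ", indent_first=False))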
def toposort(data): """ Dependencies are expressed as a dictionary whose keys are items and whose values are a set of dependent items. Output is a list of sets in topological order. The first set consists of items with no dependences, each subsequent set consists of items that depend upon items in the preceeding sets. :param data: :type data: :return: :rtype: """ # Special case empty input. if len(data) == 0: return # Copy the input so as to leave it unmodified. data = data.copy() # Ignore self dependencies. for k, v in data.items(): v.discard(k) # Find all items that don't depend on anything. extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys()) # Add empty dependences where needed. data.update(dict((item, set()) for item in extra_items_in_deps)) while True: ordered = set(item for item, dep in data.items() if len(dep) == 0) if not ordered: break yield ordered data = dict((item, (dep - ordered)) for item, dep in data.items() if item not in ordered) if len(data) != 0: raise CyclicDependency(data)
Dependencies are expressed as a dictionary whose keys are items and whose values are a set of dependent items. Output is a list of sets in topological order. The first set consists of items with no dependencies, each subsequent set consists of items that depend upon items in the preceding sets. :param data: :type data: :return: :rtype:
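A small usage example; it relies on the toposort generator defined above (plus reduce and CyclicDependency from the same module), so it is not standalone. Here 'c' depends on 'a' and 'b', and 'b' depends on 'a'; a circular input would raise CyclicDependency instead.

deps = {"c": {"a", "b"}, "b": {"a"}}
print(list(toposort(deps)))  # [{'a'}, {'b'}, {'c'}]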
def int_list_packer(term, values): """ return singletons, ranges and exclusions """ DENSITY = 10 # a range can have holes, this is inverse of the hole density MIN_RANGE = 20 # min members before a range is allowed to be used singletons = set() ranges = [] exclude = set() sorted = jx.sort(values) last = sorted[0] curr_start = last curr_excl = set() for v in sorted[1::]: if v <= last + 1: pass elif v - last > 3: # big step, how do we deal with it? if last == curr_start: # not a range yet, so just add as singlton singletons.add(last) elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY): # small ranges are singletons, sparse ranges are singletons singletons |= set(range(curr_start, last + 1)) singletons -= curr_excl else: # big enough, and dense enough range ranges.append({"gte": curr_start, "lte": last}) exclude |= curr_excl curr_start = v curr_excl = set() else: if 1 + last - curr_start >= len(curr_excl) * DENSITY: # high density, keep track of excluded and continue add_me = set(range(last + 1, v)) curr_excl |= add_me elif 1 + last - curr_start - len(curr_excl) < MIN_RANGE: # not big enough, convert range to singletons new_singles = set(range(curr_start, last + 1)) - curr_excl singletons = singletons | new_singles curr_start = v curr_excl = set() else: ranges.append({"gte": curr_start, "lte": last}) exclude |= curr_excl curr_start = v curr_excl = set() last = v if last == curr_start: # not a range yet, so just add as singlton singletons.add(last) elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY): # small ranges are singletons, sparse ranges are singletons singletons |= set(range(curr_start, last + 1)) singletons -= curr_excl else: # big enough, and dense enough range ranges.append({"gte": curr_start, "lte": last}) exclude |= curr_excl if ranges: r = {"or": [{"range": {term: r}} for r in ranges]} if exclude: r = {"and": [r, {"not": {"terms": {term: jx.sort(exclude)}}}]} if singletons: return {"or": [ {"terms": {term: jx.sort(singletons)}}, r ]} else: return r else: return {"terms": {term: values}}
return singletons, ranges and exclusions
def delete(self): """Delete the instance.""" if lib.EnvDeleteInstance(self._env, self._ist) != 1: raise CLIPSError(self._env)
Delete the instance.
def wrap_many(self, *args, strict=False): """Wraps different copies of this element inside all empty tags listed in params or param's (non-empty) iterators. Returns list of copies of this element wrapped inside args or None if not succeeded, in the same order and same structure, i.e. args = (Div(), (Div())) -> value = (A(...), (A(...))) If on some args it must raise TagError, it will only if strict is True, otherwise it will do nothing with them and return Nones on their positions""" for arg in args: is_elem = arg and isinstance(arg, DOMElement) is_elem_iter = ( not is_elem and arg and isinstance(arg, Iterable) and isinstance(iter(arg).__next__(), DOMElement) ) if not (is_elem or is_elem_iter): raise WrongArgsError( self, "Argument {} is not DOMElement nor iterable of DOMElements".format( arg ), ) wcopies = [] failure = [] def wrap_next(tag, idx): nonlocal wcopies, failure next_copy = self.__copy__() try: return next_copy.wrap(tag) except TagError: failure.append(idx) return next_copy for arg_idx, arg in enumerate(args): if isinstance(arg, DOMElement): wcopies.append(wrap_next(arg, (arg_idx, -1))) else: iter_wcopies = [] for iter_idx, t in enumerate(arg): iter_wcopies.append(wrap_next(t, (arg_idx, iter_idx))) wcopies.append(type(arg)(iter_wcopies)) if failure and strict: raise TagError( self, "Wrapping in a non empty Tag is forbidden, failed on arguments " + ", ".join( list( map( lambda idx: str(idx[0]) if idx[1] == -1 else "[{1}] of {0}".format(*idx), failure, ) ) ), ) return wcopies
Wraps different copies of this element inside all empty tags listed in params or param's (non-empty) iterators. Returns list of copies of this element wrapped inside args or None if not succeeded, in the same order and same structure, i.e. args = (Div(), (Div())) -> value = (A(...), (A(...))) If on some args it must raise TagError, it will only if strict is True, otherwise it will do nothing with them and return Nones on their positions
def update(did): """Update DDO of an existing asset --- tags: - ddo consumes: - application/json parameters: - in: body name: body required: true description: DDO of the asset. schema: type: object required: - "@context" - created - id - publicKey - authentication - proof - service properties: "@context": description: example: https://w3id.org/future-method/v1 type: string id: description: ID of the asset. example: did:op:123456789abcdefghi type: string created: description: date of ddo creation. example: "2016-02-08T16:02:20Z" type: string publicKey: type: array description: List of public keys. example: [{"id": "did:op:123456789abcdefghi#keys-1"}, {"type": "Ed25519VerificationKey2018"}, {"owner": "did:op:123456789abcdefghi"}, {"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}] authentication: type: array description: List of authentication mechanisms. example: [{"type": "RsaSignatureAuthentication2018"}, {"publicKey": "did:op:123456789abcdefghi#keys-1"}] proof: type: dictionary description: Information about the creation and creator of the asset. example: {"type": "UUIDSignature", "created": "2016-02-08T16:02:20Z", "creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja", "signatureValue": "QNB13Y7Q9...1tzjn4w==" } service: type: array description: List of services. example: [{"type": "Access", "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${ pubKey}&serviceId={serviceId}&url={url}"}, {"type": "Compute", "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${ pubKey}&serviceId={serviceId}&algo={algo}&container={container}"}, { "type": "Metadata", "serviceDefinitionId": "2", "serviceEndpoint": "http://myaquarius.org/api/v1/provider/assets/metadata/{did}", "metadata": { "base": { "name": "UK Weather information 2011", "type": "dataset", "description": "Weather information of UK including temperature and humidity", "dateCreated": "2012-02-01T10:55:11Z", "author": "Met Office", "license": "CC-BY", "copyrightHolder": "Met Office", "compression": "zip", "workExample": "stationId,latitude,longitude,datetime, temperature,humidity/n423432fsd,51.509865,-0.118092, 2011-01-01T10:55:11+00:00,7.2,68", "files": [{ "contentLength": "4535431", "contentType": "text/csv", "encoding": "UTF-8", "compression": "zip", "resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932" } ], "encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv", "links": [{ "name": "Sample of Asset Data", "type": "sample", "url": "https://foo.com/sample.csv" }, { "name": "Data Format Definition", "type": "format", "AssetID": "4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea" } ], "inLanguage": "en", "tags": "weather, uk, 2011, temperature, humidity", "price": 10, "checksum": "38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262" }, "curation": { "rating": 0.93, "numVotes": 123, "schema": "Binary Voting" }, "additionalInformation": { "updateFrecuency": "yearly", "structuredMarkup": [{ "uri": "http://skos.um.es/unescothes/C01194/jsonld", "mediaType": "application/ld+json" }, { "uri": "http://skos.um.es/unescothes/C01194/turtle", "mediaType": "text/turtle" } ] } } }] responses: 200: description: Asset successfully updated. 201: description: Asset successfully registered. 400: description: One of the required attributes is missing. 404: description: Invalid asset data. 
500: description: Error """ required_attributes = ['@context', 'created', 'id', 'publicKey', 'authentication', 'proof', 'service'] required_metadata_base_attributes = ['name', 'dateCreated', 'author', 'license', 'price', 'encryptedFiles', 'type', 'checksum'] required_metadata_curation_attributes = ['rating', 'numVotes'] assert isinstance(request.json, dict), 'invalid payload format.' data = request.json if not data: logger.error(f'request body seems empty, expecting {required_attributes}') return 400 msg, status = check_required_attributes(required_attributes, data, 'update') if msg: return msg, status msg, status = check_required_attributes(required_metadata_base_attributes, _get_base_metadata(data['service']), 'update') if msg: return msg, status msg, status = check_required_attributes(required_metadata_curation_attributes, _get_curation_metadata(data['service']), 'update') if msg: return msg, status msg, status = check_no_urls_in_files(_get_base_metadata(data['service']), 'register') if msg: return msg, status msg, status = validate_date_format(data['created']) if msg: return msg, status _record = dict() _record = copy.deepcopy(data) _record['created'] = datetime.strptime(data['created'], '%Y-%m-%dT%H:%M:%SZ') try: if dao.get(did) is None: register() return _sanitize_record(_record), 201 else: for service in _record['service']: service_id = int(service['serviceDefinitionId']) if service['type'] == 'Metadata': _record['service'][service_id]['metadata']['base']['datePublished'] = _get_date( dao.get(did)['service']) dao.update(_record, did) return Response(_sanitize_record(_record), 200, content_type='application/json') except Exception as err: return f'Some error: {str(err)}', 500
Update DDO of an existing asset --- tags: - ddo consumes: - application/json parameters: - in: body name: body required: true description: DDO of the asset. schema: type: object required: - "@context" - created - id - publicKey - authentication - proof - service properties: "@context": description: example: https://w3id.org/future-method/v1 type: string id: description: ID of the asset. example: did:op:123456789abcdefghi type: string created: description: date of ddo creation. example: "2016-02-08T16:02:20Z" type: string publicKey: type: array description: List of public keys. example: [{"id": "did:op:123456789abcdefghi#keys-1"}, {"type": "Ed25519VerificationKey2018"}, {"owner": "did:op:123456789abcdefghi"}, {"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}] authentication: type: array description: List of authentication mechanisms. example: [{"type": "RsaSignatureAuthentication2018"}, {"publicKey": "did:op:123456789abcdefghi#keys-1"}] proof: type: dictionary description: Information about the creation and creator of the asset. example: {"type": "UUIDSignature", "created": "2016-02-08T16:02:20Z", "creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja", "signatureValue": "QNB13Y7Q9...1tzjn4w==" } service: type: array description: List of services. example: [{"type": "Access", "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${ pubKey}&serviceId={serviceId}&url={url}"}, {"type": "Compute", "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${ pubKey}&serviceId={serviceId}&algo={algo}&container={container}"}, { "type": "Metadata", "serviceDefinitionId": "2", "serviceEndpoint": "http://myaquarius.org/api/v1/provider/assets/metadata/{did}", "metadata": { "base": { "name": "UK Weather information 2011", "type": "dataset", "description": "Weather information of UK including temperature and humidity", "dateCreated": "2012-02-01T10:55:11Z", "author": "Met Office", "license": "CC-BY", "copyrightHolder": "Met Office", "compression": "zip", "workExample": "stationId,latitude,longitude,datetime, temperature,humidity/n423432fsd,51.509865,-0.118092, 2011-01-01T10:55:11+00:00,7.2,68", "files": [{ "contentLength": "4535431", "contentType": "text/csv", "encoding": "UTF-8", "compression": "zip", "resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932" } ], "encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv", "links": [{ "name": "Sample of Asset Data", "type": "sample", "url": "https://foo.com/sample.csv" }, { "name": "Data Format Definition", "type": "format", "AssetID": "4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea" } ], "inLanguage": "en", "tags": "weather, uk, 2011, temperature, humidity", "price": 10, "checksum": "38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262" }, "curation": { "rating": 0.93, "numVotes": 123, "schema": "Binary Voting" }, "additionalInformation": { "updateFrecuency": "yearly", "structuredMarkup": [{ "uri": "http://skos.um.es/unescothes/C01194/jsonld", "mediaType": "application/ld+json" }, { "uri": "http://skos.um.es/unescothes/C01194/turtle", "mediaType": "text/turtle" } ] } } }] responses: 200: description: Asset successfully updated. 201: description: Asset successfully registered. 400: description: One of the required attributes is missing. 404: description: Invalid asset data. 500: description: Error
def check_in(self, url: str, new_status: Status, increment_try_count: bool=True, url_result: Optional[URLResult]=None): '''Update record for processed URL. Args: url: The URL. new_status: Update the item status to `new_status`. increment_try_count: Whether to increment the try counter for the URL. url_result: Additional values. '''
Update record for processed URL. Args: url: The URL. new_status: Update the item status to `new_status`. increment_try_count: Whether to increment the try counter for the URL. url_result: Additional values.
def cut_from_block(html_message): """Cuts div tag which wraps block starting with "From:".""" # handle the case when From: block is enclosed in some tag block = html_message.xpath( ("//*[starts-with(mg:text_content(), 'From:')]|" "//*[starts-with(mg:text_content(), 'Date:')]")) if block: block = block[-1] parent_div = None while block.getparent() is not None: if block.tag == 'div': parent_div = block break block = block.getparent() if parent_div is not None: maybe_body = parent_div.getparent() # In cases where removing this enclosing div will remove all # content, we should assume the quote is not enclosed in a tag. parent_div_is_all_content = ( maybe_body is not None and maybe_body.tag == 'body' and len(maybe_body.getchildren()) == 1) if not parent_div_is_all_content: parent = block.getparent() next_sibling = block.getnext() # remove all tags after found From block # (From block and quoted message are in separate divs) while next_sibling is not None: parent.remove(block) block = next_sibling next_sibling = block.getnext() # remove the last sibling (or the # From block if no siblings) if block is not None: parent.remove(block) return True else: return False # handle the case when From: block goes right after e.g. <hr> # and not enclosed in some tag block = html_message.xpath( ("//*[starts-with(mg:tail(), 'From:')]|" "//*[starts-with(mg:tail(), 'Date:')]")) if block: block = block[0] if RE_FWD.match(block.getparent().text or ''): return False while(block.getnext() is not None): block.getparent().remove(block.getnext()) block.getparent().remove(block) return True
Cuts div tag which wraps block starting with "From:".
def get_help(obj, env, subcmds): """Interpolate complete help doc of given object Assumption that given object as a specific interface: obj.__doc__ is the basic help object. obj.get_actions_titles() returns the subcommand if any. """ doc = txt.dedent(obj.__doc__ or "") env = env.copy() ## get a local copy doc = doc.strip() if not re.search(r"^usage:\s*$", doc, flags=re.IGNORECASE | re.MULTILINE): doc += txt.dedent(""" Usage: %(std_usage)s Options: %(std_options)s""") help_line = (" %%-%ds %%s" % (max([5] + [len(a) for a in subcmds]), )) env["actions"] = "\n".join( help_line % ( name, get_help(subcmd, subcmd_env(env, name), {}).split("\n")[0]) for name, subcmd in subcmds.items()) env["actions_help"] = "" if not env["actions"] else ( "ACTION could be one of:\n\n" "%(actions)s\n\n" "See '%(surcmd)s help ACTION' for more information " "on a specific command." % env) if "%(std_usage)s" in doc: env["std_usage"] = txt.indent( ("%(surcmd)s --help\n" "%(surcmd)s --version" + (("\n%(surcmd)s help [COMMAND]" "\n%(surcmd)s ACTION [ARGS...]") if subcmds else "")) % env, _find_prefix(doc, "%(std_usage)s"), first="") if "%(std_options)s" in doc: env["std_options"] = txt.indent( "--help Show this screen.\n" "--version Show version.", _find_prefix(doc, "%(std_options)s"), first="") if subcmds and "%(actions_help)s" not in doc: doc += "\n\n%(actions_help)s" try: output = doc % env except KeyError as e: msg.err("Doc interpolation of %s needed missing key %r" % (aformat(env["surcmd"], attrs=["bold", ]), e.args[0])) exit(1) except Exception as e: msg.err( "Documentation of %s is not valid. Please check it:\n%s" % (aformat(env["surcmd"], attrs=["bold", ]), doc)) exit(1) return output
Interpolate complete help doc of given object

Assumption that the given object has a specific interface:

obj.__doc__ is the basic help object.

obj.get_actions_titles() returns the subcommands if any.
def list_build_configuration_sets(page_size=200, page_index=0, sort="", q=""): """ List all build configuration sets """ data = list_build_configuration_sets_raw(page_size, page_index, sort, q) if data: return utils.format_json_list(data)
List all build configuration sets
def tf_idf(text): """ Compute the TF-IDF scores for each word in each document. The collection of documents must be in bag-of-words format. .. math:: \mbox{TF-IDF}(w, d) = tf(w, d) * log(N / f(w)) where :math:`tf(w, d)` is the number of times word :math:`w` appeared in document :math:`d`, :math:`f(w)` is the number of documents word :math:`w` appeared in, :math:`N` is the number of documents, and we use the natural logarithm. Parameters ---------- text : SArray[str | dict | list] Input text data. Returns ------- out : SArray[dict] The same document corpus where each score has been replaced by the TF-IDF transformation. See Also -------- count_words, count_ngrams, tokenize, References ---------- - `Wikipedia - TF-IDF <https://en.wikipedia.org/wiki/TFIDF>`_ Examples -------- .. sourcecode:: python >>> import turicreate >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> docs_tfidf = turicreate.text_analytics.tf_idf(docs) """ _raise_error_if_not_sarray(text, "text") if len(text) == 0: return _turicreate.SArray() dataset = _turicreate.SFrame({'docs': text}) scores = _feature_engineering.TFIDF('docs').fit_transform(dataset) return scores['docs']
Compute the TF-IDF scores for each word in each document. The collection of documents must be in bag-of-words format. .. math:: \mbox{TF-IDF}(w, d) = tf(w, d) * log(N / f(w)) where :math:`tf(w, d)` is the number of times word :math:`w` appeared in document :math:`d`, :math:`f(w)` is the number of documents word :math:`w` appeared in, :math:`N` is the number of documents, and we use the natural logarithm. Parameters ---------- text : SArray[str | dict | list] Input text data. Returns ------- out : SArray[dict] The same document corpus where each score has been replaced by the TF-IDF transformation. See Also -------- count_words, count_ngrams, tokenize, References ---------- - `Wikipedia - TF-IDF <https://en.wikipedia.org/wiki/TFIDF>`_ Examples -------- .. sourcecode:: python >>> import turicreate >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> docs_tfidf = turicreate.text_analytics.tf_idf(docs)
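As a rough illustration of the formula above (this is not the turicreate implementation), the same score can be computed on a toy bag-of-words corpus with the standard library alone; the documents and word counts below are made up for the example.

import math
from collections import Counter

# Toy corpus in bag-of-words form (made-up example data).
docs = [Counter({"cat": 2, "sat": 1}), Counter({"cat": 1, "dog": 3})]
n_docs = len(docs)

# f(w): number of documents each word appears in.
doc_freq = Counter(word for doc in docs for word in doc)

# TF-IDF(w, d) = tf(w, d) * ln(N / f(w)), mirroring the formula above.
tfidf = [{w: tf * math.log(n_docs / doc_freq[w]) for w, tf in doc.items()}
         for doc in docs]
print(tfidf)  # "cat" scores 0.0 because it appears in every document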
def main(): """Main entry point""" # Quit when interrupted import signal signal.signal(signal.SIGINT, signal.SIG_DFL) # Arguments: # --separator STRING/REGEX - how to split a row into cells (only relevant for CSV parser) # --flatten - flatten item hashes. {'a':{'b':'c'}} --> {'a_b':'c'} import argparse parser = argparse.ArgumentParser(description='View tabulated data via GUI') parser.add_argument('-p','--parser',type=str, default='autosplit',help='Type of parser to use') #TODO add possible parsers parser.add_argument('--headers',type=str, help='Headers are this comma-delimited names instead of ones supplied in file. Use colons to specify types, like "colname:int"') parser.add_argument('--filter',type=str, help='Pre-populate filter box') parser.add_argument('-s', '--separator', help='How to seperate columns. Applies only to some parsers') parser.add_argument('files', nargs='*', help='Files to show. Each file opens a new window') args = parser.parse_args() GObject.threads_init() default_config = {'format': args.parser} if args.filter: default_config['filter']=args.filter if args.separator: default_config['separator']=args.separator if args.headers: default_config['headers']=map(lambda s: s.strip(),args.headers.split(',')) inputs = [ (open(f,'r'),default_config) for f in args.files ] # Add stdin as input, if it's not a tty if not sys.stdin.isatty(): inputs.append((sys.stdin, default_config)) global windows windows = [Window(i[0],i[1]) for i in inputs] for win in windows: win.start_read() win.connect("destroy",window_closed) if windows: Gtk.main() else: print 'No input supplied so no windows are created'
Main entry point
def get_all_publications(return_namedtuples=True): """ Get list publications from all available source. Args: return_namedtuples (bool, default True): Convert :class:`.Publication` structures to namedtuples (used in AMQP communication). Returns: list: List of :class:`.Publication` structures converted to namedtuple. """ sources = [ ben_cz.get_publications, grada_cz.get_publications, cpress_cz.get_publications, zonerpress_cz.get_publications, ] # get data from all scrappers publications = [] for source in sources: publications.extend( filters.filter_publications(source()) ) # convert to namedtuples if return_namedtuples: publications = map(lambda x: x.to_namedtuple(), publications) return publications
Get list of publications from all available sources.

Args:
    return_namedtuples (bool, default True): Convert :class:`.Publication`
        structures to namedtuples (used in AMQP communication).

Returns:
    list: List of :class:`.Publication` structures converted to namedtuple.
def download_ts(self, path, chunk, process_last_line=True): """ This will look for a download ts link. It will then download that file and replace the link with the local file. :param process_last_line: :param path: str of the path to put the file :param chunk: str of the chunk file, note this could have partial lines :return: str of the chunk with the local file link """ import glob ret_chunk = [] partial_chunk = '' lines = chunk.strip().split('\n') if not process_last_line: partial_chunk = lines.pop() for line in lines: if line.startswith('http:'): ts = '%s/%s.ts' % (path, line.split('.ts?')[0].split('/')[-1]) relative_ts = '%s/%s.ts' % ( path.split('/')[-1], line.split('.ts?')[0].split('/')[-1]) if not os.path.exists(ts): # this could be a repeat call # log.debug("Downloading: %s at %s" % (line, time.time())) gevent.spawn(ApiCall.save_url_to_file, line, ts).start() gevent.sleep(0) ret_chunk.append('# ' + line) ret_chunk.append(relative_ts) # log.debug("Done Downloading = %s"%time.time()) else: ret_chunk = [] # start over else: ret_chunk.append(line) if '#EXT-X-ENDLIST' in chunk: self.repeat_needed = 0 gevent.sleep(0) elif chunk.strip(): self.repeat_needed = 1 + len(glob.glob(path + '/*.ts')) ret_chunk = ret_chunk and '\n'.join(ret_chunk) + '\n' or '' return ret_chunk, partial_chunk
This will look for a download ts link. It will then download that file and replace the link with the local file. :param process_last_line: :param path: str of the path to put the file :param chunk: str of the chunk file, note this could have partial lines :return: str of the chunk with the local file link
def c(*args, **kwargs): ''' kind of like od -c on the command line, basically it dumps each character and info about that char since -- 2013-5-9 *args -- tuple -- one or more strings to dump ''' with Reflect.context(**kwargs) as r: kwargs["args"] = args instance = C_CLASS(r, stream, **kwargs) instance()
kind of like od -c on the command line, basically it dumps each character and info about that char since -- 2013-5-9 *args -- tuple -- one or more strings to dump
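For readers unfamiliar with ``od -c``, a rough sketch of the kind of per-character dump described above could look like the following; this is only an illustration of the idea, not the C_CLASS implementation.

def char_dump(*strings):
    # Print each character with its repr, ordinal and hex value,
    # loosely in the spirit of `od -c`.
    for s in strings:
        for i, ch in enumerate(s):
            print("{:>4}  {!r:>6}  ord={:<6} hex={}".format(i, ch, ord(ch), hex(ord(ch))))

char_dump("a\tb\n")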
def differences_between(self, current_files, parent_files, changes, prefixes): """ yield (thing, changes, is_path) If is_path is true, changes is None and thing is the path as a tuple. If is_path is false, thing is the current_files and parent_files for that changed treeentry and changes is the difference between current_files and parent_files. The code here is written to squeeze as much performance as possible out of this operation. """ parent_oid = None if any(is_tree for _, is_tree, _ in changes): if len(changes) == 1: wanted_path = list(changes)[0][0] parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree]) else: parent_values = defaultdict(set) parent_changes = parent_files - current_files for path, is_tree, oid in parent_changes: if is_tree: parent_values[path].add(oid) for path, is_tree, oid in changes: if is_tree and path not in prefixes: continue if not is_tree: yield path, None, True else: parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty) cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes) if changes: yield cf_and_pf, changes, False
yield (thing, changes, is_path) If is_path is true, changes is None and thing is the path as a tuple. If is_path is false, thing is the current_files and parent_files for that changed treeentry and changes is the difference between current_files and parent_files. The code here is written to squeeze as much performance as possible out of this operation.
def _rest_post(self, suburi, request_headers, request_body): """REST POST operation. The response body after the operation could be the new resource, or ExtendedError, or it could be empty. """ return self._rest_op('POST', suburi, request_headers, request_body)
REST POST operation. The response body after the operation could be the new resource, or ExtendedError, or it could be empty.
def on_for_seconds(self, left_speed, right_speed, seconds, brake=True, block=True): """ Rotate the motors at 'left_speed & right_speed' for 'seconds'. Speeds can be percentages or any SpeedValue implementation. """ if seconds < 0: raise ValueError("seconds is negative ({})".format(seconds)) (left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed) # Set all parameters self.left_motor.speed_sp = int(round(left_speed_native_units)) self.left_motor.time_sp = int(seconds * 1000) self.left_motor._set_brake(brake) self.right_motor.speed_sp = int(round(right_speed_native_units)) self.right_motor.time_sp = int(seconds * 1000) self.right_motor._set_brake(brake) log.debug("%s: on_for_seconds %ss at left-speed %s, right-speed %s" % (self, seconds, left_speed, right_speed)) # Start the motors self.left_motor.run_timed() self.right_motor.run_timed() if block: self._block()
Rotate the motors at 'left_speed & right_speed' for 'seconds'. Speeds can be percentages or any SpeedValue implementation.
def install_remote(self): """Download, extract and install NApp.""" package, pkg_folder = None, None try: package = self._download() pkg_folder = self._extract(package) napp_folder = self._get_local_folder(pkg_folder) dst = self._installed / self.user / self.napp self._check_module(dst.parent) shutil.move(str(napp_folder), str(dst)) finally: # Delete temporary files if package: Path(package).unlink() if pkg_folder and pkg_folder.exists(): shutil.rmtree(str(pkg_folder))
Download, extract and install NApp.
def getRelativePath(basepath, path): """Get a path that is relative to the given base path.""" basepath = splitpath(os.path.abspath(basepath)) path = splitpath(os.path.abspath(path)) afterCommon = False for c in basepath: if afterCommon or path[0] != c: path.insert(0, os.path.pardir) afterCommon = True else: del path[0] return os.path.join(*path)
Get a path that is relative to the given base path.
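On a POSIX system the standard library already provides this behaviour; a quick sanity check (the paths here are made up) is:

import os

# os.path.relpath computes the same parent-dir hops described above.
print(os.path.relpath("/a/b/c/d.txt", "/a/b/x"))   # ../c/d.txt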
def _from_dict(cls, _dict): """Initialize a LanguageModels object from a json dictionary.""" args = {} if 'customizations' in _dict: args['customizations'] = [ LanguageModel._from_dict(x) for x in (_dict.get('customizations')) ] else: raise ValueError( 'Required property \'customizations\' not present in LanguageModels JSON' ) return cls(**args)
Initialize a LanguageModels object from a json dictionary.
def make_encoder(self,formula_dict,inter_list,param_dict):
    """ make the encoder function """
    X_dict = {}
    Xcol_dict = {}
    encoder_dict = {}
    # first, replace param_dict[key] = values, with param_dict[key] = dmatrix
    for key in formula_dict:
        encoding,arg = formula_dict[key]
        if 'Dev' in encoding:
            # make deviation encoded design matrix
            drop_name = arg
            # encode
            deviation_encoder,X_sub,colnames_sub = _dev_encode(param_dict,drop_name,key)
            # additionally, store in dictionary for use by interactions
            X_dict[key] = X_sub
            Xcol_dict[key] = colnames_sub
            # store dictionary of encoder functions to keep for prediction
            encoder_dict[key] = deviation_encoder
        elif 'Dum' in encoding:
            # make dummy variable encoding design mat
            ref_name = arg
            dummy_encoder,X_sub,colnames_sub = _dum_encode(param_dict,ref_name,key)
            # additionally, store in dictionary for use by interactions
            X_dict[key] = X_sub
            Xcol_dict[key] = colnames_sub
            # store dictionary of encoder functions to keep for prediction
            encoder_dict[key] = dummy_encoder
        elif 'Poly' in encoding:
            # make polynomial encoding design mat
            degree = arg
            polynomial_encoder,X_sub,colnames_sub = _poly_encode(param_dict,degree,key)
            # additionally, store in dictionary for use by interactions
            X_dict[key] = X_sub
            Xcol_dict[key] = colnames_sub
            # store dictionary of encoder functions to keep for prediction
            encoder_dict[key] = polynomial_encoder
        else:
            print(encoding)
            raise Exception("Encoding name error")
    # now compute interaction designmatrices
    for interaction in inter_list:
        if len(interaction) > 3:
            raise Exception("Doesn't allow 4-way or higher interaction terms")
        elif len(interaction) == 3:
            param_name1 = interaction[0]
            param_name2 = interaction[1]
            param_name3 = interaction[2]
            col_names1 = Xcol_dict[param_name1]
            col_names2 = Xcol_dict[param_name2]
            col_names3 = Xcol_dict[param_name3]
            # make 3-way encoder function
            def threeway_encoder(param_name1,param_name2,param_name3, \
                                 col_names1, col_names2, col_names3, X_dict):
                """ needs the three names of the parameters to be encoded, as well as
                a dictionary containing the already encoded single parameter design
                matrices, keyed by name """
                X1 = X_dict[param_name1]
                X2 = X_dict[param_name2]
                X3 = X_dict[param_name3]
                X_int = []
                names_int = []
                for i in np.arange(0,X1.shape[1]):
                    for j in np.arange(0,X2.shape[1]):
                        for k in np.arange(0,X3.shape[1]):
                            X_int.append(X1[:,i]*X2[:,j]*X3[:,k])
                            names_int.append(col_names1[i] + "*" + \
                                             col_names2[j] + "*" + col_names3[k])
                # make X_int from lists to np array
                X_int = np.array(X_int).T
                return X_int, names_int
            encoder_dict['threeway'] = threeway_encoder
        elif len(interaction) == 2:
            # there are two interaction terms (A*B)
            param_name1 = interaction[0]
            param_name2 = interaction[1]
            col_names1 = Xcol_dict[param_name1]
            col_names2 = Xcol_dict[param_name2]
            # make twoway_encoder function
            def twoway_encoder(param_name1,param_name2, col_names1, col_names2, X_dict):
                X1 = X_dict[param_name1]
                X2 = X_dict[param_name2]
                X_int = []
                names_int = []
                for i in np.arange(0,X1.shape[1]):
                    for j in np.arange(0,X2.shape[1]):
                        X_int.append(X1[:,i]*X2[:,j])
                        names_int.append(col_names1[i] + "*" + col_names2[j])
                X_int = np.array(X_int).T
                return X_int, names_int
            encoder_dict['twoway'] = twoway_encoder
        else:
            raise Exception("Error while evaluating meaning of interaction term")
    # make key in encoder to specify which columns are active
    encoder_dict['trimmed_columns'] = self._trimmed_columns
    return encoder_dict
make the encoder function
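The interaction encoders above build each interaction column as an elementwise product of already-encoded columns; a small self-contained numpy sketch of that idea (toy data, not this class) is:

import numpy as np

# Two already-encoded design matrices (toy values).
X1 = np.array([[1., 0.], [0., 1.], [1., 0.]])   # e.g. dummy-coded factor A
X2 = np.array([[0.5], [1.5], [2.5]])            # e.g. a polynomial term of B

# Two-way interaction: every column of X1 times every column of X2.
X_int = np.column_stack([X1[:, i] * X2[:, j]
                         for i in range(X1.shape[1])
                         for j in range(X2.shape[1])])
print(X_int)   # shape (3, 2)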
def associate_route_table(self, route_table_id, subnet_id): """ Associates a route table with a specific subnet. :type route_table_id: str :param route_table_id: The ID of the route table to associate. :type subnet_id: str :param subnet_id: The ID of the subnet to associate with. :rtype: str :return: The ID of the association created """ params = { 'RouteTableId': route_table_id, 'SubnetId': subnet_id } result = self.get_object('AssociateRouteTable', params, ResultSet) return result.associationId
Associates a route table with a specific subnet. :type route_table_id: str :param route_table_id: The ID of the route table to associate. :type subnet_id: str :param subnet_id: The ID of the subnet to associate with. :rtype: str :return: The ID of the association created
def export_throw_event_info(node_params, output_element): """ Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element :param node_params: dictionary with given intermediate throw event parameters, :param output_element: object representing BPMN XML 'intermediateThrowEvent' element. """ definitions = node_params[consts.Consts.event_definitions] for definition in definitions: definition_id = definition[consts.Consts.id] definition_type = definition[consts.Consts.definition_type] output_definition = eTree.SubElement(output_element, definition_type) if definition_id != "": output_definition.set(consts.Consts.id, definition_id)
Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element :param node_params: dictionary with given intermediate throw event parameters, :param output_element: object representing BPMN XML 'intermediateThrowEvent' element.
def _rapRperiAxiEq(R,E,L,pot): """The vr=0 equation that needs to be solved to find apo- and pericenter""" return E-potentialAxi(R,pot)-L**2./2./R**2.
The vr=0 equation that needs to be solved to find apo- and pericenter
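As a sketch in the notation implied by the function arguments (E the energy, L the angular momentum, \Phi_{\mathrm{axi}} the axisymmetric potential evaluated in the plane), the condition being solved is

E - \Phi_{\mathrm{axi}}(R) - \frac{L^2}{2R^2} = 0,

whose two positive roots in R are the pericentre and apocentre radii of the orbit.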
def get_measurement_id_options(self): """ Returns list of measurement choices.""" # get the URL for the main check in page document = self._get_document_for_url( self._get_url_for_measurements() ) # gather the IDs for all measurement types measurement_ids = self._get_measurement_ids(document) return measurement_ids
Returns list of measurement choices.
def create_small_thumbnail(self, token, item_id): """ Create a 100x100 small thumbnail for the given item. It is used for preview purpose and displayed in the 'preview' and 'thumbnails' sidebar sections. :param token: A valid token for the user in question. :type token: string :param item_id: The item on which to set the thumbnail. :type item_id: int | long :returns: The item object (with the new thumbnail id) and the path where the newly created thumbnail is stored. :rtype: dict """ parameters = dict() parameters['token'] = token parameters['itemId'] = item_id response = self.request( 'midas.thumbnailcreator.create.small.thumbnail', parameters) return response
Create a 100x100 small thumbnail for the given item. It is used for preview purpose and displayed in the 'preview' and 'thumbnails' sidebar sections. :param token: A valid token for the user in question. :type token: string :param item_id: The item on which to set the thumbnail. :type item_id: int | long :returns: The item object (with the new thumbnail id) and the path where the newly created thumbnail is stored. :rtype: dict
def on(self, event, listener, *user_args): """Register a ``listener`` to be called on ``event``. The listener will be called with any extra arguments passed to :meth:`emit` first, and then the extra arguments passed to :meth:`on` last. If the listener function returns :class:`False`, it is removed and will not be called the next time the ``event`` is emitted. """ self._listeners[event].append( _Listener(callback=listener, user_args=user_args))
Register a ``listener`` to be called on ``event``. The listener will be called with any extra arguments passed to :meth:`emit` first, and then the extra arguments passed to :meth:`on` last. If the listener function returns :class:`False`, it is removed and will not be called the next time the ``event`` is emitted.
def get_image_grad(net, image, class_id=None): """Get the gradients of the image. Parameters: ---------- net: Block Network to use for visualization. image: NDArray Preprocessed image to use for visualization. class_id: int Category ID this image belongs to. If not provided, network's prediction will be used.""" return _get_grad(net, image, class_id, image_grad=True)
Get the gradients of the image. Parameters: ---------- net: Block Network to use for visualization. image: NDArray Preprocessed image to use for visualization. class_id: int Category ID this image belongs to. If not provided, network's prediction will be used.
def determine_if_whitespace(self): """ Set is_space if current token is whitespace Is space if value is: * Newline * Empty String * Something that matches regexes['whitespace'] """ value = self.current.value if value == '\n': self.is_space = True else: self.is_space = False if (value == '' or regexes['whitespace'].match(value)): self.is_space = True
Set is_space if current token is whitespace Is space if value is: * Newline * Empty String * Something that matches regexes['whitespace']
def authorized_default_handler(resp, remote, *args, **kwargs): """Store access token in session. Default authorized handler. :param remote: The remote application. :param resp: The response. :returns: Redirect response. """ response_token_setter(remote, resp) db.session.commit() return redirect(url_for('invenio_oauthclient_settings.index'))
Store access token in session. Default authorized handler. :param remote: The remote application. :param resp: The response. :returns: Redirect response.
def init_registry_from_json(mongo, filename, clear_collection=False): """Initialize a model registry with a list of model definitions that are stored in a given file in Json format. Parameters ---------- mongo : scodata.MongoDBFactory Connector for MongoDB filename : string Path to file containing model definitions clear_collection : boolean If true, collection will be dropped before models are created """ # Read model definition file (JSON) with open(filename, 'r') as f: models = json.load(f) init_registry(mongo, models, clear_collection)
Initialize a model registry with a list of model definitions that are
stored in a given file in JSON format.

Parameters
----------
mongo : scodata.MongoDBFactory
    Connector for MongoDB
filename : string
    Path to file containing model definitions
clear_collection : boolean
    If true, collection will be dropped before models are created
def write(self): """Write object state to Zookeeper. This will write the current state of the object to Zookeeper, taking the final merged state as the new one, and resetting any write buffers. """ self._check() cache = self._cache pristine_cache = self._pristine_cache self._pristine_cache = cache.copy() # Used by `apply_changes` function to return the changes to # this scope. changes = [] def apply_changes(content, stat): """Apply the local state to the Zookeeper node state.""" del changes[:] current = yaml.load(content) if content else {} missing = object() for key in set(pristine_cache).union(cache): old_value = pristine_cache.get(key, missing) new_value = cache.get(key, missing) if old_value != new_value: if new_value != missing: current[key] = new_value if old_value != missing: changes.append( ModifiedItem(key, old_value, new_value)) else: changes.append(AddedItem(key, new_value)) elif key in current: del current[key] changes.append(DeletedItem(key, old_value)) return yaml.safe_dump(current) # Apply the change till it takes. yield retry_change(self._client, self._path, apply_changes) returnValue(changes)
Write object state to Zookeeper. This will write the current state of the object to Zookeeper, taking the final merged state as the new one, and resetting any write buffers.
def wait_for_crm_operation(operation): """Poll for cloud resource manager operation until finished.""" logger.info("wait_for_crm_operation: " "Waiting for operation {} to finish...".format(operation)) for _ in range(MAX_POLLS): result = crm.operations().get(name=operation["name"]).execute() if "error" in result: raise Exception(result["error"]) if "done" in result and result["done"]: logger.info("wait_for_crm_operation: Operation done.") break time.sleep(POLL_INTERVAL) return result
Poll for cloud resource manager operation until finished.
def attention_mask_same_segment( query_segment, memory_segment=None, dtype=tf.float32): """Bias for attention where attention between segments is disallowed. Args: query_segment: a mtf.Tensor with shape [..., length_dim] memory_segment: a mtf.Tensor with shape [..., memory_length_dim] dtype: a tf.dtype Returns: a mtf.Tensor with shape [..., length_dim, memory_length_dim] """ memory_segment = rename_length_to_memory_length( memory_segment or query_segment) return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9
Bias for attention where attention between segments is disallowed. Args: query_segment: a mtf.Tensor with shape [..., length_dim] memory_segment: a mtf.Tensor with shape [..., memory_length_dim] dtype: a tf.dtype Returns: a mtf.Tensor with shape [..., length_dim, memory_length_dim]
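Outside of mesh-tensorflow, the same bias can be sketched with plain numpy broadcasting; the segment IDs below are made-up toy values.

import numpy as np

query_segment  = np.array([1, 1, 2, 2])    # [length]
memory_segment = np.array([1, 2, 2])       # [memory_length]

# -1e9 wherever the query and memory positions belong to different segments,
# 0 where they match, so softmax effectively ignores cross-segment attention.
bias = (query_segment[:, None] != memory_segment[None, :]).astype(np.float32) * -1e9
print(bias)   # shape [length, memory_length]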
def _load(self): """Load data from a pickle file. """ with open(self._pickle_file, 'rb') as source: pickler = pickle.Unpickler(source) for attribute in self._pickle_attributes: pickle_data = pickler.load() setattr(self, attribute, pickle_data)
Load data from a pickle file.
def get_nsing(self,epsilon=1.0e-4): """ get the number of solution space dimensions given a ratio between the largest and smallest singular values Parameters ---------- epsilon: float singular value ratio Returns ------- nsing : float number of singular components above the epsilon ratio threshold Note ----- If nsing == nadj_par, then None is returned """ mx = self.xtqx.shape[0] nsing = mx - np.searchsorted( np.sort((self.xtqx.s.x / self.xtqx.s.x.max())[:,0]),epsilon) if nsing == mx: self.logger.warn("optimal nsing=npar") nsing = None return nsing
get the number of solution space dimensions given a ratio between the largest and smallest singular values Parameters ---------- epsilon: float singular value ratio Returns ------- nsing : float number of singular components above the epsilon ratio threshold Note ----- If nsing == nadj_par, then None is returned
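The core of the computation above is counting singular values whose ratio to the largest one exceeds epsilon; a standalone numpy sketch with toy singular values is:

import numpy as np

s = np.array([10.0, 3.0, 0.5, 1e-6])   # toy singular values, descending
epsilon = 1.0e-4

# Number of solution-space dimensions: components with s_i / s_max above epsilon.
nsing = int(np.sum(s / s.max() > epsilon))
print(nsing)   # 3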
def defaults(self): """ Reset the chart options and style to defaults """ self.chart_style = {} self.chart_opts = {} self.style("color", "#30A2DA") self.width(900) self.height(250)
Reset the chart options and style to defaults
def load_glove_df(filepath, **kwargs): """ Load a GloVE-format text file into a dataframe >>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> df.index[:3] Index(['the', ',', '.'], dtype='object', name=0) >>> df.iloc[0][:3] 1 0.41800 2 0.24968 3 -0.41242 Name: the, dtype: float64 """ pdkwargs = dict(index_col=0, header=None, sep=r'\s', skiprows=[0], verbose=False, engine='python') pdkwargs.update(kwargs) return pd.read_csv(filepath, **pdkwargs)
Load a GloVE-format text file into a dataframe >>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> df.index[:3] Index(['the', ',', '.'], dtype='object', name=0) >>> df.iloc[0][:3] 1 0.41800 2 0.24968 3 -0.41242 Name: the, dtype: float64
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None, is_training=None): """Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid. """ if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) if use_td: inputs_shape = common_layers.shape_list(inputs) if use_td == "weight": if data_format == "channels_last": size = kernel_size * kernel_size * inputs_shape[-1] else: size = kernel_size * kernel_size * inputs_shape[1] targeting_count = targeting_rate * tf.to_float(size) targeting_fn = common_layers.weight_targeting elif use_td == "unit": targeting_count = targeting_rate * filters targeting_fn = common_layers.unit_targeting else: raise Exception("Unrecognized targeted dropout type: %s" % use_td) y = common_layers.td_conv( inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=strides, padding=("SAME" if strides == 1 else "VALID"), data_format=data_format, use_bias=False, kernel_initializer=tf.variance_scaling_initializer()) else: y = layers().Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=("SAME" if strides == 1 else "VALID"), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)(inputs) return y
Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid.
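The "explicit padding" referred to above depends only on kernel_size; a minimal numpy sketch of that padding rule for an NCHW tensor (assuming the usual ResNet convention of total pad = kernel_size - 1, smaller half in front) is:

import numpy as np

def fixed_pad_nchw(x, kernel_size):
    # Pad height and width so a strided conv sees the same effective padding
    # regardless of the input size.
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    return np.pad(x, ((0, 0), (0, 0), (pad_beg, pad_end), (pad_beg, pad_end)),
                  mode="constant")

x = np.zeros((1, 3, 8, 8))
print(fixed_pad_nchw(x, 3).shape)   # (1, 3, 10, 10)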
def init_prior(self, R): """initialize prior for the subject Returns ------- TFA Returns the instance itself. """ centers, widths = self.init_centers_widths(R) # update prior prior = np.zeros(self.K * (self.n_dim + 1)) self.set_centers(prior, centers) self.set_widths(prior, widths) self.set_prior(prior) return self
initialize prior for the subject Returns ------- TFA Returns the instance itself.
def printSequences(x, formatString="%d"):
    """
    Print a bunch of sequences stored in a 2D numpy array.
    """
    [seqLen, numElements] = x.shape
    for i in range(seqLen):
        s = ""
        for j in range(numElements):
            s += formatString % x[i][j]
        print(s)
Print a bunch of sequences stored in a 2D numpy array.
async def execute_all_with_names(self, subprocesses, container = None, retnames = ('',), forceclose = True): ''' DEPRECATED Execute all subprocesses and get the return values. :param subprocesses: sequence of subroutines (coroutines) :param container: if specified, run subprocesses in another container. :param retnames: DEPRECATED get return value from container.(name) for each name in retnames. '' for return value (to be compatible with earlier versions) :param forceclose: force close the routines on exit, so all the subprocesses are terminated on timeout if used with executeWithTimeout :returns: a list of tuples, one for each subprocess, with value of retnames inside: `[('retvalue1',),('retvalue2',),...]` ''' if not subprocesses: return [] subprocesses = list(subprocesses) if len(subprocesses) == 1 and (container is None or container is self) and forceclose: # Directly run the process to improve performance return [await subprocesses[0]] if container is None: container = self delegates = [self.begin_delegate_other(p, container, retnames) for p in subprocesses] matchers = [d[0] for d in delegates] try: _, eventdict = await self.wait_for_all(*matchers) events = [eventdict[m] for m in matchers] exceptions = [e.exception for e in events if hasattr(e, 'exception')] if exceptions: if len(exceptions) == 1: raise exceptions[0] else: raise MultipleException(exceptions) return [e.result for e in events] finally: if forceclose: for d in delegates: try: container.terminate(d[1]) except Exception: pass
DEPRECATED Execute all subprocesses and get the return values. :param subprocesses: sequence of subroutines (coroutines) :param container: if specified, run subprocesses in another container. :param retnames: DEPRECATED get return value from container.(name) for each name in retnames. '' for return value (to be compatible with earlier versions) :param forceclose: force close the routines on exit, so all the subprocesses are terminated on timeout if used with executeWithTimeout :returns: a list of tuples, one for each subprocess, with value of retnames inside: `[('retvalue1',),('retvalue2',),...]`
def _get_notifications_status(self, notifications): """ Get the notifications status """ if notifications: size = len(notifications["activeNotifications"]) else: size = 0 status = self.status_notif if size > 0 else self.status_no_notif return (size, status)
Get the notifications status
def get_cached_moderated_reddits(self): """Return a cached dictionary of the user's moderated reddits. This list is used internally. Consider using the `get_my_moderation` function instead. """ if self._mod_subs is None: self._mod_subs = {'mod': self.reddit_session.get_subreddit('mod')} for sub in self.reddit_session.get_my_moderation(limit=None): self._mod_subs[six.text_type(sub).lower()] = sub return self._mod_subs
Return a cached dictionary of the user's moderated reddits. This list is used internally. Consider using the `get_my_moderation` function instead.
def contains(self, times, keep_inside=True, delta_t=DEFAULT_OBSERVATION_TIME): """ Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the TMOC instance. Parameters ---------- times : `astropy.time.Time` astropy times to check whether they are contained in the TMOC or not. keep_inside : bool, optional True by default. If so the filtered table contains only observations that are located the MOC. If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC. delta_t : `astropy.time.TimeDelta`, optional the duration of one observation. It is set to 30 min by default. This data is used to compute the more efficient TimeMOC order to represent the observations (Best order = the less precise order which is able to discriminate two observations separated by ``delta_t``). Returns ------- array : `~numpy.darray` A mask boolean array """ # the requested order for filtering the astropy observations table is more precise than the order # of the TimeMoc object current_max_order = self.max_order new_max_order = TimeMOC.time_resolution_to_order(delta_t) if new_max_order > current_max_order: message = 'Requested time resolution filtering cannot be applied.\n' \ 'Filtering is applied with a time resolution of {0} sec.'.format( TimeMOC.order_to_time_resolution(current_max_order).sec) warnings.warn(message, UserWarning) rough_tmoc = self.degrade_to_order(new_max_order) pix_arr = (times.jd * TimeMOC.DAY_MICRO_SEC) pix_arr = pix_arr.astype(int) intervals_arr = rough_tmoc._interval_set._intervals inf_arr = np.vstack([pix_arr[i] >= intervals_arr[:, 0] for i in range(pix_arr.shape[0])]) sup_arr = np.vstack([pix_arr[i] <= intervals_arr[:, 1] for i in range(pix_arr.shape[0])]) if keep_inside: res = inf_arr & sup_arr filtered_rows = np.any(res, axis=1) else: res = ~inf_arr | ~sup_arr filtered_rows = np.all(res, axis=1) return filtered_rows
Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the
TMOC instance.

Parameters
----------
times : `astropy.time.Time`
    astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
    True by default. If so the filtered table contains only observations that are located in the MOC.
    If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
    the duration of one observation. It is set to 30 min by default. This data is used to compute the
    more efficient TimeMOC order to represent the observations (Best order = the least precise order which
    is able to discriminate two observations separated by ``delta_t``).

Returns
-------
array : `~numpy.ndarray`
    A mask boolean array
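The membership test itself reduces to checking each time (converted to microsecond pixels) against a set of [start, end] intervals; a small numpy sketch of that broadcast, with made-up pixel values and intervals, is:

import numpy as np

pix = np.array([5, 42, 99])                  # times in microsecond pixels (toy)
intervals = np.array([[0, 10], [40, 50]])    # [start, end] pairs (toy)

# inside[i] is True if pix[i] falls in any interval.
inside = np.any((pix[:, None] >= intervals[:, 0]) &
                (pix[:, None] <= intervals[:, 1]), axis=1)
print(inside)    # [ True  True False]
print(~inside)   # keep_inside=False keeps the complement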
def lookup(self, pathogenName, sampleName): """ Look up a pathogen name, sample name combination and get its FASTA/FASTQ file name and unique read count. This method should be used instead of C{add} in situations where you want an exception to be raised if a pathogen/sample combination has not already been passed to C{add}. @param pathogenName: A C{str} pathogen name. @param sampleName: A C{str} sample name. @raise KeyError: If the pathogen name or sample name have not been seen, either individually or in combination. @return: A (C{str}, C{int}) tuple retrieved from self._readsFilenames """ pathogenIndex = self._pathogens[pathogenName] sampleIndex = self._samples[sampleName] return self._readsFilenames[(pathogenIndex, sampleIndex)]
Look up a pathogen name, sample name combination and get its FASTA/FASTQ file name and unique read count. This method should be used instead of C{add} in situations where you want an exception to be raised if a pathogen/sample combination has not already been passed to C{add}. @param pathogenName: A C{str} pathogen name. @param sampleName: A C{str} sample name. @raise KeyError: If the pathogen name or sample name have not been seen, either individually or in combination. @return: A (C{str}, C{int}) tuple retrieved from self._readsFilenames
def highlight_occurences(editor):
    """
    Highlights all occurrences of the word under the given editor's cursor.

    :param editor: Document editor.
    :type editor: QWidget
    :return: Method success.
    :rtype: bool
    """

    format = editor.language.theme.get("accelerator.occurence")
    if not format:
        return False

    extra_selections = editor.extraSelections() or []
    if not editor.isReadOnly():
        word = editor.get_word_under_cursor()
        if not word:
            return False

        block = editor.document().findBlock(0)
        cursor = editor.document().find(word,
                                        block.position(),
                                        QTextDocument.FindCaseSensitively | QTextDocument.FindWholeWords)
        while block.isValid() and cursor.position() != -1:
            selection = QTextEdit.ExtraSelection()
            selection.format.setBackground(format.background())
            selection.cursor = cursor
            extra_selections.append(selection)
            cursor = editor.document().find(word,
                                            cursor.position(),
                                            QTextDocument.FindCaseSensitively | QTextDocument.FindWholeWords)
            block = block.next()
    editor.setExtraSelections(extra_selections)
    return True
Highlights all occurrences of the word under the given editor's cursor.

:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
def list(self, full_properties=False, filter_args=None): """ List the Virtual Functions of this Partition. Authorization requirements: * Object-access permission to this Partition. Parameters: full_properties (bool): Controls whether the full set of resource properties should be retrieved, vs. only the short set as returned by the list operation. filter_args (dict): Filter arguments that narrow the list of returned resources to those that match the specified filter arguments. For details, see :ref:`Filtering`. `None` causes no filtering to happen, i.e. all resources are returned. Returns: : A list of :class:`~zhmcclient.VirtualFunction` objects. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ resource_obj_list = [] uris = self.partition.get_property('virtual-function-uris') if uris: for uri in uris: resource_obj = self.resource_class( manager=self, uri=uri, name=None, properties=None) if self._matches_filters(resource_obj, filter_args): resource_obj_list.append(resource_obj) if full_properties: resource_obj.pull_full_properties() self._name_uri_cache.update_from(resource_obj_list) return resource_obj_list
List the Virtual Functions of this Partition. Authorization requirements: * Object-access permission to this Partition. Parameters: full_properties (bool): Controls whether the full set of resource properties should be retrieved, vs. only the short set as returned by the list operation. filter_args (dict): Filter arguments that narrow the list of returned resources to those that match the specified filter arguments. For details, see :ref:`Filtering`. `None` causes no filtering to happen, i.e. all resources are returned. Returns: : A list of :class:`~zhmcclient.VirtualFunction` objects. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def load_obs(self, mask_threshold=0.5): """ Loads observations and masking grid (if needed). Args: mask_threshold: Values greater than the threshold are kept, others are masked. """ print("Loading obs ", self.run_date, self.model_name, self.forecast_variable) start_date = self.run_date + timedelta(hours=self.start_hour) end_date = self.run_date + timedelta(hours=self.end_hour) mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path) mrms_grid.load_data() if len(mrms_grid.data) > 0: self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data) self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0) if self.obs_mask: mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path) mask_grid.load_data() self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0) self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)
Loads observations and masking grid (if needed). Args: mask_threshold: Values greater than the threshold are kept, others are masked.
def _normalise_weights(logZ, weights, ntrim=None): """ Correctly normalise the weights for trimming This takes a list of log-evidences, and re-normalises the weights so that the largest weight across all samples is 1, and the total weight in each set of samples is proportional to the evidence. Parameters ---------- logZ: array-like log-evidences to weight each set of weights by weights: array-like of numpy.array list of not necessarily equal length list of weights Returns ------- logZ: numpy.array evidences, renormalised so that max(logZ) = 0 weights: list of 1D numpy.array normalised weights """ logZ -= logZ.max() Zs = numpy.exp(logZ) weights = [w/w.sum()*Z for w, Z in zip(weights, Zs)] wmax = max([w.max() for w in weights]) weights = [w/wmax for w in weights] ntot = sum([w.sum() for w in weights]) if ntrim is not None and ntrim < ntot: weights = [w*ntrim/ntot for w in weights] return logZ, weights
Correctly normalise the weights for trimming This takes a list of log-evidences, and re-normalises the weights so that the largest weight across all samples is 1, and the total weight in each set of samples is proportional to the evidence. Parameters ---------- logZ: array-like log-evidences to weight each set of weights by weights: array-like of numpy.array list of not necessarily equal length list of weights Returns ------- logZ: numpy.array evidences, renormalised so that max(logZ) = 0 weights: list of 1D numpy.array normalised weights
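A tiny worked example of the renormalisation described above (made-up evidences and weights, with ntrim left unset):

import numpy as np

logZ = np.array([0.0, -1.0])
weights = [np.array([2.0, 1.0]), np.array([4.0, 4.0])]

logZ = logZ - logZ.max()                  # max(logZ) == 0
Zs = np.exp(logZ)
weights = [w / w.sum() * Z for w, Z in zip(weights, Zs)]
wmax = max(w.max() for w in weights)
weights = [w / wmax for w in weights]     # largest single weight is now 1

print(logZ)                               # [ 0. -1.]
print([w.sum() for w in weights])         # totals proportional to exp(logZ)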