content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def pf_mobility(phi, gamma):
    """Phase field mobility: currently a constant mobility equal to *gamma*.

    The phi-dependent variants are kept below for reference; the model in
    use ignores *phi* and returns *gamma* unchanged.
    """
    # Alternative (disabled) phi-dependent mobilities:
    # return gamma * (phi**2-1.)**2
    # func = 1.-phi**2
    # return 0.75 * gamma * 0.5 * (1. + df.sign(func)) * func
    return gamma
import argparse
def parse_args():
    """Parse command line arguments for the AndroidManifest fixer.

    Returns:
        argparse.Namespace with the options declared below.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--minSdkVersion', default='', dest='min_sdk_version',
                        help='specify minSdkVersion used by the build system')
    parser.add_argument('--targetSdkVersion', default='', dest='target_sdk_version',
                        help='specify targetSdkVersion used by the build system')
    parser.add_argument('--raise-min-sdk-version', dest='raise_min_sdk_version', action='store_true',
                        help='raise the minimum sdk version in the manifest if necessary')
    parser.add_argument('--library', dest='library', action='store_true',
                        help='manifest is for a static library')
    # BUG FIX: help strings said "android:requred" (typo) -> "android:required".
    parser.add_argument('--uses-library', dest='uses_libraries', action='append',
                        help='specify additional <uses-library> tag to add. android:required is set to true')
    parser.add_argument('--optional-uses-library', dest='optional_uses_libraries', action='append',
                        help='specify additional <uses-library> tag to add. android:required is set to false')
    parser.add_argument('--uses-non-sdk-api', dest='uses_non_sdk_api', action='store_true',
                        help='manifest is for a package built against the platform')
    parser.add_argument('--logging-parent', dest='logging_parent', default='',
                        help=('specify logging parent as an additional <meta-data> tag. '
                              'This value is ignored if the logging_parent meta-data tag is present.'))
    parser.add_argument('--use-embedded-dex', dest='use_embedded_dex', action='store_true',
                        help=('specify if the app wants to use embedded dex and avoid extracted,'
                              'locally compiled code. Must not conflict if already declared '
                              'in the manifest.'))
    # Coerce the string "true"/"false" (case-insensitive) into a bool.
    parser.add_argument('--extract-native-libs', dest='extract_native_libs',
                        default=None, type=lambda x: (str(x).lower() == 'true'),
                        help=('specify if the app wants to use embedded native libraries. Must not conflict '
                              'if already declared in the manifest.'))
    parser.add_argument('--has-no-code', dest='has_no_code', action='store_true',
                        help=('adds hasCode="false" attribute to application. Ignored if application elem '
                              'already has a hasCode attribute.'))
    parser.add_argument('input', help='input AndroidManifest.xml file')
    parser.add_argument('output', help='output AndroidManifest.xml file')
    return parser.parse_args()
import os
def cases():
    """Return the filenames of all pre-calculated test cases.

    Walks the 'cases' directory that sits next to this module and collects
    every file path found, recursively.
    """
    case_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cases')
    found = []
    for dir_path, _, files in os.walk(case_dir):
        found.extend(os.path.join(dir_path, name) for name in files)
    return found
def get_master_name(els):
    """Return the name of the master node in an Elasticsearch cluster.

    Arguments:
        (input) els -> Elasticsearch client instance.
        (output) Name of the master node (last space-separated field of
                 the ``cat.master()`` output).
    """
    cat_output = els.cat.master()
    return cat_output.strip().split(" ")[-1]
def smith_gassmann(kstar, k0, kfl2, phi):
    """Apply the Gassmann equation and return the saturated modulus Ksat2.

    Args:
        kstar: dry-rock (frame) bulk modulus.
        k0: mineral bulk modulus.
        kfl2: fluid bulk modulus.
        phi: porosity.
    """
    numerator = (1 - kstar / k0) ** 2.0
    denominator = phi / kfl2 + (1 - phi) / k0 - (kstar / k0 ** 2.0)
    return kstar + numerator / denominator
def organize_by_chromosome(genes, transcripts):
    """Group genes and transcripts by chromosome.

    Args:
        genes: dict mapping gene ID -> gene object (must expose .chromosome).
        transcripts: dict mapping transcript ID -> transcript object.

    Returns:
        Tuple (gene_dict, transcript_dict); each maps chromosome ->
        {ID: object} for the corresponding input.
    """
    gene_dict = {}
    transcript_dict = {}
    # setdefault replaces the original create-then-reassign branches and
    # removes a duplicated assignment present in the old code.
    for ID, gene in genes.items():
        gene_dict.setdefault(gene.chromosome, {})[ID] = gene
    for ID, transcript in transcripts.items():
        transcript_dict.setdefault(transcript.chromosome, {})[ID] = transcript
    return gene_dict, transcript_dict
def get_word_combinations(word):
    """Return every contiguous run of hyphen-separated parts, joined.

    'one-two-three'
    =>
    ['one', 'two', 'three', 'onetwo', 'twothree', 'onetwothree']
    """
    parts = [piece for piece in word.split(u'-') if piece]
    combos = []
    for length in range(1, len(parts) + 1):
        for start in range(len(parts) - length + 1):
            combos.append(u''.join(parts[start:start + length]))
    return combos
import tempfile
def env_to_file(env_variables, destination_path=None, posix=True):
    """
    Write environment variables to a sourceable script file.

    :param env_variables: dict of environment variables to write.
    :param destination_path: destination path of a file where the
                             environment variables will be stored; a
                             temporary file is created when omitted. On
                             POSIX the result is a bash script you can
                             then source.
    :param posix: false if the target of the generated file will be a
                  windows machine
    :return: the path of the written file, or None when *env_variables*
             is empty.
    """
    if not env_variables:
        return None
    if not destination_path:
        destination_path = tempfile.mkstemp(suffix='env')[1]
    linesep = '\n' if posix else '\r\n'
    with open(destination_path, 'w') as f:
        if posix:
            f.write('#!/bin/bash')
            f.write(linesep)
            # BUG FIX: "Environmnet" typo corrected in the generated banner.
            f.write('# Environment file generated by Cloudify. Do not delete '
                    'unless you know exactly what you are doing.')
            f.write(linesep)
            f.write(linesep)
        else:
            f.write('rem Environment file generated by Cloudify. Do not '
                    'delete unless you know exactly what you are doing.')
            f.write(linesep)
        # BUG FIX: dict.iteritems() is Python 2 only; use items().
        for key, value in env_variables.items():
            if posix:
                f.write('export {0}={1}'.format(key, value))
                f.write(linesep)
            else:
                f.write('set {0}={1}'.format(key, value))
                f.write(linesep)
        f.write(linesep)
    return destination_path
def CanEditHotlist(effective_ids, hotlist):
    """Return True if any effective ID is a hotlist owner or editor
    (may add/remove issues and change rankings)."""
    privileged = hotlist.owner_ids + hotlist.editor_ids
    return any(user_id in privileged for user_id in effective_ids)
def dice_counts(dice):
    """Map each die value 1-6 to how many times it appears in *dice*."""
    counts = {}
    for value in range(1, 7):
        counts[value] = dice.count(value)
    return counts
def events(*_events):
    """ A class decorator. Adds auxiliary methods for callback based event
    notification of multiple watchers.

    For each event name E the decorated class gains:
      - ``E_handlers``: lazily-created list of callbacks,
      - ``E(*a, **kw)``: invokes every registered callback,
      - ``watch_E(cb)`` / ``unwatch_E(cb)``: register / unregister a callback.

    NOTE(review): the generated code uses a ``lazy`` decorator expected in
    this module's globals at exec time -- not visible in this chunk; confirm.
    """
    def add_events(cls):
        # Maintain total event list of both inherited events and events added
        # using nested decorations.
        try:
            all_events = cls.events
        except AttributeError:
            cls.events = _events
        else:
            cls.events = all_events + _events
        for e in _events:
            helpers = {}
            # Generate the per-event helper functions by exec-ing a template
            # into the local `helpers` dict; they are attached to the class
            # below via setattr.
            exec("""
@lazy
def {event}_handlers(self):
    return []
def {event}(self, *a, **kw):
    for h in list(self.{handlers}):
        h(*a, **kw)
def watch_{event}(self, cb):
    self.{handlers}.append(cb)
def unwatch_{event}(self, cb):
    self.{handlers}.remove(cb)
""".format(event = e, handlers = e + "_handlers"),
                globals(), helpers
            )
            for n, h in helpers.items():
                setattr(cls, n, h)
        return cls
    return add_events
from typing import List
def dict_to_kvp(dictionary: dict) -> List[tuple]:
    """
    Convert a dictionary into a list of (key, value) tuples.

    :param dictionary: Dictionary to convert
    :return: List of Key-Value Pairs
    """
    return list(dictionary.items())
def convert_and_remove_punctuation(text):
    """
    Strip punctuation that is not allowed (e.g. backslashes and brackets)
    and convert Chinese/typographic punctuation into its English
    equivalent (e.g. 「 becomes ").
    """
    removals = ("\\", "[", "]", "【", "】", "{", "}")
    for char in removals:
        text = text.replace(char, "")
    conversions = (
        (u"\u201C", "\""),
        (u"\u201D", "\""),
        (u"\u2018", "'"),
        (u"\u2019", "'"),
        ("「", "\""),
        ("」", "\""),
        ("『", "\""),
        ("』", "\""),
        ("quot;", "\""),
    )
    for old, new in conversions:
        text = text.replace(old, new)
    return text
import re
def _get_ip_from_response(response):
    """
    Extract all IPv4 addresses from a string.

    Parameters
    ----------
    response: str
        Text possibly containing IPv4 addresses.

    Returns
    -------
    list: IPv4 address substrings, in order of appearance.
    """
    octet = r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
    pattern = r'\b(?:' + octet + r'\.){3}' + octet + r'\b'
    return re.findall(pattern, response)
def EscapeShellArgument(s):
    """Quote *s* so a POSIX shell interprets it literally.

    Wraps the string in single quotes and rewrites every embedded single
    quote as '\\'' (close quote, escaped quote, reopen quote).
    """
    escaped = s.replace("'", "'\\''")
    return "'{}'".format(escaped)
def read_csv_to_data(path: str, delimiter: str = ",", headers: list = []):
    """A zero-dependancy helper method to read a csv file
    Given the path to a csv file, read data row-wise. This data may be later converted to a dict of lists if needed (column-wise).
    Args:
        path (str): Path to csv file
        delimiter (str, optional): Delimiter to split the rows by. Defaults to ','
        headers: (list, optional): Given header list for a csv file. Defaults to an empty list, which results in the first row being used as a header.
    Returns:
        A list of dictionary values (list of rows) representing the file being read
    """
    # NOTE: the mutable default for `headers` is never mutated here, so it
    # is safe; kept for interface compatibility.
    data = []
    with open(path, "r") as f:
        header = headers
        if len(headers) == 0:
            # BUG FIX: the configured delimiter was ignored (hard-coded ",").
            header = f.readline().split(delimiter)
        for line in f:
            entry = {}
            for i, value in enumerate(line.split(delimiter)):
                entry[header[i].strip()] = value.strip()
            data.append(entry)
    return data
import os
def current_user():
    """Return the value of the USER environment variable.

    Raises KeyError if USER is not set.
    """
    environment = os.environ
    return environment["USER"]
def approx_match_dictionary():
    """Map Icelandic abbreviations to the stem common to all expanded forms."""
    abbreviations = ["%", "bls", "gr", "hv", "hæstv", "kl", "klst", "km",
                     "kr", "málsl", "málsgr", "mgr", "millj", "nr", "tölul",
                     "umr", "þm", "þskj", "þús"]
    stems = ['prósent', 'blaðsíð', 'grein', 'háttvirt', 'hæstvirt',
             'klukkan', 'klukkustund', 'kílómetr', 'krón', 'málslið',
             'málsgrein', 'málsgrein', 'milljón', 'númer', 'tölulið',
             'umræð', 'þingm', 'þingskj', 'þúsund']
    return dict(zip(abbreviations, stems))
def getInputShape(model):
    """
    Return the numeric input shape for a single-input model.

    Return:
        tuple of ints; dimensions without a value (e.g. the batch
        dimension) are omitted.
    """
    return tuple(dim.value for dim in model.input.shape if dim.value)
def kewley_agn_oi(log_oi_ha):
    """Seyfert/LINER classification line for log([OI]/Ha)."""
    slope, intercept = 1.18, 1.30
    return slope * log_oi_ha + intercept
from typing import Tuple
import os
def get_load_average() -> Tuple[float, float, float]:
    """Return the 1-, 5- and 15-minute system load averages."""
    one, five, fifteen = os.getloadavg()
    return (one, five, fifteen)
def permissions_vsr(func):
    """
    Decorator wrapping *func*'s result in an HTML paragraph tag.

    :param func: callable taking a single ``name`` argument.
    :return: wrapped callable returning '<p>...</p>'.
    """
    def func_wrapper(name):
        body = func(name)
        return "<p>{0}</p>".format(body)
    return func_wrapper
def multiply_str(char, times):
    """
    Return *char* repeated *times* times as a single string.
    """
    repeated = char * times
    return repeated
def calories_per_item(hundr, weight, number_cookies, output_type):
    """Compute the energy content of one cookie.

    >>> calories_per_item(430, 0.3, 20, 0)
    'One item has 64.5 kcal.'
    >>> calories_per_item(430, 0.3, 20, 1)
    'One item has 64.5 Calories.'
    >>> calories_per_item(1, 1000, 10, 1)
    'One item has 1000.0 Calories.'
    >>> calories_per_item(1, 1000, 10, 0)
    'One item has 1000.0 kcal.'
    >>> calories_per_item(0, 1000, 10, 0)
    'One item has 0.0 kcal.'
    """
    kcal_per_kg = hundr * 10  # convert kcal per 100 g to kcal per kg
    unit = 'Calories' if output_type == 1 else 'kcal'
    per_item = (kcal_per_kg * weight) / number_cookies
    return 'One item has ' + str(per_item) + ' ' + unit + '.'
def solar_true_longitude(solar_geometric_mean_longitude, solar_equation_of_center):
    """Return the Solar True Longitude: the Solar Geometric Mean Longitude
    plus the Solar Equation of Center."""
    return solar_geometric_mean_longitude + solar_equation_of_center
import stat
import functools
import operator
def flags(flags: int, modstring: str) -> int:
    """Apply a POSIX chmod-style *modstring* (e.g. 'u+x', 'a-w') to the
    stat *flags* and return the result.

    Raises ValueError for any character sequence that is not valid chmod
    syntax.
    """
    perm_bits = {
        'r': (stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH),
        'w': (stat.S_IWUSR, stat.S_IWGRP, stat.S_IWOTH),
        'x': (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)
    }
    target = 'a'
    direction = None
    for ch in modstring:
        if ch in '+-':
            direction = ch
        elif ch in 'ugoa':
            target = ch
            direction = None  # a - or + must follow a group specifier
        elif ch in 'rwx' and direction in ('+', '-'):
            if target == 'a':
                mask = functools.reduce(operator.or_, perm_bits[ch])
            else:
                mask = perm_bits[ch]['ugo'.index(target)]
            if direction == '-':
                flags &= ~mask
            else:
                flags |= mask
        else:
            raise ValueError('invalid chmod: {!r}'.format(modstring))
    return flags
def maximumToys(prices, k):
    """Return how many toys can be bought with budget *k*, cheapest first.

    Note: sorts *prices* in place, as the original implementation did.
    """
    prices.sort()
    count = 0
    remaining = k
    for price in prices:
        if price > remaining:
            return count
        remaining -= price
        count += 1
    return count
def findUsername(data):
    """Return the username ('title' attribute) of the <User> child, or None.

    Args:
        data (xml.etree.ElementTree.Element): XML from PMS as a Element

    Returns:
        username or None
    """
    user_elem = data.find('User')
    if user_elem is None:
        return None
    return user_elem.attrib.get('title')
def fix_empty_strings(tweet_dic):
    """Replace empty strings with None in a tweet dict.

    Cleans the 'title'/'description' fields of every media entry's
    additional_media_info, and the user's profile image/background URLs.
    Mutates and returns *tweet_dic*.
    """
    def _fix_media_info(media_dic):
        extra = media_dic.get('additional_media_info', {})
        for field in ('title', 'description'):
            if extra.get(field) == '':
                media_dic['additional_media_info'][field] = None
        return media_dic
    media_lists = (
        tweet_dic.get('entities', {}).get('media', []),
        tweet_dic.get('extended_entities', {}).get('media', []),
        tweet_dic.get('extended_tweet', {}).get('entities', {}).get('media', []),
        tweet_dic.get('extended_tweet', {}).get('extended_entities', {}).get('media', []),
    )
    for media in media_lists:
        for item in media:
            _fix_media_info(item)
    for key in (
        'profile_background_image_url',
        'profile_background_image_url_https',
        'profile_image_url',
        'profile_image_url_https',
    ):
        if tweet_dic.get('user', {}).get(key) == '':
            tweet_dic['user'][key] = None
    return tweet_dic
def read_data():
    """Read the (currently development-only) PMB data file and return its
    records as a list.

    Records are separated by blank lines; the trailing empty record is
    dropped.
    """
    with open('../PMB/parsing/layer_data/4.0.0/en/gold/dev.conll') as file:
        raw = file.read()
    records = raw.split('\n\n')
    records.pop(-1)
    return records
def dice_coefficient(x, target):
    """
    Dice Loss: 1 - 2 * (intersection(A, B) / (A^2 + B^2)), per instance.

    :param x: predictions, shape (n_inst, ...).
    :param target: ground truth, same shape as x.
    :return: 1-D tensor of per-instance losses.
    """
    eps = 1e-5
    n_inst = x.size(0)
    flat_x = x.reshape(n_inst, -1)
    flat_t = target.reshape(n_inst, -1)
    overlap = (flat_x * flat_t).sum(dim=1)
    denom = (flat_x ** 2.0).sum(dim=1) + (flat_t ** 2.0).sum(dim=1) + eps
    return 1. - (2 * overlap / denom)
import pydoc
def locate(name):
    """
    Resolve *name* to an object via pydoc, falling back to this module's
    globals; returns None when nothing is found.
    """
    found = pydoc.locate(name)
    if found:
        return found
    return globals().get(name, None)
import argparse
def parse_args():
    """Build the binlog2sql command line parser.

    NOTE: despite the name, this returns the configured ArgumentParser
    itself (the caller is expected to invoke parse_args() on it), not a
    parsed Namespace.
    """
    parser = argparse.ArgumentParser(description='Parse MySQL binlog to SQL you want', add_help=False)
    # Connection options (-h is taken by host, so built-in help is disabled
    # above and re-added as --help below).
    connect_setting = parser.add_argument_group('connect setting')
    connect_setting.add_argument('-h', '--host', dest='host', type=str,
                                 help='Host the MySQL database server located', default='127.0.0.1')
    connect_setting.add_argument('-u', '--user', dest='user', type=str,
                                 help='MySQL Username to log in as', default='root')
    connect_setting.add_argument('-p', '--password', dest='password', type=str, nargs='*',
                                 help='MySQL Password to use', default='')
    connect_setting.add_argument('-P', '--port', dest='port', type=int,
                                 help='MySQL port to use', default=3306)
    # Binlog position/time window filters.
    interval = parser.add_argument_group('interval filter')
    interval.add_argument('--start-file', dest='start_file', type=str, help='Start binlog file to be parsed')
    interval.add_argument('--start-position', '--start-pos', dest='start_pos', type=int,
                          help='Start position of the --start-file', default=4)
    interval.add_argument('--stop-file', '--end-file', dest='end_file', type=str,
                          help="Stop binlog file to be parsed. default: '--start-file'", default='')
    interval.add_argument('--stop-position', '--end-pos', dest='end_pos', type=int,
                          help="Stop position. default: latest position of '--stop-file'", default=0)
    # %% is required: argparse help strings are %-formatted.
    interval.add_argument('--start-datetime', dest='start_time', type=str,
                          help="Start time. format %%Y-%%m-%%d %%H:%%M:%%S", default='')
    interval.add_argument('--stop-datetime', dest='stop_time', type=str,
                          help="Stop Time. format %%Y-%%m-%%d %%H:%%M:%%S;", default='')
    parser.add_argument('--stop-never', dest='stop_never', action='store_true', default=False,
                        help="Continuously parse binlog. default: stop at the latest event when you start.")
    parser.add_argument('--help', dest='help', action='store_true', help='help information', default=False)
    # Database/table filters.
    schema = parser.add_argument_group('schema filter')
    schema.add_argument('-d', '--databases', dest='databases', type=str, nargs='*',
                        help='dbs you want to process', default='')
    schema.add_argument('-t', '--tables', dest='tables', type=str, nargs='*',
                        help='tables you want to process', default='')
    # Event-type filters.
    event = parser.add_argument_group('type filter')
    event.add_argument('--only-dml', dest='only_dml', action='store_true', default=False,
                       help='only print dml, ignore ddl')
    event.add_argument('--sql-type', dest='sql_type', type=str, nargs='*', default=['INSERT', 'UPDATE', 'DELETE'],
                       help='Sql type you want to process, support INSERT, UPDATE, DELETE.')
    # exclusive = parser.add_mutually_exclusive_group()
    parser.add_argument('-K', '--no-primary-key', dest='no_pk', action='store_true',
                        help='Generate insert sql without primary key if exists', default=False)
    parser.add_argument('-B', '--flashback', dest='flashback', action='store_true',
                        help='Flashback data to start_position of start_file', default=False)
    parser.add_argument('--back-interval', dest='back_interval', type=float, default=1.0,
                        help="Sleep time between chunks of 1000 rollback sql. set it to 0 if do not need sleep")
    return parser
import requests
def get_quote_data(ticker):
    """Inputs: @ticker

    Fetch Yahoo Finance quote data for *ticker* and return a dictionary
    containing over 70 elements (company name, book value, moving average
    data, pre-market / post-market price when applicable, and more).

    Raises AssertionError when the server response is not OK.
    """
    url = "https://query1.finance.yahoo.com/v7/finance/quote?symbols=" + ticker
    response = requests.get(url)
    if not response.ok:
        raise AssertionError(
            """Invalid response from server. Check if ticker is valid."""
        )
    payload = response.json()
    return payload["quoteResponse"]["result"][0]
import torch
def _old_extract_roles(x, roles):
    """
    Select one role slice per (n, b) position.

    x is [N, B, R, *shape]; roles is [N, B].
    Returns [N, B, *shape] where entry (n, b) is x[n, b, roles[n, b]].
    """
    N, B, R, *shape = x.shape
    assert roles.shape == (N, B)
    selected = [x[n:n + 1, range(B), roles[n]] for n in range(N)]
    return torch.cat(selected, dim=0)
def is_valid_table_name(cur, table_name):
    """
    Return True when *table_name* names an existing table in the database.

    Note: Copied from utils.database for use in testing, to avoid a
    circular dependency between tests and implementation.

    Args:
        cur: sqlite3 database cursor object
        table_name (str): name to check

    Returns:
        True if valid, False otherwise
    """
    query = """
    SELECT 1
    FROM sqlite_master
    WHERE type == 'table'
    AND name == ?
    """
    row = cur.execute(query, (table_name,)).fetchone()
    return row is not None
import re
def sub_repeatedly(pattern, repl, term):
    """Apply re.sub() repeatedly until the string stops changing."""
    previous, current = None, term
    while previous != current:
        previous, current = current, re.sub(pattern, repl, current)
    return current
def create_lexicon(word_tags):
    """
    Build a lexicon string in the format expected by nltk.CFG.fromString()
    from a list of (word, tag) tuples: one line per tag of the form
    TAG -> 'word1' | 'word2' ...
    """
    # Group words under their tag; sets filter out duplicates.
    word_dict = {}
    for word, tag in word_tags:
        word_dict.setdefault(tag, set()).add(word)
    # PRO is the tag for 's, but the 's is not removed on nouns.
    word_dict['NN'] = [x.replace('\'s', '') for x in word_dict['NN']]
    word_dict['JJ'] = [x.replace('\'s', '') for x in word_dict['JJ']]
    del word_dict[',']
    word_dict['PRP'].update(word_dict['PRP$'])
    del word_dict['PRP$']
    word_dict['POS'] = ['"s']
    # Convert the dictionary to the NLTK grammar format.
    lines = []
    for tag, words in word_dict.items():
        quoted = [f'\'{w}\'' for w in words]  # wrap each word in quotes
        lines.append(tag + ' -> ' + ' | '.join(quoted) + '\n')
    return ''.join(lines)
import ipaddress
def ip_only(value):
    """
    Returns only the IP address string of the value provided. The value could be either an IP address,
    and IP network or and IP interface as defined by the ipaddress module.

    Parameters
    ----------
    value : str
        The value to use

    Returns
    -------
    str
        The IP address only value, if the value provided was valid
    None
        If the value provided is not an IP thing
    """
    converters = [
        lambda x: str(ipaddress.ip_address(x)),
        lambda x: str(ipaddress.ip_interface(x).ip),
        lambda x: str(ipaddress.ip_network(x).network_address),
    ]
    for convert in converters:
        try:
            return convert(value)
        # BUG FIX: catch only ValueError (what the ipaddress factory
        # functions raise for invalid input) instead of a bare except
        # that silently swallowed every error, including typos/bugs.
        except ValueError:
            pass
    return None
def area_triangle(base, height):
    """Return the area of a triangle given its base and height."""
    product = base * height
    return product / 2.0
def get_table_6():
    """Return Table 6: whether heat storage may be adopted.

    Returns:
        list: seven rows of ('不可' = not allowed / '可' = allowed) flags.
    """
    allowed_from_third = ('不可', '不可', '可', '可', '可')
    allowed_from_fourth = ('不可', '不可', '不可', '可', '可')
    return [allowed_from_third] * 5 + [allowed_from_fourth] * 2
import random
def is_prime(number, num_trials=200):
    """Determine whether *number* is prime via the Miller-Rabin test.

    Args:
        number (int): Number to perform primality test on.
        num_trials (int): Number of Miller-Rabin rounds to run.

    Returns:
        True if number is (very probably) prime, False otherwise.
    """
    if number < 2:
        return False
    if number != 2 and number % 2 == 0:
        return False
    # Write number - 1 as 2^s * exp with exp odd.
    exp = number - 1
    while exp % 2 == 0:
        exp //= 2
    rng = random.SystemRandom()
    for _ in range(num_trials):
        witness = int(rng.randrange(1, number))
        cur_exp = exp
        power = pow(witness, cur_exp, number)
        # Square repeatedly until we hit 1, -1, or run out of doublings.
        while cur_exp != number - 1 and power != 1 and power != number - 1:
            power = (power * power) % number
            cur_exp *= 2
        # Composite witness: never reached -1 after at least one doubling.
        if power != number - 1 and cur_exp % 2 == 0:
            return False
    return True
from pathlib import Path
from typing import List
import os
def get_proj_libdirs(proj_dir: Path) -> List[str]:
    """
    Return the PROJ library directories.

    Uses the PROJ_LIBDIR environment variable when set; otherwise looks
    for existing lib/ and lib64/ directories under *proj_dir*.

    Raises SystemExit when no library directory can be found.
    """
    env_libdir = os.environ.get("PROJ_LIBDIR")
    if env_libdir is not None:
        return [env_libdir]
    libdirs = [
        str(candidate)
        for candidate in (proj_dir / "lib", proj_dir / "lib64")
        if candidate.exists()
    ]
    if not libdirs:
        raise SystemExit(
            "ERROR: PROJ_LIBDIR dir not found. Please set PROJ_LIBDIR."
        )
    return libdirs
def str2num(s):
    """Convert string to int or float number.

    Parameters
    ----------
    s : string
        String representing a number.

    Returns
    -------
    Number (int when the value is integral, float otherwise)

    Raises
    ------
    TypeError
        If `s` is not a string.
    ValueError
        If the string does not represent a (float or int) number.
    """
    try:
        value = float(s)
    except ValueError:
        raise ValueError("'s' does not represent a number (int or float)")
    return int(value) if value.is_integer() else value
import os
def get_html_templates_path():
    """
    Return path to ABlog's templates folder, resolved relative to this
    module's location.
    """
    package_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(package_dir, "templates")
from typing import OrderedDict
import inspect
def _get_new_args_dict(func, args, kwargs):
    """Merge args, kwargs and signature defaults into one ordered dict.

    The ordering follows *func*'s signature, so the resulting values can
    later be passed purely positionally (as needed where we can only
    supply *args, e.g. xarray.apply_ufunc in the xarray wrapper).
    """
    merged = OrderedDict()
    parameters = inspect.signature(func).parameters
    for position, (name, parameter) in enumerate(parameters.items()):
        if position < len(args):
            merged[name] = args[position]
        elif name in kwargs:
            merged[name] = kwargs[name]
        else:
            merged[name] = parameter.default
    return merged
def find_dead_blocks(func, cfg):
    """Return all immediately dead blocks: blocks with no predecessors,
    excluding the function's entry block."""
    dead = []
    for block in cfg:
        if not cfg.predecessors(block) and block != func.startblock:
            dead.append(block)
    return dead
def num_list(to_parse):
    """Parse the string representation of a list of ints.

    Entries may be 'None' or '#'-separated sub-lists, e.g.
    "[1, None, 2#3]" -> [1, None, [2, 3]].

    Arguments:
        to_parse {string} -- String representation of the list

    Returns:
        list[int] -- List represented in to_parse
    """
    if len(to_parse) == 2:
        return []
    tokens = [tok.strip() for tok in to_parse[1:-1].split(',')]
    parsed = []
    for token in tokens:
        if token == "None":
            parsed.append(None)
        elif "#" in token:
            parsed.append([int(part) for part in token.split("#")])
        else:
            parsed.append(int(token))
    return parsed
def read_user(str):
    """Parse a whitespace-separated user record into a dict. (str -> dict)"""
    fields = str.split()
    return {
        'first': fields[0],
        'last': fields[1],
        'username': fields[5],
        'custID': fields[3],
        'password': fields[7],
        'rank': 0,
        'total': 0,
    }
from typing import Union
from typing import List
import shlex
import subprocess
def _run(cmd: Union[str, List[str]]) -> List[str]:
    """Run a 'cmd', returning stdout as a list of strings.

    :param cmd: command as a shell-style string (split with shlex) or an
                argv list.
    :return: stdout decoded as UTF-8 and split on newlines.
    """
    # BUG FIX: use isinstance instead of `type(cmd) == str` so str
    # subclasses are handled correctly.
    cmd_list = shlex.split(cmd) if isinstance(cmd, str) else cmd
    result = subprocess.run(cmd_list, capture_output=True)
    return result.stdout.decode('utf-8').split("\n")
def compFirstFivePowOf2(iset={0, 1, 2, 3, 4}):
    """
    task 0.5.6: return the set of 2**x for each x in *iset*; the default
    yields the first five powers of two, starting with 2**0.
    """
    powers = set()
    for exponent in iset:
        powers.add(2 ** exponent)
    return powers
def __pairwise__(iterable):
    """ Convert a list of elements into non-overlapping pairs:
    list -> (list[0], list[1]), (list[2], list[3]), ...
    An odd trailing element is dropped.
    :param iterable: Input list.
    :return: Iterator of pairs of the given list elements.
    """
    it = iter(iterable)
    return zip(it, it)
def read_ground_stations_extended(filename_ground_stations_extended):
    """
    Read the extended ground-station file.

    :param filename_ground_stations_extended: Filename of ground stations
           extended (typically /path/to/ground_stations.txt)
    :return: List of ground-station dicts
    :raises ValueError: on a malformed line or a non-sequential gid
    """
    stations = []
    with open(filename_ground_stations_extended, 'r') as f:
        for gid, line in enumerate(f):
            split = line.split(',')
            if len(split) != 8:
                raise ValueError("Extended ground station file has 8 columns: " + line)
            if int(split[0]) != gid:
                raise ValueError("Ground station id must increment each line")
            stations.append({
                "gid": gid,
                "name": split[1],
                "latitude_degrees_str": split[2],
                "longitude_degrees_str": split[3],
                "elevation_m_float": float(split[4]),
                "cartesian_x": float(split[5]),
                "cartesian_y": float(split[6]),
                "cartesian_z": float(split[7]),
            })
    return stations
import uuid
def shortPrescID():
    """Create an R2 (short format) Prescription ID.

    Builds the prescription ID from a UUID and appends the required check
    digit, selected from the 37-character check-digit alphabet.
    """
    _PRESC_CHECKDIGIT_VALUES = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ+'
    hex_string = str(uuid.uuid1()).replace('-', '').upper()
    presc_id = hex_string[:6] + '-Z' + hex_string[6:11] + '-' + hex_string[12:17]
    digits = presc_id.replace('-', '')
    length = len(digits)
    # Weighted base-36 sum over all digits determines the check character.
    total = 0
    for position, char in enumerate(digits):
        total += int(char, 36) * (2 ** (length - position))
    check_value = _PRESC_CHECKDIGIT_VALUES[(38 - total % 37) % 37]
    return presc_id + check_value
import ast
def get_module_docstring(path):
    """Return a .py file's module docstring without executing the file."""
    with open(path) as f:
        tree = ast.parse(f.read())
    return ast.get_docstring(tree)
def transform(f, a, b, c, d):
    """
    Transform a given function linearly.

    If f(t) is the original function, then the returned function is
    F(t) = a*f(c*t + d) + b.
    """
    def transformed(x):
        return a * f(c * x + d) + b
    return transformed
import logging
def log_command(func):
    """
    Logging decorator for bot commands: logs the invoking user, channel,
    command and text before running the wrapped handler.
    """
    def log_command(*args, **kwargs):
        slack, command, event = args
        user = slack.user_info(event["user"])
        logging.info(
            'USER: %s | CHANNEL ID: %s | COMMAND: %s | TEXT: %s'
            % (user["user"]["name"], event["channel"], command, event["text"])
        )
        return func(*args, **kwargs)
    return log_command
import os
def expand_home_folder(path):
    """Expand a leading ~ in *path* to the HOME environment variable;
    other paths are returned unchanged."""
    if not path.startswith("~"):
        return path
    return os.environ.get('HOME') + path[1:]
def centered_mols(self, labels, return_trans=False):
    """
    Return the selected molecules translated to the origin, with the
    correspondingly translated and confined cell.

    Parameters
    ----------
    labels : int or list of ints
        The labels of the atoms to select
    return_trans : bool
        Also return the translation vector (-centroid) that was applied

    Returns
    -------
    mol : Mol object
        The selected molecules with their centroid at the origin
    mod_cell : Mol object
        The new confined cell corresponding to the now translated molecules
    """
    mol, mod_cell = self.complete_mol(labels)
    shift = -mol.centroid()
    mol.translate(shift)
    mod_cell.translate(shift)
    mod_cell = mod_cell.confined()
    if return_trans:
        return mol, mod_cell, shift
    return mol, mod_cell
import json
def load_config(path='config.json'):
    """
    Load configuration from a JSON file.

    :param path: path of the JSON config file (default ``config.json``)
    :return: tuple of (station address, polling interval as int, units)
    """
    with open(path) as config_file:
        config = json.load(config_file)
    # NOTE(review): the key 'station_max_address' looks like a typo for
    # 'station_mac_address' -- confirm against the real config schema
    # before renaming; kept as-is to preserve behavior.
    return (config['station_max_address'], int(config['interval']), config['units'])
import torch
def log_px_z(pred_logits, outcome):
    """
    Return the Bernoulli log-likelihood of *outcome* given logits.

    Uses the numerically stable softplus form
    ``log p(y|x) = -(max(x, 0) - x*y + log(1 + exp(-|x|)))``
    summed over all dimensions except the batch dimension.

    :param pred_logits: logits for outcome 1, shape (batch, ...)
    :param outcome: datapoint, same shape as ``pred_logits``
    :return: per-sample log probability, shape (batch,)
    """
    logits = pred_logits.view(pred_logits.size(0), -1)
    targets = outcome.view(outcome.size(0), -1)
    zero = torch.tensor(0., device=logits.device)
    stable_term = torch.log(1 + torch.exp(-torch.abs(logits)))
    return -torch.sum(torch.max(logits, zero) - logits * targets + stable_term, 1)
import functools
def partial_at(func, indices, *args):
    """Partially apply *func*, fixing *args* at the positions in *indices*.

    The returned wrapper fills the fixed positions from *args* (in order)
    and every other position from the arguments it is called with.
    """
    @functools.wraps(func)
    def wrapper(*call_args, **call_kwargs):
        fixed = iter(args)
        free = iter(call_args)
        total = len(args) + len(call_args)
        # Interleave: a fixed value at each index in `indices`, a caller
        # value everywhere else.
        merged = [next(fixed) if pos in indices else next(free)
                  for pos in range(total)]
        return func(*merged, **call_kwargs)
    return wrapper
def augment_features(data, feature_augmentation):
    """
    Apply one or several feature-augmentation functions to a data matrix.

    :param data: Data matrix (passed through unchanged when None).
    :param feature_augmentation: A callable, or a list of callables applied
        in order; ignored when None.
    :return: The (possibly) augmented data matrix.
    """
    if data is None or feature_augmentation is None:
        return data
    augmentations = (feature_augmentation
                     if isinstance(feature_augmentation, list)
                     else [feature_augmentation])
    for augment in augmentations:
        data = augment(data)
    return data
def copy_keys_except(dic, *keys):
    """Return a shallow copy of *dic* without the given *keys*.

    Keys that are absent from the dict are silently ignored.
    """
    pruned = dic.copy()
    for unwanted in keys:
        # pop with a default removes the key when present, no-op otherwise.
        pruned.pop(unwanted, None)
    return pruned
import random
import math
def get_params(img, scale, ratio):
    """Get parameters for ``crop`` for a random sized crop.
    Args:
        img (PIL Image): Image to be cropped.
        scale (tuple): range of size of the origin size cropped
        ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop`` for a random
            sized crop.
    """
    area = img.size[0] * img.size[1]
    # Rejection sampling: draw up to 10 random (area, aspect-ratio) pairs
    # and keep the first crop that fits entirely inside the image.
    for attempt in range(10):
        target_area = random.uniform(*scale) * area
        # Sample the aspect ratio log-uniformly so wide and tall crops are
        # equally likely.
        log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
        aspect_ratio = math.exp(random.uniform(*log_ratio))
        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))
        if w <= img.size[0] and h <= img.size[1]:
            # Random top-left corner such that the crop stays in bounds.
            i = random.randint(0, img.size[1] - h)
            j = random.randint(0, img.size[0] - w)
            return i, j, h, w
    # Fallback to central crop
    # Clamp the whole image to the nearest allowed aspect ratio instead.
    in_ratio = img.size[0] / img.size[1]
    if in_ratio < min(ratio):
        w = img.size[0]
        h = int(round(w / min(ratio)))
    elif in_ratio > max(ratio):
        h = img.size[1]
        w = int(round(h * max(ratio)))
    else:  # whole image
        w = img.size[0]
        h = img.size[1]
    i = (img.size[1] - h) // 2
    j = (img.size[0] - w) // 2
    return i, j, h, w
def generate_html_from_module(module):
    """
    Render a module's documentation as a single HTML page.

    Walks every callable attribute found in ``module.__dict__`` and emits
    its docstring plus annotation names.  Note that callables *imported
    into* the module namespace are documented as well.

    :param module: Module object to extract documentation from
    :return: String representation of an HTML file
    """
    html_content = f"<html><head><title>{module.__name__} Doc</title></head><body><h1>Module {module.__name__}:</h1>"
    # Bug fix: the module docstring was previously mislabelled "Function ...".
    html_content += f"{module.__doc__}"
    for function in module.__dict__:
        if callable(getattr(module, function)):
            html_content += f"<h2>Function {function}:</h2>"
            html_content += f"{getattr(module, function).__doc__}"
            html_content += f"<h3>Annotations:</h3>"
            # Bug fix: builtins / C callables / classes may not define
            # __annotations__, which used to raise AttributeError.
            for annotation in getattr(getattr(module, function), "__annotations__", {}):
                html_content += f"{annotation} <br>"
    html_content += "</body></html>"
    return html_content
def getlineno(frame):
    """Return *frame*'s current line number."""
    # FrameType.f_lineno is a descriptor that decodes co_lnotab lazily,
    # so this stays correct even under bytecode optimization.
    return frame.f_lineno
def format_len(x):
    """
    Length of *x* when rendered, counting 2 chars per separator beyond
    three elements.

    >>> format_len('abc')
    3
    >>> format_len(('(', ('(', 'def', ')'), 'yz', ')'))
    11
    """
    if not isinstance(x, (list, tuple)):
        return len(x)
    separators = 2 * (len(x) - 3) if len(x) > 3 else 0
    return sum(format_len(item) for item in x) + separators
def _uid_or_str(node_or_entity):
""" Helper function to support the transition from `Entitie`s to `Node`s.
"""
return (
node_or_entity.uid
if hasattr(node_or_entity, "uid")
else str(node_or_entity)
) | 82f5747e8c73e1c167d351e1926239f17ea37b98 | 707,297 |
def answer(panel_array):
    """Maximum product obtainable from a non-empty subset of panel powers.

    Takes all positive panels and an even number of negative panels,
    dropping the negative with the smallest magnitude when their count is
    odd.  A zero panel is used only when nothing positive is achievable.

    Fixes over the previous version: debug prints removed; the result is
    *always* returned as a string (two paths used to return an int); and
    inputs like ``[0, -5]`` now correctly yield ``"0"`` instead of ``"1"``.

    :param panel_array: list of int panel powers (may be empty)
    :return: the maximal product, as a string
    """
    # Edge case: no panels at all.
    if not panel_array:
        return str(0)
    positives = [x for x in panel_array if x > 0]
    negatives = sorted(x for x in panel_array if x < 0)
    has_zero = len(positives) + len(negatives) < len(panel_array)
    if len(negatives) % 2 != 0:
        if len(negatives) == 1 and not positives:
            # A lone negative: prefer a zero panel when one exists,
            # otherwise the negative itself is the best we can do.
            return str(0) if has_zero else str(negatives[0])
        # Drop the negative closest to zero (last after ascending sort)
        # so the remaining negatives multiply to a positive value.
        negatives.pop()
    if not positives and not negatives:
        # Only zero panels are usable.
        return str(0)
    product = 1
    for value in positives + negatives:
        product *= value
    return str(product)
import subprocess
def exec_command_rc(*cmdargs, **kwargs):
    """
    Run a command and return its exit code.

    Parameters
    ----------
    cmdargs : list
        Variadic list whose:
        1. Mandatory first element is the absolute path, relative path, or
           basename in the current `${PATH}` of the command to run.
        2. Optional remaining elements are arguments to pass to this command.
    All keyword arguments are passed as is to the `subprocess.call()`
    function.

    Returns
    ----------
    int
        The command's exit code as an unsigned byte in the range `[0, 255]`,
        where 0 signifies success and any other value signals a failure.
    """
    # 'subprocess.call' does not accept an 'encoding' keyword; drop it.
    kwargs.pop('encoding', None)
    return subprocess.call(cmdargs, **kwargs)
def readCoords(f):
    """Read an XYZ file and return its coordinates as an MRChem-JSON
    friendly string (atom count and comment lines are skipped)."""
    with open(f) as xyz_file:
        coordinate_lines = xyz_file.readlines()[2:]
    return '\n'.join(line.strip() for line in coordinate_lines)
def get_or_create(session, model, **kwargs):
    """
    Fetch the instance of *model* matching *kwargs*, creating (and adding
    to the session) a new one when no match exists.

    Parameters:
        session: Current database session
        model: The Class of the database model
        **kwargs: The attributes for the desired instance
    Returns:
        (object): An object instance of the model with given kwargs
    """
    existing = session.query(model).filter_by(**kwargs).first()
    if existing:
        return existing
    created = model(**kwargs)
    session.add(created)
    return created
import os
def get_environ_list(name, default=None):
    """Return the colon-delimited list stored in environment variable *name*.

    Falls back to *default* when the variable is unset, or to an empty
    list when no default is given.
    """
    raw = os.environ.get(name)
    if raw is not None:
        return raw.split(':')
    if default is not None:
        return default
    return []
def reverse_complement(sequence):
    """Return the reverse complement of a DNA sequence.

    Handles upper/lower case, gaps ('-') and IUPAC ambiguity codes.
    Unknown characters are reported and replaced by 'X' (best effort,
    matching the original behavior).
    """
    complement_bases = {
        'g':'c', 'c':'g', 'a':'t', 't':'a', 'n':'n',
        'G':'C', 'C':'G', 'A':'T', 'T':'A', 'N':'N', "-":"-",
        "R":"Y", "Y":"R", "S":"W", "W":"S", "K":"M", "M":"K",
        "B":"V", "V":"B", "D": "H", "H": "D",
        "r":"y", "y":"r", "s":"w", "w":"s", "k":"m", "m":"k",
        "b":"v", "v":"b", "d": "h", "h": "d"
    }
    revcomp = []
    for base in reversed(sequence):
        try:
            revcomp.append(complement_bases[base])
        except KeyError:
            print("Unexpected base encountered: ", base, " returned as X!!!")
            revcomp.append("X")
    return "".join(revcomp)
def clean_acl(name, value):
    """
    Returns a cleaned ACL header value, validating that it meets the formatting
    requirements for standard Swift ACL strings.
    The ACL format is::
        [item[,item...]]
    Each item can be a group name to give access to or a referrer designation
    to grant or deny based on the HTTP Referer header.
    The referrer designation format is::
        .r:[-]value
    The ``.r`` can also be ``.ref``, ``.referer``, or ``.referrer``; though it
    will be shortened to just ``.r`` for decreased character count usage.
    The value can be ``*`` to specify any referrer host is allowed access, a
    specific host name like ``www.example.com``, or if it has a leading period
    ``.`` or leading ``*.`` it is a domain name specification, like
    ``.example.com`` or ``*.example.com``. The leading minus sign ``-``
    indicates referrer hosts that should be denied access.
    Referrer access is applied in the order they are specified. For example,
    .r:.example.com,.r:-thief.example.com would allow all hosts ending with
    .example.com except for the specific host thief.example.com.
    Example valid ACLs::
        .r:*
        .r:*,.r:-.thief.com
        .r:*,.r:.example.com,.r:-thief.example.com
        .r:*,.r:-.thief.com,bobs_account,sues_account:sue
        bobs_account,sues_account:sue
    Example invalid ACLs::
        .r:
        .r:-
    By default, allowing read access via .r will not allow listing objects in
    the container -- just retrieving objects from the container. To turn on
    listings, use the .rlistings directive.
    Also, .r designations aren't allowed in headers whose names include the
    word 'write'.
    ACLs that are "messy" will be cleaned up. Examples:
    ======================  ======================
    Original                Cleaned
    ----------------------  ----------------------
    ``bob, sue``            ``bob,sue``
    ``bob , sue``           ``bob,sue``
    ``bob,,,sue``           ``bob,sue``
    ``.referrer : *``       ``.r:*``
    ``.ref:*.example.com``  ``.r:.example.com``
    ``.r:*, .rlistings``    ``.r:*,.rlistings``
    ======================  ======================
    :param name: The name of the header being cleaned, such as X-Container-Read
                 or X-Container-Write.
    :param value: The value of the header being cleaned.
    :returns: The value, cleaned of extraneous formatting.
    :raises ValueError: If the value does not meet the ACL formatting
                        requirements; the error message will indicate why.
    """
    name = name.lower()
    values = []
    for raw_value in value.split(','):
        raw_value = raw_value.strip()
        if not raw_value:
            # Skip empty items produced by messy ACLs like "bob,,,sue".
            continue
        if ':' not in raw_value:
            # Plain group/account name -- passed through untouched.
            values.append(raw_value)
            continue
        # Split only on the first ':' so referrer values may contain colons.
        first, second = (v.strip() for v in raw_value.split(':', 1))
        if not first or not first.startswith('.'):
            # "account:user" style item -- not a designator, keep as-is.
            values.append(raw_value)
        elif first in ('.r', '.ref', '.referer', '.referrer'):
            if 'write' in name:
                # Referrer-based grants only make sense for read ACLs.
                raise ValueError('Referrers not allowed in write ACL: '
                                 '%s' % repr(raw_value))
            negate = False
            if second and second.startswith('-'):
                # Leading '-' denies access for the matching referrers.
                negate = True
                second = second[1:].strip()
            if second and second != '*' and second.startswith('*'):
                # Normalize "*.example.com" to ".example.com".
                second = second[1:].strip()
            if not second or second == '.':
                raise ValueError('No host/domain value after referrer '
                                 'designation in ACL: %s' % repr(raw_value))
            # Always emit the shortest designator form ".r".
            values.append('.r:%s%s' % ('-' if negate else '', second))
        else:
            raise ValueError('Unknown designator %s in ACL: %s' %
                             (repr(first), repr(raw_value)))
    return ','.join(values)
def soup_extract_enzymelinks(tabletag):
    """Map each enzyme-family link's text to its URL within the first table."""
    anchors = tabletag.find_all("a", href=True)
    return {anchor.string: anchor['href'] for anchor in anchors}
def custom_field_sum(issues, custom_field):
    """Sum a custom field's values over a list of JIRA issues.

    Args:
        issues: List  The issue list from the JQL query
        custom_field: String  The custom field to sum.
    Returns:
        Integer sum of all found values of the custom_field.
    """
    total = 0
    for issue in issues:
        field_value = getattr(issue.fields, custom_field)
        if field_value is None:
            # NOTE(review): an unset field counts as 2 -- presumably a
            # default estimate; confirm against the team's conventions.
            total += 2
        else:
            total += field_value
    return total
def build_target_areas(entry):
    """Split the raw CAP areaDesc string into a list of trimmed area names."""
    raw_areas = str(entry['cap:areaDesc']).split(';')
    return [area.strip() for area in raw_areas]
import pathlib
def list_files(directory):
    """Return all non-hidden regular files directly inside *directory*."""
    entries = pathlib.Path(directory).iterdir()
    return [entry for entry in entries
            if entry.is_file() and not entry.name.startswith('.')]
from typing import Union
def score_normalization(extracted_score: Union[str, None]):
    """
    SOFA score normalization.

    Returns the integer value of the SOFA score when it lies in [0, 29];
    returns None for missing or out-of-range scores.
    """
    if extracted_score is None:
        return None
    value = int(extracted_score)
    return value if 0 <= value < 30 else None
def format_dev_sub_dev_id(pciIdPair):
    """Format a PCI (device id, sub-device id) pair as hex.

    pciIdPair: (int pci device id, int pci sub device id or None)
    """
    device_id, sub_device_id = pciIdPair
    if sub_device_id is None:
        return "(0x%08X, None)" % device_id
    return "(0x%08X, 0x%08X)" % (device_id, sub_device_id)
def polynom_prmzt(x, t, order):
    """
    Polynomial (deterministic) parameterization of the fast variables (Y).

    NB: Only valid for system settings of Wilks'2005.  The unused *t*
    argument keeps the signature compatible with time-dependent
    parameterizations.  Very little improvement is gained above order=1.
    """
    if order == 4:
        # Coefficients from Wilks.
        return 0.262 + 1.45*x - 0.0121*x**2 - 0.00713*x**3 + 0.000296*x**4
    if order == 3:
        # Coefficients from Arnold.
        return 0.341 + 1.30*x - 0.0136*x**2 - 0.00235*x**3
    if order == 1:
        # Linear fit -- see AdInf/illust_parameterizations.py
        return 0.74 + 0.82*x
    if order == 0:
        # Constant fit -- see AdInf/illust_parameterizations.py
        return 3.82
    if order == -1:
        # Leave as dxdt_trunc (no parameterization).
        return 0
    raise NotImplementedError
import errno
import os
def pid_exists(pid):
    """Check whether *pid* exists in the current process table.

    :param pid: Process id to probe (negative pids are never valid here).
    :return: True if a process with this pid exists, False otherwise.
    :raises OSError: for unexpected ``os.kill`` failures (anything other
        than ESRCH/EPERM) -- the previous version silently reported those
        as "does not exist".
    """
    if pid < 0:
        return False
    try:
        os.kill(pid, 0)  # signal 0: existence/permission check only
    except OSError as e:
        if e.errno == errno.ESRCH:
            return False  # no such process
        if e.errno == errno.EPERM:
            return True  # process exists; we just may not signal it
        raise  # e.g. EINVAL -- don't mask as "does not exist"
    return True
def clap_convert(txt):
    """Convert a Medium clap-count string to an integer.

    Args:
        txt (str): clap value as rendered by Medium (e.g. "250" or "3.3K")
    Returns:
        number of claps (int)
    """
    # Medium abbreviates thousands with a trailing "K".
    if txt[-1] == "K":
        return int(float(txt[:-1]) * 1000)
    return int(txt)
import io
def write_phase1_capsummary(inst, isStringIO=True):
    """
    Write out a multiweek summary of capacity, demand, understaffing.

    :param inst: Model instance
    :param isStringIO: True (default) to return a StringIO object, False to
        return a plain string.
    :return: capacity summary as a StringIO object (rewound to the start)
        or as a string.
    """
    param = 'period,day,week,dmd,cap,us1,us2,ustot\n'
    rows = [(i, j, w,
             inst.dmd_staff[i, j, w],
             inst.cov[i, j, w].value,
             inst.under1[i, j, w].value,
             inst.under2[i, j, w].value,
             inst.under1[i, j, w].value + inst.under2[i, j, w].value)
            for i in inst.PERIODS
            for j in inst.DAYS
            for w in inst.WEEKS
            ]
    for row in rows:
        # One CSV line per (period, day, week) tuple.
        param += ','.join(str(r) for r in row) + '\n'
    if isStringIO:
        param_out = io.StringIO()
        param_out.write(param)
        # BUG FIX: previously returned .getvalue() (a str) despite the
        # documented StringIO return; hand back the buffer itself, rewound
        # so callers can read it from the beginning.
        param_out.seek(0)
        return param_out
    else:
        return param
def project_disk_sed(bulge_sed, disk_sed):
    """Project the disk SED onto the space where it is bluer.

    For most observed galaxies the bulge-minus-disk color difference is
    roughly monotonic (the disk is bluer).  Wherever that difference dips
    below its previous-wavelength value, this operator lifts the disk SED
    so the color difference is carried forward, analogous to the
    morphological monotonicity constraint on the model's `S` matrix.
    A single iteration is weaker than `project_disk_sed_mean`, but over
    many iterations it is expected to converge to a better value.
    """
    new_sed = disk_sed.copy()
    color_diff = bulge_sed - disk_sed
    # Note: the last element is intentionally left untouched, matching the
    # open range below.
    for idx in range(1, len(color_diff) - 1):
        if color_diff[idx] < color_diff[idx - 1]:
            new_sed[idx] = new_sed[idx] + color_diff[idx - 1]
            color_diff[idx] = color_diff[idx - 1]
    return new_sed
def revcomp(sequence):
    """
    Return the reverse complement of an RNA sequence.

    :param sequence: The RNA sequence in string form
    :return: The reverse complement sequence in string form (upper case)
    """
    pairing = {"A": "U", "U": "A", "C": "G", "G": "C", "N": "N"}
    return "".join(pairing[base.upper()] for base in reversed(sequence))
import time
def timesince():
    """Return the current Unix timestamp: seconds elapsed since the epoch
    (1970-01-01 00:00), as a raw float before any formatting."""
    return time.time()
def effective_area(true_energy, reco_energy, simu_area):
    """
    Compute the effective area from the simulated and reconstructed events.

    Parameters
    ----------
    true_energy: 1d numpy array of simulated event energies
    reco_energy: 1d numpy array of reconstructed (surviving) event energies
    simu_area: float - area on which events are simulated

    Returns
    -------
    float - effective area (simulated area scaled by the surviving fraction)
    """
    return simu_area * len(reco_energy) / len(true_energy)
def physical_cpu_mhz(vir_connection):
    """ Get the CPU frequency in MHz using libvirt.

    :param vir_connection: A libvirt connection object.
     :type vir_connection: virConnect
    :return: The CPU frequency in MHz.
     :rtype: int
    """
    # libvirt's getInfo() node tuple holds the CPU MHz at index 3.
    node_info = vir_connection.getInfo()
    return node_info[3]
def addstream(bot, input):
    """Add a stream to the notify list (admin-only command)."""
    if not input.admin:
        return False
    if not input.group(2):
        return
    stream = input.group(2).lower()
    if stream in bot.config.streams:
        bot.reply("{0} is already in the stream list".format(stream))
    else:
        bot.config.set_add('streams', stream)
        bot.reply("Added {0} to stream list".format(stream))
def fmt(n):
    """Format *n* with a leading space when it is a single digit,
    keeping columns of numbers aligned."""
    return " " + str(n) if n < 10 else str(n)
def get_handler_name(method: str, url_path: str, path_params: dict):
    """
    Build the handler name used for reflective method dispatch.

    Slashes in the URL become underscores and each path-parameter value is
    replaced by its parameter name, prefixed by the lower-cased HTTP method.

    :param method: HTTP method (e.g. "GET")
    :param url_path: request URL path
    :param path_params: mapping of parameter name -> matched value
    :return: handler name string
    """
    handler_name = url_path.replace('/', '_')
    for param_name, matched_value in path_params.items():
        handler_name = handler_name.replace(matched_value, param_name)
    return method.lower() + handler_name
import argparse
def parse_args():
    """Build the CLI parser and parse ``sys.argv`` into a Namespace."""
    arg_parser = argparse.ArgumentParser(description=__doc__,
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Positional inputs.
    arg_parser.add_argument('genotypes', metavar='G', help="Genotype table")
    arg_parser.add_argument('mutations', metavar='M', help="Mutation table")
    # Variant-filtering options.
    arg_parser.add_argument('--zygosity', '-z', default='both', type=str,
                            help="Minimum number of variant alleles required to be variant\
 (het, hom or both)")
    arg_parser.add_argument('--nonsense', '-n', default=0, type=float,
                            help="Only consider nonsense variants occuring in the first\
 X portion of the protein")
    # Aggregation modes.
    arg_parser.add_argument('--total', '-t', action="store_true",
                            help="Return the count of variants in each gene")
    arg_parser.add_argument('--worst', '-w', action="store_true",
                            help="Return the neutral probability of the most impactful variant\
 in a gene")
    # Scoring schemes for P(Neutral).
    arg_parser.add_argument('--sift', '-i', action="store_true",
                            help="Use SIFT scores to calculate P(Neutral)")
    arg_parser.add_argument('--blosum', '-b', action="store_true",
                            help="Use BLOSUM62 scores to calculate P(Neutral)")
    arg_parser.add_argument('--foldx', '-f', action="store_true",
                            help="Use FoldX ddG scores to calculate P(Neutral)")
    return arg_parser.parse_args()
def _always_run(*args, **kwargs) -> bool:
""" This returns False to indicate that the step is not already completed. """
return False | db31e0ac20ac0eef410fb051928308ce7414f5b6 | 707,327 |
def has_anonymous_link(node, auth):
    """Check whether the node is anonymous to the user.

    :param Node node: Node which the user wants to visit
    :param auth: Auth object carrying any view-only link from the current url
    :return bool: whether the node is anonymous to the user
    """
    link = auth.private_link
    return link.anonymous if link else False
def is_successful(gsm_log):
    """
    Report whether a GSM run converged to a transition state.

    Success is defined as the log containing a '-XTS-' or '-TS-' marker.
    """
    with open(gsm_log) as log_file:
        lines = log_file.readlines()
    # Scan from the end: the TS marker normally sits near the bottom.
    for line in reversed(lines):
        if '-XTS-' in line or '-TS-' in line:
            return True
    return False
def indicator(function_array_to_be_indicated, its_domain, barrier):
    """Zero out sampled function values below the barrier.

    The indicator acts on the function's *argument*: values whose domain
    point is not strictly greater than *barrier* are replaced by 0.
    """
    gated = []
    for position, argument in enumerate(its_domain):
        gated.append(function_array_to_be_indicated[position]
                     if argument > barrier else 0)
    return gated
def _causes_name_clash(candidate, path_list, allowed_occurences=1):
"""Determine if candidate leads to a name clash.
Args:
candidate (tuple): Tuple with parts of a path.
path_list (list): List of pathlib.Paths.
allowed_occurences (int): How often a name can occur before we call it a clash.
Returns:
bool
"""
duplicate_counter = -allowed_occurences
for path in path_list:
parts = tuple(reversed(path.parts))
if len(parts) >= len(candidate) and parts[: len(candidate)] == candidate:
duplicate_counter += 1
return duplicate_counter > 0 | 3b874e4ea6d8780483100e464e3325321c82689e | 707,331 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.