max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
adventure-cards/package/main.py | DaneRosa/adventure-cards | 0 | 2100 | import json
def hydrateCards(rawDeckDataPath):
pack = []
    with open(rawDeckDataPath) as f:
        rawDeckData = json.load(f)
for index, item in enumerate(rawDeckData):
deck = []
# print(index,item)
for i in rawDeckData[item]:
card ={
f'{index}':
{
"name": "",
"type": "",
"level": None,
"spell_name": "",
"creature_name": "",
"artifact_name": "",
"enchantment_name": "",
"spell_magnifier": "",
"spell_type": "",
"name_modifier": "",
"creature_modifier": "",
"mythic_creature_modifier": "",
"location": "",
"mythic_location": ""
}
}
nameSplit = i[0].split()
card[f'{index}']['name'] = i[0]
card[f'{index}']['type']= i[1]
card[f'{index}']['level']=i[2]
if i[1] == 'spell':
if len(nameSplit) == 1:
card[f'{index}']['spell_name']= i[0]
elif len(nameSplit) == 2:
card[f'{index}']['spell_type']= nameSplit[0]
card[f'{index}']['spell_name']= nameSplit[1]
elif len(nameSplit) == 3:
card[f'{index}']['spell_magnifier']=nameSplit[0]
card[f'{index}']['spell_type']=nameSplit[1]
card[f'{index}']['spell_name']=nameSplit[2]
elif i[1] == 'artifact':
                if 'Divine Robe' in i[0] or 'Ghost Wand' in i[0]:
if 'Divine Robe' in i[0]:
i[0] = i[0].replace('Divine Robe', 'DivineRobe')
if 'Ghost Wand' in i[0]:
i[0] = i[0].replace('Ghost Wand', 'GhostWand')
nameSplit = i[0].split()
card[f'{index}']['name'] = i[0]
if len(nameSplit) == 1:
card[f'{index}']['artifact_name']= i[0]
elif len(nameSplit) == 2:
card[f'{index}']['artifact_name']= nameSplit[1]
card[f'{index}']['spell_type']= nameSplit[0]
elif len(nameSplit) == 3:
card[f'{index}']['artifact_name']= nameSplit[2]
card[f'{index}']['spell_magnifier']= nameSplit[0]
card[f'{index}']['spell_type']= nameSplit[1]
elif i[1] == 'enchantment':
if len(nameSplit) == 1:
card[f'{index}']['enchantment_name']= i[0]
if len(nameSplit) == 2:
card[f'{index}']['enchantment_name']= nameSplit[1]
card[f'{index}']['spell_type']= nameSplit[0]
if len(nameSplit) == 3:
card[f'{index}']['enchantment_name']=nameSplit[2]
card[f'{index}']['spell_type']=nameSplit[1]
card[f'{index}']['spell_magnifier']=nameSplit[0]
elif i[1] == 'monster':
card[f'{index}']['type']= 'creature'
if len(nameSplit) == 1:
card[f'{index}']['creature_name']= nameSplit[0]
if len(nameSplit) == 3:
card[f'{index}']['creature_name']= nameSplit[2]
card[f'{index}']['creature_modifier']= nameSplit[1]
card[f'{index}']['name_modifier']= nameSplit[0]
if len(nameSplit) >3:
keyword = 'of'
before_keyword, keyword, after_keyword = i[0].partition(keyword)
if i[2] == 2:
card[f'{index}']['creature_name']= nameSplit[2]
card[f'{index}']['creature_modifier']= nameSplit[1]
card[f'{index}']['name_modifier']= nameSplit[0]
                        card[f'{index}']['location']= keyword + after_keyword
elif i[2] == 3:
card[f'{index}']['creature_name']= nameSplit[2]
card[f'{index}']['mythic_creature_modifier']= nameSplit[1]
card[f'{index}']['name_modifier']= nameSplit[0]
card[f'{index}']['mythic_location']= keyword + after_keyword
deck.append(card[f'{index}'])
index +=1
if len(deck) == 45:
break
pack.append(deck)
return(pack) | 3 | 3 |
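A hedged usage sketch for `hydrateCards` above: the fixture path and JSON layout (a mapping from pack names to lists of `[name, type, level]` entries) are inferred from the parsing logic and are not part of the original repository.
# Hypothetical fixture and call; assumes hydrateCards from the module above is in scope.
import json
fixture = {
    "pack_1": [
        ["Fireball", "spell", 1],
        ["Divine Robe", "artifact", 1],
        ["Demon of the Pit", "monster", 2],
    ]
}
with open("deck_data.json", "w") as f:
    json.dump(fixture, f)
pack = hydrateCards("deck_data.json")
print(pack[0][0]["name"], pack[0][0]["type"])  # -> Fireball spell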
demo/cnn_predict.py | huynhtnhut97/keras-video-classifier | 108 | 2101 | import numpy as np
from keras import backend as K
import os
import sys
K.set_image_dim_ordering('tf')
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
data_dir_path = patch_path('very_large_data')
model_dir_path = patch_path('models/UCF-101')
from keras_video_classifier.library.convolutional import CnnVideoClassifier
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels
config_file_path = CnnVideoClassifier.get_config_file_path(model_dir_path)
weight_file_path = CnnVideoClassifier.get_weight_file_path(model_dir_path)
np.random.seed(42)
load_ucf(data_dir_path)
predictor = CnnVideoClassifier()
predictor.load_model(config_file_path, weight_file_path)
videos = scan_ucf_with_labels(data_dir_path, [label for (label, label_index) in predictor.labels.items()])
video_file_path_list = np.array([file_path for file_path in videos.keys()])
np.random.shuffle(video_file_path_list)
for video_file_path in video_file_path_list:
label = videos[video_file_path]
predicted_label = predictor.predict(video_file_path)
print('predicted: ' + predicted_label + ' actual: ' + label)
if __name__ == '__main__':
main() | 2.484375 | 2 |
pyconde/context_processors.py | EuroPython/djep | 5 | 2102 | from django.conf import settings
def less_settings(request):
return {
'use_dynamic_less_in_debug': getattr(settings, 'LESS_USE_DYNAMIC_IN_DEBUG', True)
}
| 1.734375 | 2 |
pmdarima/preprocessing/endog/boxcox.py | tuomijal/pmdarima | 736 | 2103 | # -*- coding: utf-8 -*-
from scipy import stats
import numpy as np
import warnings
from ...compat import check_is_fitted, pmdarima as pm_compat
from .base import BaseEndogTransformer
__all__ = ['BoxCoxEndogTransformer']
class BoxCoxEndogTransformer(BaseEndogTransformer):
r"""Apply the Box-Cox transformation to an endogenous array
The Box-Cox transformation is applied to non-normal data to coerce it more
towards a normal distribution. It's specified as::
(((y + lam2) ** lam1) - 1) / lam1, if lmbda != 0, else
log(y + lam2)
Parameters
----------
lmbda : float or None, optional (default=None)
The lambda value for the Box-Cox transformation, if known. If not
specified, it will be estimated via MLE.
lmbda2 : float, optional (default=0.)
The value to add to ``y`` to make it non-negative. If, after adding
``lmbda2``, there are still negative values, a ValueError will be
raised.
neg_action : str, optional (default="raise")
How to respond if any values in ``y <= 0`` after adding ``lmbda2``.
One of ('raise', 'warn', 'ignore'). If anything other than 'raise',
values <= 0 will be truncated to the value of ``floor``.
floor : float, optional (default=1e-16)
A positive value that truncate values to if there are values in ``y``
that are zero or negative and ``neg_action`` is not 'raise'. Note that
if values are truncated, invertibility will not be preserved, and the
transformed array may not be perfectly inverse-transformed.
"""
def __init__(self, lmbda=None, lmbda2=0, neg_action="raise", floor=1e-16):
self.lmbda = lmbda
self.lmbda2 = lmbda2
self.neg_action = neg_action
self.floor = floor
def fit(self, y, X=None, **kwargs): # TODO: kwargs go away
"""Fit the transformer
Learns the value of ``lmbda``, if not specified in the constructor.
If defined in the constructor, is not re-learned.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
"""
lam1 = self.lmbda
lam2 = self.lmbda2
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
if lam2 < 0:
raise ValueError("lmbda2 must be a non-negative scalar value")
if lam1 is None:
y, _ = self._check_y_X(y, X)
_, lam1 = stats.boxcox(y + lam2, lmbda=None, alpha=None)
self.lam1_ = lam1
self.lam2_ = lam2
return self
def transform(self, y, X=None, **kwargs):
"""Transform the new array
Apply the Box-Cox transformation to the array after learning the
lambda parameter.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y_transform : array-like or None
The Box-Cox transformed y array
X : array-like or None
The X array
"""
check_is_fitted(self, "lam1_")
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
lam1 = self.lam1_
lam2 = self.lam2_
y, exog = self._check_y_X(y, X)
y += lam2
neg_mask = y <= 0.
if neg_mask.any():
action = self.neg_action
msg = "Negative or zero values present in y"
if action == "raise":
raise ValueError(msg)
elif action == "warn":
warnings.warn(msg, UserWarning)
y[neg_mask] = self.floor
if lam1 == 0:
return np.log(y), exog
return (y ** lam1 - 1) / lam1, exog
def inverse_transform(self, y, X=None, **kwargs): # TODO: kwargs go away
"""Inverse transform a transformed array
Inverse the Box-Cox transformation on the transformed array. Note that
if truncation happened in the ``transform`` method, invertibility will
not be preserved, and the transformed array may not be perfectly
inverse-transformed.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The transformed endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y : array-like or None
The inverse-transformed y array
X : array-like or None
The inverse-transformed X array
"""
check_is_fitted(self, "lam1_")
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
lam1 = self.lam1_
lam2 = self.lam2_
y, exog = self._check_y_X(y, X)
if lam1 == 0:
return np.exp(y) - lam2, exog
numer = y * lam1 # remove denominator
numer += 1. # add 1 back to it
de_exp = numer ** (1. / lam1) # de-exponentiate
return de_exp - lam2, exog
| 2.453125 | 2 |
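As a quick illustration of the formula documented in the transformer above, here is a self-contained round trip using scipy directly; the sample series is invented, and this sketches the math rather than pmdarima's API.
# Forward Box-Cox via scipy, manual inverse using y = (lam * y_t + 1) ** (1 / lam).
import numpy as np
from scipy import stats
y = np.array([1.0, 2.0, 4.0, 8.0, 16.0])  # strictly positive sample series
y_t, lam = stats.boxcox(y)                # lambda estimated via MLE, as in fit()
y_back = np.exp(y_t) if lam == 0 else (lam * y_t + 1.0) ** (1.0 / lam)
print(np.allclose(y, y_back))             # True: the transform is invertible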
backend/src/baserow/api/user/registries.py | ashishdhngr/baserow | 1 | 2104 | from baserow.core.registry import Instance, Registry
class UserDataType(Instance):
"""
The user data type can be used to inject an additional payload to the API
JWT response. This is the response when a user authenticates or refreshes his
token. The returned dict of the `get_user_data` method is added to the payload
under the key containing the type name.
Example:
class TestUserDataType(UserDataType):
type = "test"
def get_user_data(user, request):
return {"test": "value"}
user_data_registry.register(TestUserDataType())
    Will result in the following response when the user authenticates:
{
"token": "<PASSWORD>....",
"user: {
"id": 1,
...
},
"test": {
"test": "value"
}
}
"""
def get_user_data(self, user, request) -> dict:
"""
Should return a dict containing the additional information that must be added
to the response payload after the user authenticates.
:param user: The related user that just authenticated.
:type user: User
:param request: The request when the user authenticated.
:type request: Request
:return: a dict containing the user data that must be added to the response.
"""
raise NotImplementedError(
"The get_user_data must be implemented and should return a dict."
)
class UserDataRegistry(Registry):
name = "api_user_data"
def get_all_user_data(self, user, request) -> dict:
"""
Collects the additional user data of all the registered user data type
instances.
:param user: The user that just authenticated.
:type user: User
:param request: The request when the user authenticated.
:type request: Request
:return: a dict containing all additional user data payload for all the
registered instances.
"""
return {
key: value.get_user_data(user, request)
for key, value in self.registry.items()
}
user_data_registry = UserDataRegistry()
| 3.34375 | 3 |
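A minimal sketch of how a custom `UserDataType` would be registered and collected with the registry above; the `active_sessions` payload and the import path are illustrative assumptions, mirroring the pattern shown in the class docstring.
# Sketch only: payload contents are invented for illustration.
from baserow.api.user.registries import UserDataType, user_data_registry
class ActiveSessionsUserDataType(UserDataType):
    type = "active_sessions"
    def get_user_data(self, user, request) -> dict:
        return {"count": 3}  # a real type would derive this from the database
user_data_registry.register(ActiveSessionsUserDataType())
# At authentication time the API view would merge the collected payload:
# user_data_registry.get_all_user_data(user, request) -> {"active_sessions": {"count": 3}}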
Week 2/code.py | aklsh/EE2703 | 0 | 2105 | '''
-------------------------------------
Assignment 2 - EE2703 (Jan-May 2020)
Done by <NAME> (EE18B122)
Created on 18/01/20
Last Modified on 04/02/20
-------------------------------------
'''
# importing necessary libraries
import sys
import cmath
import numpy as np
import pandas as pd
# To improve readability
CIRCUIT_START = ".circuit"
CIRCUIT_END = ".end"
RESISTOR = "R"
CAPACITOR = "C"
INDUCTOR = "L"
IVS = "V"
ICS = "I"
VCVS = "E"
VCCS = "G"
CCVS = "H"
CCCS = "F"
PI = np.pi
# Classes for each circuit component
class resistor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class inductor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class capacitor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class voltageSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class currentSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class vcvs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class vccs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class ccvs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
class cccs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
# Convert a number in engineer's format to math
def enggToMath(enggNumber):
try:
return float(enggNumber)
except:
lenEnggNumber = len(enggNumber)
# Kilo
if enggNumber[lenEnggNumber-1] == 'k':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e3
# Milli
elif enggNumber[lenEnggNumber-1] == 'm':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e-3
# Micro
elif enggNumber[lenEnggNumber-1] == 'u':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e-6
# Nano
elif enggNumber[lenEnggNumber-1] == 'n':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e-9
# Mega
elif enggNumber[lenEnggNumber-1] == 'M':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e6
else:
sys.exit("Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000).")
if __name__ == "__main__":
# checking number of command line arguments
if len(sys.argv)!=2 :
sys.exit("Invalid number of arguments!")
else:
try:
circuitFile = sys.argv[1]
circuitFreq = 1e-100
circuitComponents = { RESISTOR: [], CAPACITOR: [], INDUCTOR: [], IVS: [], ICS: [], VCVS: [], VCCS: [], CCVS: [], CCCS: [] }
circuitNodes = []
# checking if given netlist file is of correct type
if (not circuitFile.endswith(".netlist")):
print("Wrong file type!")
else:
netlistFileLines = []
with open (circuitFile, "r") as f:
for line in f.readlines():
netlistFileLines.append(line.split('#')[0].split('\n')[0])
# Getting frequency, if any
if(line[:3] == '.ac'):
circuitFreq = float(line.split()[2])
# Setting Angular Frequency w
w = 2*PI*circuitFreq
try:
# Finding the location of the identifiers
identifier1 = netlistFileLines.index(CIRCUIT_START)
identifier2 = netlistFileLines.index(CIRCUIT_END)
circuitBody = netlistFileLines[identifier1+1:identifier2]
for line in circuitBody:
# Extracting the data from the line
lineTokens = line.split()
# Appending new nodes to a list
try:
if lineTokens[1] not in circuitNodes:
circuitNodes.append(lineTokens[1])
if lineTokens[2] not in circuitNodes:
circuitNodes.append(lineTokens[2])
except IndexError:
continue
# Resistor
if lineTokens[0][0] == RESISTOR:
circuitComponents[RESISTOR].append(resistor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Capacitor
elif lineTokens[0][0] == CAPACITOR:
circuitComponents[CAPACITOR].append(capacitor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Inductor
elif lineTokens[0][0] == INDUCTOR:
circuitComponents[INDUCTOR].append(inductor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Voltage Source
elif lineTokens[0][0] == IVS:
if len(lineTokens) == 5: # DC Source
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# Current Source
elif lineTokens[0][0] == ICS:
if len(lineTokens) == 5: # DC Source
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# VCVS
elif lineTokens[0][0] == VCVS:
circuitComponents[VCVS].append(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# VCCS
elif lineTokens[0][0] == VCCS:
                            circuitComponents[VCCS].append(vccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# CCVS
elif lineTokens[0][0] == CCVS:
circuitComponents[CCVS].append(ccvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# CCCS
elif lineTokens[0][0] == CCCS:
circuitComponents[CCCS].append(cccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# Erroneous Component Name
else:
sys.exit("Wrong Component Given. ABORT!")
try:
circuitNodes.remove('GND')
circuitNodes = ['GND'] + circuitNodes
except:
sys.exit("No ground node specified in the circuit!!")
# Creating a dictionary with node names and their numbers (to reduce the time taken by later parts of the program)
nodeNumbers = {circuitNodes[i]:i for i in range(len(circuitNodes))}
numNodes = len(circuitNodes)
numVS = len(circuitComponents[IVS])+len(circuitComponents[VCVS])+len(circuitComponents[CCVS])
# Creating Matrices M and b
                    matrixM = np.zeros((numNodes+numVS, numNodes+numVS), complex)
                    matrixB = np.zeros((numNodes+numVS,), complex)
# GND Equation
matrixM[0][0] = 1.0
# Resistor Equations
for r in circuitComponents[RESISTOR]:
if r.node1 != 'GND':
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node1]] += 1/r.value
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node2]] -= 1/r.value
if r.node2 != 'GND':
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node1]] -= 1/r.value
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node2]] += 1/r.value
# Capacitor Equations
for c in circuitComponents[CAPACITOR]:
if c.node1 != 'GND':
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node1]] += complex(0, w*c.value)
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node2]] -= complex(0, w*c.value)
if c.node2 != 'GND':
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node1]] -= complex(0, w*c.value)
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node2]] += complex(0, w*c.value)
# Inductor Equations
for l in circuitComponents[INDUCTOR]:
if l.node1 != 'GND':
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node1]] += complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node2]] -= complex(0, -1.0/(w*l.value))
if l.node2 != 'GND':
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node1]] -= complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node2]] += complex(0, -1.0/(w*l.value))
# Voltage Source Equations
for i in range(len(circuitComponents[IVS])):
# Equation accounting for current through the source
if circuitComponents[IVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node1]][numNodes+i] = 1.0
if circuitComponents[IVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node2]][numNodes+i] = -1.0
# Auxiliary Equations
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node1]] = -1.0
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node2]] = +1.0
matrixB[numNodes+i] = cmath.rect(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase*PI/180)
# Current Source Equations
for i in circuitComponents[ICS]:
if i.node1 != 'GND':
matrixB[nodeNumbers[i.node1]] = -1*i.value
if i.node2 != 'GND':
matrixB[nodeNumbers[i.node2]] = i.value
# VCVS Equations
for i in range(len(circuitComponents[VCVS])):
# Equation accounting for current through the source
if circuitComponents[VCVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node1]][numNodes+len(circuitComponents[IVS])+i] = 1.0
if circuitComponents[VCVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+len(circuitComponents[IVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node3]] = -1.0*circuitComponents[VCVS][i].value
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node4]] = 1.0*circuitComponents[VCVS][i].value
# CCVS Equations
for i in range(len(circuitComponents[CCVS])):
# Equation accounting for current through the source
                        if circuitComponents[CCVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[CCVS][i].node1]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = 1.0
                        if circuitComponents[CCVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[CCVS][i].node2]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0*circuitComponents[CCVS][i].value
# VCCS Equations
for vccs in circuitComponents[VCCS]:
if vccs.node1 != 'GND':
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node4]]+=vccs.value
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node3]]-=vccs.value
if vccs.node2 != 'GND':
matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node4]]-=vccs.value
                            matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node3]]+=vccs.value
# CCCS Equations
for cccs in circuitComponents[CCCS]:
def getIndexIVS(vName):
for i in range(len(circuitComponents[IVS])):
if circuitComponents[IVS][i].name == vName:
return i
if cccs.node1 != 'GND':
matrixM[nodeNumbers[cccs.node1]][numNodes+getIndexIVS(cccs.vSource)]-=cccs.value
if cccs.node2 != 'GND':
matrixM[nodeNumbers[cccs.node2]][numNodes+getIndexIVS(cccs.vSource)]+=cccs.value
try:
x = np.linalg.solve(matrixM, matrixB)
circuitCurrents = []
# Formatting Output Data
for v in circuitComponents[IVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[VCVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[CCVS]:
circuitCurrents.append("current in "+v.name)
# Printing output in table format
print(pd.DataFrame(x, circuitNodes+circuitCurrents, columns=['Voltage / Current']))
print("The values given above are AMPLITUDE values and NOT RMS values.")
except np.linalg.LinAlgError:
sys.exit("Singular Matrix Formed! Please check if you have entered the circuit definition correctly!")
except ValueError:
sys.exit("Netlist does not abide to given format!")
except FileNotFoundError:
sys.exit("Given file does not exist!")
| 3.21875 | 3 |
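For context, a hypothetical end-to-end run of the solver above; the netlist contents, file names, and the voltage-source token layout (`name node1 node2 dc value`) are inferred from the parser and are assumptions rather than files shipped with the assignment.
# Writes a small resistive-divider netlist and runs the script on it.
import subprocess
import sys
netlist = """.circuit
V1 GND 1 dc 5
R1 1 2 1e3
R2 2 GND 1e3
.end
"""
with open("divider.netlist", "w") as f:
    f.write(netlist)
# The script expects exactly one argument, a .netlist file; on success it prints
# a table of node voltages and source currents.
subprocess.run([sys.executable, "code.py", "divider.netlist"], check=True)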
Lib/Co.py | M507/Guessing-passwords-using-machine-learning | 6 | 2106 | import subprocess
import os.path
"""
Stylish input()
"""
def s_input(string):
return input(string+">").strip("\n")
"""
Execute command locally
"""
def execute_command(command):
if len(command) > 0:
print(command)
proc = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, cwd="/tmp")
return proc
"""
Get all subdirectories of a directory.
"""
def getSubs(dirname):
dirs = [d for d in os.listdir(dirname) if os.path.isdir(os.path.join(dirname, d))]
# subdirectories = [dirname + "/" + subDirName for subDirName in subdirectories]
subdirectories = []
for dir in dirs:
subdirectories.append(dirname + '/' + dir)
return subdirectories
"""
Rocket science
"""
def answer(string):
a = input(string)
if a == "Y" or a == 'y' or a == 'Yes' or a == 'yes':
return True
else:
return False
| 3.0625 | 3 |
project3_code/part_0/main.py | rachelbrown347/CS294-26_code | 1 | 2107 | import numpy as np
import matplotlib.pyplot as plt
from skimage.exposure import rescale_intensity
from unsharp import *
# Load file and normalize to 0-1
fname = 'iguana.jpg'
im = plt.imread(fname)
if im.mean() >= 1:
im = im/255.
sigma = 5
amplitude = 1.5
imsharp = unsharp_mask(im, sigma, amplitude)
imsharp = rescale_intensity(imsharp, in_range=(0, 1), out_range=(0,1))
new_fname = fname[:-4]+'_sharp.jpg'
plt.imsave(new_fname, imsharp) | 2.203125 | 2 |
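The local `unsharp` module imported above is not included in this row; a common implementation of `unsharp_mask` follows the pattern sketched below (an assumption about the helper, with the signature matching the call above, not necessarily the repository's exact code).
# Sketch: sharpened = image + amount * (image - gaussian_blur(image, sigma)).
import numpy as np
from scipy.ndimage import gaussian_filter
def unsharp_mask(im, sigma, amount):
    im = im.astype(np.float64)
    if im.ndim == 3:  # blur each colour channel independently
        blurred = np.dstack([gaussian_filter(im[..., c], sigma) for c in range(im.shape[2])])
    else:
        blurred = gaussian_filter(im, sigma)
    return im + amount * (im - blurred)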
meregistro/apps/registro/models/EstablecimientoDomicilio.py | MERegistro/meregistro | 0 | 2108 | # -*- coding: utf-8 -*-
from django.db import models
from apps.registro.models.TipoDomicilio import TipoDomicilio
from apps.registro.models.Localidad import Localidad
from apps.registro.models.Establecimiento import Establecimiento
from django.core.exceptions import ValidationError
from apps.seguridad.audit import audit
@audit
class EstablecimientoDomicilio(models.Model):
TIPO_POSTAL = 'Postal'
TIPO_INSTITUCIONAL = 'Institucional'
establecimiento = models.ForeignKey(Establecimiento, related_name='domicilios')
tipo_domicilio = models.ForeignKey(TipoDomicilio)
localidad = models.ForeignKey(Localidad, related_name='domicilios_establecimientos')
calle = models.CharField(max_length=100)
altura = models.CharField(max_length=15)
referencia = models.CharField(max_length=255, null=True, blank=True)
cp = models.CharField(max_length=20)
class Meta:
app_label = 'registro'
db_table = 'registro_establecimiento_domicilio'
def __unicode__(self):
if self.cp:
cp = " (CP: " + self.cp + ")"
else:
cp = ""
return "%s %s - %s %s" % (self.calle, self.altura, self.localidad.nombre, cp)
def __init__(self, *args, **kwargs):
super(EstablecimientoDomicilio, self).__init__(*args, **kwargs)
| 1.8125 | 2 |
python_for_everybody/py2_p4i_old/6.5findslicestringextract.py | timothyyu/p4e-prac | 0 | 2109 | # 6.5 Write code using find() and string slicing (see section 6.10) to extract
# the number at the end of the line below.
# Convert the extracted value to a floating point number and print it out.
text = "X-DSPAM-Confidence: 0.8475";
pos = text.find(':')
text = float(text[pos+1:])
print text | 3.53125 | 4 |
tools/lucid/engine.py | Petr-By/qtpyvis | 3 | 2110 | import logging
logger = logging.getLogger(__name__)
print(f"!!!!!!!!!! getEffectiveLevel: {logger.getEffectiveLevel()} !!!!!!!!!!!!!")
from dltb.base.observer import Observable, change
from network import Network, loader
from network.lucid import Network as LucidNetwork
# lucid.modelzoo.vision_models:
# A module providinge the pretrained networks by name, e.g.
# models.AlexNet
import lucid.modelzoo.vision_models as models
import lucid.modelzoo.nets_factory as nets
from lucid.modelzoo.vision_base import Model as LucidModel
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
class Engine(Observable, method='engine_changed',
changes={'engine_changed', 'model_changed', 'unit_changed'}):
"""The Engine is a wrapper around the lucid module.
Attributes
----------
_network: LucidNetwork
The currently selected lucid network. None if no model
is selected.
_model: LucidModel
The currently selected lucid model. None if no model is
selected.
"""
def __init__(self):
super().__init__()
self._network = None
self._model = None
self._layer = None
self._unit = None
self.image = None
self.running = False
@property
def model(self) -> LucidModel:
"""The currently selected lucid model. None if no model is
selected.
"""
return self._model
@property
def model_name(self) -> str:
"""The name of the currently selected lucid model. None if
no model is selected.
"""
return None if self._network is None else self._network.name
@change
def load_model(self, name: str) -> LucidModel:
"""Load the Lucid model with the given name.
Returns
-------
model: LucidModel
A reference to the LucidModel.
"""
logger.info(f"load_model({name})")
try:
#self._network = LucidNetwork(name=name)
self._network = loader.load_lucid(name)
self._model = self._network.model
except KeyError as e:
self._network = None
self._model = None
logger.info(f"NAME={name}/{self.model_name} : {self._model}")
self._layer = None
self._unit = None
self.change(model_changed=True, unit_changed=True)
return self._model
@change
def set_layer(self, name: str, unit: int=0) -> None:
"""Set the currently selected layer.
Arguments
---------
name: str
The name of the layer.
unit: int
The index of the unit in the layer.
"""
if name == self.layer:
return
if self._model is None:
return
try:
self._layer = next(x for x in self._model.layers
if x['name'] == name)
self._unit = unit
except StopIteration: # name not in layer list
self._layer = None
self._unit = None
self.change(unit_changed=True)
@property
def layer(self) -> str:
"""The name of the currently selected layer.
"""
return None if self._layer is None else self._layer['name']
@layer.setter
def layer(self, name: str) -> None:
"""Set the currently selected layer.
"""
self.set_layer(name)
@property
def layer_type(self) -> str:
"""The type of the currently selected layer.
"""
return None if self._layer is None else self._layer['type']
@property
def layer_units(self) -> int:
"""The number of units in the currently selected layer.
"""
return None if self._layer is None else self._layer['size']
@change
def _set_unit(self, unit: int) -> None:
if unit == self.unit:
return
if unit is None:
self._unit = None
self.change(unit_changed=True)
elif self._layer is None:
raise ValueError('Setting unit failed as no layer is selected')
elif not 0 <= unit < self._layer['size']:
raise ValueError(f"Invalid unit {unit} for current layer"
f" of size {self._layer['size']}")
else:
self._unit = unit
self.change(unit_changed=True)
@property
def unit(self) -> int:
"""The index of the currently selected unit or None if no
unit is selected.
"""
return None if self._unit is None else self._unit
@unit.setter
def unit(self, unit: int) -> None:
"""The index of the currently selected unit or None if no
unit is selected.
"""
self._set_unit(unit)
@property
def layer_id(self) -> str:
"""The id of the currently selected layer or None if no
unit is selected.
"""
if self._layer is None:
return None
if self._layer['type'] == 'conv':
return self._layer['name'] + '_pre_relu'
return self._layer['name']
@property
def unit_id(self) -> str:
"""The id of the currently selected unit or None if no
unit is selected.
"""
return (None if self._layer is None
else self.layer_id + ':' + str(self._unit))
def _doRun(self, running: bool=True) -> None:
self.running = running
self.notify_observers(EngineChange(engine_changed=True))
def start(self):
self.image = None
self._doRun(True)
obj = objectives.channel(self.layer_id, self.unit)
self.image = render.render_vis(self.model, obj)
#self.image = render.render_vis(self.model, self.unit_id)
self._doRun(False)
def stop(self):
self._doRun(False)
def start_multi(self):
self.image = None
self._doRun(True)
logger.info("!!! running all:")
for unit in range(self.layer_units):
self.unit = unit
self.notify_observers(EngineChange(unit_changed=True))
logger.info(f"!!! running unit {unit}")
obj = objectives.channel(self.layer_id, unit)
self.image = render.render_vis(self.model, obj)
if not self.running:
break
self._doRun(True)
self._doRun(False)
# FIXME[old]: this is too make old code happy. New code should use
# Engine.Change and Engine.Observer directly.
EngineChange = Engine.Change
EngineObserver = Engine.Observer
| 2.328125 | 2 |
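A hedged usage sketch for the `Engine` wrapper above; the import path assumes this file is importable as `tools.lucid.engine`, and the model and layer names are plausible Lucid model-zoo values rather than names guaranteed to exist in this repository's loader.
# Illustrative only: "InceptionV1" and "mixed4a" are assumed identifiers.
from tools.lucid.engine import Engine
engine = Engine()
engine.load_model("InceptionV1")     # assumed model name known to the loader
engine.set_layer("mixed4a", unit=7)  # assumed layer of that model
engine.start()                       # runs render.render_vis for the selected unit
print(engine.model_name, engine.unit_id, type(engine.image))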
synapse/storage/events.py | natamelo/synapse | 0 | 2111 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import Counter as c_counter, OrderedDict, deque, namedtuple
from functools import wraps
from six import iteritems, text_type
from six.moves import range
from canonicaljson import json
from prometheus_client import Counter, Histogram
from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.metrics import BucketCollector
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.state import StateResolutionStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
from synapse.storage.state import StateGroupWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import batch_iter
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
event_counter = Counter(
"synapse_storage_events_persisted_events_sep",
"",
["type", "origin_type", "origin_entity"],
)
# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
"synapse_storage_events_state_delta_single_event", ""
)
# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
"synapse_storage_events_state_delta_reuse_delta", ""
)
# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
"synapse_storage_events_forward_extremities_persisted",
"Number of forward extremities for each new event",
buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
"synapse_storage_events_stale_forward_extremities_persisted",
"Number of unchanged forward extremities for each new event",
buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
def encode_json(json_object):
"""
Encode a Python object as JSON and return it in a Unicode string.
"""
out = frozendict_json_encoder.encode(json_object)
if isinstance(out, bytes):
out = out.decode("utf8")
return out
class _EventPeristenceQueue(object):
"""Queues up events so that they can be persisted in bulk with only one
concurrent transaction per room.
"""
_EventPersistQueueItem = namedtuple(
"_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
)
def __init__(self):
self._event_persist_queues = {}
self._currently_persisting_rooms = set()
def add_to_queue(self, room_id, events_and_contexts, backfilled):
"""Add events to the queue, with the given persist_event options.
NB: due to the normal usage pattern of this method, it does *not*
follow the synapse logcontext rules, and leaves the logcontext in
place whether or not the returned deferred is ready.
Args:
room_id (str):
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
Returns:
defer.Deferred: a deferred which will resolve once the events are
persisted. Runs its callbacks *without* a logcontext.
"""
queue = self._event_persist_queues.setdefault(room_id, deque())
if queue:
# if the last item in the queue has the same `backfilled` setting,
# we can just add these new events to that item.
end_item = queue[-1]
if end_item.backfilled == backfilled:
end_item.events_and_contexts.extend(events_and_contexts)
return end_item.deferred.observe()
deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
queue.append(
self._EventPersistQueueItem(
events_and_contexts=events_and_contexts,
backfilled=backfilled,
deferred=deferred,
)
)
return deferred.observe()
def handle_queue(self, room_id, per_item_callback):
"""Attempts to handle the queue for a room if not already being handled.
        The given callback will be invoked for each item in the queue,
        of type _EventPersistQueueItem. The per_item_callback will continuously
        be called with new items, unless the queue becomes empty. The return
value of the function will be given to the deferreds waiting on the item,
exceptions will be passed to the deferreds as well.
This function should therefore be called whenever anything is added
to the queue.
If another callback is currently handling the queue then it will not be
invoked.
"""
if room_id in self._currently_persisting_rooms:
return
self._currently_persisting_rooms.add(room_id)
@defer.inlineCallbacks
def handle_queue_loop():
try:
queue = self._get_drainining_queue(room_id)
for item in queue:
try:
ret = yield per_item_callback(item)
except Exception:
with PreserveLoggingContext():
item.deferred.errback()
else:
with PreserveLoggingContext():
item.deferred.callback(ret)
finally:
queue = self._event_persist_queues.pop(room_id, None)
if queue:
self._event_persist_queues[room_id] = queue
self._currently_persisting_rooms.discard(room_id)
# set handle_queue_loop off in the background
run_as_background_process("persist_events", handle_queue_loop)
def _get_drainining_queue(self, room_id):
queue = self._event_persist_queues.setdefault(room_id, deque())
try:
while True:
yield queue.popleft()
except IndexError:
# Queue has been drained.
pass
_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
def _retry_on_integrity_error(func):
"""Wraps a database function so that it gets retried on IntegrityError,
with `delete_existing=True` passed in.
Args:
func: function that returns a Deferred and accepts a `delete_existing` arg
"""
@wraps(func)
@defer.inlineCallbacks
def f(self, *args, **kwargs):
try:
res = yield func(self, *args, **kwargs)
except self.database_engine.module.IntegrityError:
logger.exception("IntegrityError, retrying.")
res = yield func(self, *args, delete_existing=True, **kwargs)
defer.returnValue(res)
return f
# inherits from EventFederationStore so that we can call _update_backward_extremities
# and _handle_mult_prev_events (though arguably those could both be moved in here)
class EventsStore(
StateGroupWorkerStore,
EventFederationStore,
EventsWorkerStore,
BackgroundUpdateStore,
):
def __init__(self, db_conn, hs):
super(EventsStore, self).__init__(db_conn, hs)
self._event_persist_queue = _EventPeristenceQueue()
self._state_resolution_handler = hs.get_state_resolution_handler()
# Collect metrics on the number of forward extremities that exist.
# Counter of number of extremities to count
self._current_forward_extremities_amount = c_counter()
BucketCollector(
"synapse_forward_extremities",
lambda: self._current_forward_extremities_amount,
buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"],
)
# Read the extrems every 60 minutes
def read_forward_extremities():
# run as a background process to make sure that the database transactions
# have a logcontext to report to
return run_as_background_process(
"read_forward_extremities", self._read_forward_extremities
)
hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000)
@defer.inlineCallbacks
def _read_forward_extremities(self):
def fetch(txn):
txn.execute(
"""
select count(*) c from event_forward_extremities
group by room_id
"""
)
return txn.fetchall()
res = yield self.runInteraction("read_forward_extremities", fetch)
self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
@defer.inlineCallbacks
def persist_events(self, events_and_contexts, backfilled=False):
"""
Write events to the database
Args:
events_and_contexts: list of tuples of (event, context)
backfilled (bool): Whether the results are retrieved from federation
via backfill or not. Used to determine if they're "new" events
which might update the current state etc.
Returns:
Deferred[int]: the stream ordering of the latest persisted event
"""
partitioned = {}
for event, ctx in events_and_contexts:
partitioned.setdefault(event.room_id, []).append((event, ctx))
deferreds = []
for room_id, evs_ctxs in iteritems(partitioned):
d = self._event_persist_queue.add_to_queue(
room_id, evs_ctxs, backfilled=backfilled
)
deferreds.append(d)
for room_id in partitioned:
self._maybe_start_persisting(room_id)
yield make_deferred_yieldable(
defer.gatherResults(deferreds, consumeErrors=True)
)
max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue(max_persisted_id)
@defer.inlineCallbacks
@log_function
def persist_event(self, event, context, backfilled=False):
"""
Args:
event (EventBase):
context (EventContext):
backfilled (bool):
Returns:
Deferred: resolves to (int, int): the stream ordering of ``event``,
and the stream ordering of the latest persisted event
"""
deferred = self._event_persist_queue.add_to_queue(
event.room_id, [(event, context)], backfilled=backfilled
)
self._maybe_start_persisting(event.room_id)
yield make_deferred_yieldable(deferred)
max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id))
def _maybe_start_persisting(self, room_id):
@defer.inlineCallbacks
def persisting_queue(item):
with Measure(self._clock, "persist_events"):
yield self._persist_events(
item.events_and_contexts, backfilled=item.backfilled
)
self._event_persist_queue.handle_queue(room_id, persisting_queue)
@_retry_on_integrity_error
@defer.inlineCallbacks
def _persist_events(
self, events_and_contexts, backfilled=False, delete_existing=False
):
"""Persist events to db
Args:
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
delete_existing (bool):
Returns:
Deferred: resolves when the events have been persisted
"""
if not events_and_contexts:
return
if backfilled:
stream_ordering_manager = self._backfill_id_gen.get_next_mult(
len(events_and_contexts)
)
else:
stream_ordering_manager = self._stream_id_gen.get_next_mult(
len(events_and_contexts)
)
with stream_ordering_manager as stream_orderings:
for (event, context), stream in zip(events_and_contexts, stream_orderings):
event.internal_metadata.stream_ordering = stream
chunks = [
events_and_contexts[x : x + 100]
for x in range(0, len(events_and_contexts), 100)
]
for chunk in chunks:
# We can't easily parallelize these since different chunks
# might contain the same event. :(
# NB: Assumes that we are only persisting events for one room
# at a time.
# map room_id->list[event_ids] giving the new forward
# extremities in each room
new_forward_extremeties = {}
# map room_id->(type,state_key)->event_id tracking the full
# state in each room after adding these events.
# This is simply used to prefill the get_current_state_ids
# cache
current_state_for_room = {}
# map room_id->(to_delete, to_insert) where to_delete is a list
# of type/state keys to remove from current state, and to_insert
# is a map (type,key)->event_id giving the state delta in each
# room
state_delta_for_room = {}
if not backfilled:
with Measure(self._clock, "_calculate_state_and_extrem"):
# Work out the new "current state" for each room.
# We do this by working out what the new extremities are and then
# calculating the state from that.
events_by_room = {}
for event, context in chunk:
events_by_room.setdefault(event.room_id, []).append(
(event, context)
)
for room_id, ev_ctx_rm in iteritems(events_by_room):
latest_event_ids = yield self.get_latest_event_ids_in_room(
room_id
)
new_latest_event_ids = yield self._calculate_new_extremities(
room_id, ev_ctx_rm, latest_event_ids
)
latest_event_ids = set(latest_event_ids)
if new_latest_event_ids == latest_event_ids:
# No change in extremities, so no change in state
continue
# there should always be at least one forward extremity.
# (except during the initial persistence of the send_join
# results, in which case there will be no existing
# extremities, so we'll `continue` above and skip this bit.)
assert new_latest_event_ids, "No forward extremities left!"
new_forward_extremeties[room_id] = new_latest_event_ids
len_1 = (
len(latest_event_ids) == 1
and len(new_latest_event_ids) == 1
)
if len_1:
all_single_prev_not_state = all(
len(event.prev_event_ids()) == 1
and not event.is_state()
for event, ctx in ev_ctx_rm
)
# Don't bother calculating state if they're just
# a long chain of single ancestor non-state events.
if all_single_prev_not_state:
continue
state_delta_counter.inc()
if len(new_latest_event_ids) == 1:
state_delta_single_event_counter.inc()
# This is a fairly handwavey check to see if we could
# have guessed what the delta would have been when
# processing one of these events.
# What we're interested in is if the latest extremities
# were the same when we created the event as they are
# now. When this server creates a new event (as opposed
# to receiving it over federation) it will use the
# forward extremities as the prev_events, so we can
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
prev_event_ids = set(ev.prev_event_ids())
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
logger.info("Calculating state delta for room %s", room_id)
with Measure(
self._clock, "persist_events.get_new_state_after_events"
):
res = yield self._get_new_state_after_events(
room_id,
ev_ctx_rm,
latest_event_ids,
new_latest_event_ids,
)
current_state, delta_ids = res
# If either are not None then there has been a change,
# and we need to work out the delta (or use that
# given)
if delta_ids is not None:
# If there is a delta we know that we've
# only added or replaced state, never
# removed keys entirely.
state_delta_for_room[room_id] = ([], delta_ids)
elif current_state is not None:
with Measure(
self._clock, "persist_events.calculate_state_delta"
):
delta = yield self._calculate_state_delta(
room_id, current_state
)
state_delta_for_room[room_id] = delta
# If we have the current_state then lets prefill
# the cache with it.
if current_state is not None:
current_state_for_room[room_id] = current_state
yield self.runInteraction(
"persist_events",
self._persist_events_txn,
events_and_contexts=chunk,
backfilled=backfilled,
delete_existing=delete_existing,
state_delta_for_room=state_delta_for_room,
new_forward_extremeties=new_forward_extremeties,
)
persist_event_counter.inc(len(chunk))
if not backfilled:
# backfilled events have negative stream orderings, so we don't
# want to set the event_persisted_position to that.
synapse.metrics.event_persisted_position.set(
chunk[-1][0].internal_metadata.stream_ordering
)
for event, context in chunk:
if context.app_service:
origin_type = "local"
origin_entity = context.app_service.id
elif self.hs.is_mine_id(event.sender):
origin_type = "local"
origin_entity = "*client*"
else:
origin_type = "remote"
origin_entity = get_domain_from_id(event.sender)
event_counter.labels(event.type, origin_type, origin_entity).inc()
for room_id, new_state in iteritems(current_state_for_room):
self.get_current_state_ids.prefill((room_id,), new_state)
for room_id, latest_event_ids in iteritems(new_forward_extremeties):
self.get_latest_event_ids_in_room.prefill(
(room_id,), list(latest_event_ids)
)
@defer.inlineCallbacks
def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
"""Calculates the new forward extremities for a room given events to
persist.
Assumes that we are only persisting events for one room at a time.
"""
# we're only interested in new events which aren't outliers and which aren't
# being rejected.
new_events = [
event
for event, ctx in event_contexts
if not event.internal_metadata.is_outlier()
and not ctx.rejected
and not event.internal_metadata.is_soft_failed()
]
latest_event_ids = set(latest_event_ids)
# start with the existing forward extremities
result = set(latest_event_ids)
# add all the new events to the list
result.update(event.event_id for event in new_events)
# Now remove all events which are prev_events of any of the new events
result.difference_update(
e_id for event in new_events for e_id in event.prev_event_ids()
)
# Remove any events which are prev_events of any existing events.
existing_prevs = yield self._get_events_which_are_prevs(result)
result.difference_update(existing_prevs)
# Finally handle the case where the new events have soft-failed prev
# events. If they do we need to remove them and their prev events,
# otherwise we end up with dangling extremities.
existing_prevs = yield self._get_prevs_before_rejected(
e_id for event in new_events for e_id in event.prev_event_ids()
)
result.difference_update(existing_prevs)
# We only update metrics for events that change forward extremities
# (e.g. we ignore backfill/outliers/etc)
if result != latest_event_ids:
forward_extremities_counter.observe(len(result))
stale = latest_event_ids & result
stale_forward_extremities_counter.observe(len(stale))
defer.returnValue(result)
@defer.inlineCallbacks
def _get_events_which_are_prevs(self, event_ids):
"""Filter the supplied list of event_ids to get those which are prev_events of
existing (non-outlier/rejected) events.
Args:
event_ids (Iterable[str]): event ids to filter
Returns:
Deferred[List[str]]: filtered event ids
"""
results = []
def _get_events_which_are_prevs_txn(txn, batch):
sql = """
SELECT prev_event_id, internal_metadata
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
prev_event_id IN (%s)
AND NOT events.outlier
AND rejections.event_id IS NULL
""" % (
",".join("?" for _ in batch),
)
txn.execute(sql, batch)
results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed"))
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
)
defer.returnValue(results)
@defer.inlineCallbacks
def _get_prevs_before_rejected(self, event_ids):
"""Get soft-failed ancestors to remove from the extremities.
Given a set of events, find all those that have been soft-failed or
rejected. Returns those soft failed/rejected events and their prev
events (whether soft-failed/rejected or not), and recurses up the
prev-event graph until it finds no more soft-failed/rejected events.
This is used to find extremities that are ancestors of new events, but
are separated by soft failed events.
Args:
event_ids (Iterable[str]): Events to find prev events for. Note
that these must have already been persisted.
Returns:
Deferred[set[str]]
"""
# The set of event_ids to return. This includes all soft-failed events
# and their prev events.
existing_prevs = set()
def _get_prevs_before_rejected_txn(txn, batch):
to_recursively_check = batch
while to_recursively_check:
sql = """
SELECT
event_id, prev_event_id, internal_metadata,
rejections.event_id IS NOT NULL
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
event_id IN (%s)
AND NOT events.outlier
""" % (
",".join("?" for _ in to_recursively_check),
)
txn.execute(sql, to_recursively_check)
to_recursively_check = []
for event_id, prev_event_id, metadata, rejected in txn:
if prev_event_id in existing_prevs:
continue
soft_failed = json.loads(metadata).get("soft_failed")
if soft_failed or rejected:
to_recursively_check.append(prev_event_id)
existing_prevs.add(prev_event_id)
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
)
defer.returnValue(existing_prevs)
@defer.inlineCallbacks
def _get_new_state_after_events(
self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
):
"""Calculate the current state dict after adding some new events to
a room
Args:
room_id (str):
room to which the events are being added. Used for logging etc
events_context (list[(EventBase, EventContext)]):
events and contexts which are being added to the room
old_latest_event_ids (iterable[str]):
the old forward extremities for the room.
new_latest_event_ids (iterable[str]):
the new forward extremities for the room.
Returns:
Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
Returns a tuple of two state maps, the first being the full new current
state and the second being the delta to the existing current state.
If both are None then there has been no change.
If there has been a change then we only return the delta if its
already been calculated. Conversely if we do know the delta then
the new current state is only returned if we've already calculated
it.
"""
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
# Map from (prev state group, new state group) -> delta state dict
state_group_deltas = {}
for ev, ctx in events_context:
if ctx.state_group is None:
# This should only happen for outlier events.
if not ev.internal_metadata.is_outlier():
raise Exception(
"Context for new event %s has no state "
"group" % (ev.event_id,)
)
continue
if ctx.state_group in state_groups_map:
continue
# We're only interested in pulling out state that has already
# been cached in the context. We'll pull stuff out of the DB later
# if necessary.
current_state_ids = ctx.get_cached_current_state_ids()
if current_state_ids is not None:
state_groups_map[ctx.state_group] = current_state_ids
if ctx.prev_group:
state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
# We need to map the event_ids to their state groups. First, let's
# check if the event is one we're persisting, in which case we can
# pull the state group from its context.
# Otherwise we need to pull the state group from the database.
# Set of events we need to fetch groups for. (We know none of the old
# extremities are going to be in events_context).
missing_event_ids = set(old_latest_event_ids)
event_id_to_state_group = {}
for event_id in new_latest_event_ids:
# First search in the list of new events we're adding.
for ev, ctx in events_context:
if event_id == ev.event_id and ctx.state_group is not None:
event_id_to_state_group[event_id] = ctx.state_group
break
else:
# If we couldn't find it, then we'll need to pull
# the state from the database
missing_event_ids.add(event_id)
if missing_event_ids:
# Now pull out the state groups for any missing events from DB
event_to_groups = yield self._get_state_group_for_events(missing_event_ids)
event_id_to_state_group.update(event_to_groups)
# State groups of old_latest_event_ids
old_state_groups = set(
event_id_to_state_group[evid] for evid in old_latest_event_ids
)
# State groups of new_latest_event_ids
new_state_groups = set(
event_id_to_state_group[evid] for evid in new_latest_event_ids
)
        # If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
defer.returnValue((None, None))
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
# If we're going from one state group to another, lets check if
# we have a delta for that transition. If we do then we can just
# return that.
new_state_group = next(iter(new_state_groups))
old_state_group = next(iter(old_state_groups))
delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
if delta_ids is not None:
# We have a delta from the existing to new current state,
# so lets just return that. If we happen to already have
# the current state in memory then lets also return that,
# but it doesn't matter if we don't.
new_state = state_groups_map.get(new_state_group)
defer.returnValue((new_state, delta_ids))
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
missing_state = new_state_groups - set(state_groups_map)
if missing_state:
group_to_state = yield self._get_state_for_groups(missing_state)
state_groups_map.update(group_to_state)
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
defer.returnValue((state_groups_map[new_state_groups.pop()], None))
# Ok, we need to defer to the state handler to resolve our state sets.
state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
events_map = {ev.event_id: ev for ev, _ in events_context}
# We need to get the room version, which is in the create event.
        # Normally that'd be in the database, but it's also possible that we're
# currently trying to persist it.
room_version = None
for ev, _ in events_context:
if ev.type == EventTypes.Create and ev.state_key == "":
room_version = ev.content.get("room_version", "1")
break
if not room_version:
room_version = yield self.get_room_version(room_id)
logger.debug("calling resolve_state_groups from preserve_events")
res = yield self._state_resolution_handler.resolve_state_groups(
room_id,
room_version,
state_groups,
events_map,
state_res_store=StateResolutionStore(self),
)
defer.returnValue((res.state, None))
@defer.inlineCallbacks
def _calculate_state_delta(self, room_id, current_state):
"""Calculate the new state deltas for a room.
Assumes that we are only persisting events for one room at a time.
Returns:
tuple[list, dict] (to_delete, to_insert): where to_delete are the
type/state_keys to remove from current_state_events and `to_insert`
are the updates to current_state_events.
"""
existing_state = yield self.get_current_state_ids(room_id)
to_delete = [key for key in existing_state if key not in current_state]
to_insert = {
key: ev_id
for key, ev_id in iteritems(current_state)
if ev_id != existing_state.get(key)
}
defer.returnValue((to_delete, to_insert))
@log_function
def _persist_events_txn(
self,
txn,
events_and_contexts,
backfilled,
delete_existing=False,
state_delta_for_room={},
new_forward_extremeties={},
):
"""Insert some number of room events into the necessary database tables.
Rejected events are only inserted into the events table, the events_json table,
and the rejections table. Things reading from those table will need to check
whether the event was rejected.
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]):
events to persist
backfilled (bool): True if the events were backfilled
delete_existing (bool): True to purge existing table rows for the
events from the database. This is useful when retrying due to
IntegrityError.
state_delta_for_room (dict[str, (list, dict)]):
The current-state delta for each room. For each room, a tuple
(to_delete, to_insert), being a list of type/state keys to be
removed from the current state, and a state set to be added to
the current state.
new_forward_extremeties (dict[str, list[str]]):
The new forward extremities for each room. For each room, a
list of the event ids which are the forward extremities.
"""
all_events_and_contexts = events_and_contexts
min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering
self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
self._update_forward_extremities_txn(
txn,
new_forward_extremities=new_forward_extremeties,
max_stream_order=max_stream_order,
)
# Ensure that we don't have the same event twice.
events_and_contexts = self._filter_events_and_contexts_for_duplicates(
events_and_contexts
)
self._update_room_depths_txn(
txn, events_and_contexts=events_and_contexts, backfilled=backfilled
)
# _update_outliers_txn filters out any events which have already been
# persisted, and returns the filtered list.
events_and_contexts = self._update_outliers_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only events that we haven't
# seen before.
if delete_existing:
# For paranoia reasons, we go and delete all the existing entries
# for these events so we can reinsert them.
# This gets around any problems with some tables already having
# entries.
self._delete_existing_rows_txn(txn, events_and_contexts=events_and_contexts)
self._store_event_txn(txn, events_and_contexts=events_and_contexts)
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
# We want to store event_auth mappings for rejected events, as they're
# used in state res v2.
# This is only necessary if the rejected event appears in an accepted
        # event's auth chain, but it's easier for now just to store them (and
# it doesn't take much storage compared to storing the entire event
# anyway).
self._simple_insert_many_txn(
txn,
table="event_auth",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"auth_id": auth_id,
}
for event, _ in events_and_contexts
for auth_id in event.auth_event_ids()
if event.is_state()
],
)
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
events_and_contexts = self._store_rejected_events_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only ones that weren't
# rejected.
self._update_metadata_tables_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
backfilled=backfilled,
)
def _update_current_state_txn(self, txn, state_delta_by_room, stream_id):
for room_id, current_state_tuple in iteritems(state_delta_by_room):
to_delete, to_insert = current_state_tuple
# First we add entries to the current_state_delta_stream. We
# do this before updating the current_state_events table so
# that we can use it to calculate the `prev_event_id`. (This
# allows us to not have to pull out the existing state
# unnecessarily).
#
# The stream_id for the update is chosen to be the minimum of the stream_ids
# for the batch of the events that we are persisting; that means we do not
# end up in a situation where workers see events before the
# current_state_delta updates.
#
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, ?, ?, ?, ?, (
SELECT event_id FROM current_state_events
WHERE room_id = ? AND type = ? AND state_key = ?
)
"""
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
None,
room_id,
etype,
state_key,
)
for etype, state_key in to_delete
# We sanity check that we're deleting rather than updating
if (etype, state_key) not in to_insert
),
)
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
ev_id,
room_id,
etype,
state_key,
)
for (etype, state_key), ev_id in iteritems(to_insert)
),
)
# Now we actually update the current_state_events table
txn.executemany(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
)
self._simple_insert_many_txn(
txn,
table="current_state_events",
values=[
{
"event_id": ev_id,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
}
for key, ev_id in iteritems(to_insert)
],
)
txn.call_after(
self._curr_state_delta_stream_cache.entity_has_changed,
room_id,
stream_id,
)
# Invalidate the various caches
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
            # and which we have added, then we invalidate the caches for all
# those users.
members_changed = set(
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
)
for member in members_changed:
txn.call_after(
self.get_rooms_for_user_with_stream_ordering.invalidate, (member,)
)
self._invalidate_state_caches_and_stream(txn, room_id, members_changed)
def _update_forward_extremities_txn(
self, txn, new_forward_extremities, max_stream_order
):
for room_id, new_extrem in iteritems(new_forward_extremities):
self._simple_delete_txn(
txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
)
txn.call_after(self.get_latest_event_ids_in_room.invalidate, (room_id,))
self._simple_insert_many_txn(
txn,
table="event_forward_extremities",
values=[
{"event_id": ev_id, "room_id": room_id}
for room_id, new_extrem in iteritems(new_forward_extremities)
for ev_id in new_extrem
],
)
# We now insert into stream_ordering_to_exterm a mapping from room_id,
        # new stream_ordering to new forward extremities in the room.
        # This allows us to later efficiently look up the forward extremities
# for a room before a given stream_ordering
self._simple_insert_many_txn(
txn,
table="stream_ordering_to_exterm",
values=[
{
"room_id": room_id,
"event_id": event_id,
"stream_ordering": max_stream_order,
}
for room_id, new_extrem in iteritems(new_forward_extremities)
for event_id in new_extrem
],
)
@classmethod
def _filter_events_and_contexts_for_duplicates(cls, events_and_contexts):
"""Ensure that we don't have the same event twice.
Pick the earliest non-outlier if there is one, else the earliest one.
Args:
events_and_contexts (list[(EventBase, EventContext)]):
Returns:
list[(EventBase, EventContext)]: filtered list
"""
new_events_and_contexts = OrderedDict()
for event, context in events_and_contexts:
prev_event_context = new_events_and_contexts.get(event.event_id)
if prev_event_context:
if not event.internal_metadata.is_outlier():
if prev_event_context[0].internal_metadata.is_outlier():
# To ensure correct ordering we pop, as OrderedDict is
# ordered by first insertion.
new_events_and_contexts.pop(event.event_id, None)
new_events_and_contexts[event.event_id] = (event, context)
else:
new_events_and_contexts[event.event_id] = (event, context)
return list(new_events_and_contexts.values())
def _update_room_depths_txn(self, txn, events_and_contexts, backfilled):
"""Update min_depth for each room
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
backfilled (bool): True if the events were backfilled
"""
depth_updates = {}
for event, context in events_and_contexts:
            # Remove any existing cache entries for the event_ids
txn.call_after(self._invalidate_get_event_cache, event.event_id)
if not backfilled:
txn.call_after(
self._events_stream_cache.entity_has_changed,
event.room_id,
event.internal_metadata.stream_ordering,
)
if not event.internal_metadata.is_outlier() and not context.rejected:
depth_updates[event.room_id] = max(
event.depth, depth_updates.get(event.room_id, event.depth)
)
for room_id, depth in iteritems(depth_updates):
self._update_min_depth_for_room_txn(txn, room_id, depth)
def _update_outliers_txn(self, txn, events_and_contexts):
"""Update any outliers with new event info.
This turns outliers into ex-outliers (unless the new event was
rejected).
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without events which
are already in the events table.
"""
txn.execute(
"SELECT event_id, outlier FROM events WHERE event_id in (%s)"
% (",".join(["?"] * len(events_and_contexts)),),
[event.event_id for event, _ in events_and_contexts],
)
have_persisted = {event_id: outlier for event_id, outlier in txn}
to_remove = set()
for event, context in events_and_contexts:
if event.event_id not in have_persisted:
continue
to_remove.add(event)
if context.rejected:
# If the event is rejected then we don't care if the event
# was an outlier or not.
continue
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
# We received a copy of an event that we had already stored as
                # an outlier in the database. We now have some state at that event,
                # so we need to update the state_groups table with that state.
# insert into event_to_state_groups.
try:
self._store_event_state_mappings_txn(txn, ((event, context),))
except Exception:
logger.exception("")
raise
metadata_json = encode_json(event.internal_metadata.get_dict())
sql = (
"UPDATE event_json SET internal_metadata = ?" " WHERE event_id = ?"
)
txn.execute(sql, (metadata_json, event.event_id))
# Add an entry to the ex_outlier_stream table to replicate the
# change in outlier status to our workers.
stream_order = event.internal_metadata.stream_ordering
state_group_id = context.state_group
self._simple_insert_txn(
txn,
table="ex_outlier_stream",
values={
"event_stream_ordering": stream_order,
"event_id": event.event_id,
"state_group": state_group_id,
},
)
sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?"
txn.execute(sql, (False, event.event_id))
# Update the event_backward_extremities table now that this
# event isn't an outlier any more.
self._update_backward_extremeties(txn, [event])
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
@classmethod
def _delete_existing_rows_txn(cls, txn, events_and_contexts):
if not events_and_contexts:
# nothing to do here
return
logger.info("Deleting existing")
for table in (
"events",
"event_auth",
"event_json",
"event_edges",
"event_forward_extremities",
"event_reference_hashes",
"event_search",
"event_to_state_groups",
"guest_access",
"history_visibility",
"local_invites",
"room_names",
"state_events",
"rejections",
"redactions",
"room_memberships",
"topics",
):
txn.executemany(
"DELETE FROM %s WHERE event_id = ?" % (table,),
[(ev.event_id,) for ev, _ in events_and_contexts],
)
for table in ("event_push_actions",):
txn.executemany(
"DELETE FROM %s WHERE room_id = ? AND event_id = ?" % (table,),
[(ev.room_id, ev.event_id) for ev, _ in events_and_contexts],
)
def _store_event_txn(self, txn, events_and_contexts):
"""Insert new events into the event and event_json tables
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
"""
if not events_and_contexts:
# nothing to do here
return
def event_dict(event):
d = event.get_dict()
d.pop("redacted", None)
d.pop("redacted_because", None)
return d
self._simple_insert_many_txn(
txn,
table="event_json",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"internal_metadata": encode_json(
event.internal_metadata.get_dict()
),
"json": encode_json(event_dict(event)),
"format_version": event.format_version,
}
for event, _ in events_and_contexts
],
)
self._simple_insert_many_txn(
txn,
table="events",
values=[
{
"stream_ordering": event.internal_metadata.stream_ordering,
"topological_ordering": event.depth,
"depth": event.depth,
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
"origin_server_ts": int(event.origin_server_ts),
"received_ts": self._clock.time_msec(),
"sender": event.sender,
"contains_url": (
"url" in event.content
and isinstance(event.content["url"], text_type)
),
}
for event, _ in events_and_contexts
],
)
def _store_rejected_events_txn(self, txn, events_and_contexts):
"""Add rows to the 'rejections' table for received events which were
rejected
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without the rejected
events.
"""
# Remove the rejected events from the list now that we've added them
        # to the events table and the event_json table.
to_remove = set()
for event, context in events_and_contexts:
if context.rejected:
# Insert the event_id into the rejections table
self._store_rejections_txn(txn, event.event_id, context.rejected)
to_remove.add(event)
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
def _update_metadata_tables_txn(
self, txn, events_and_contexts, all_events_and_contexts, backfilled
):
"""Update all the miscellaneous tables for new events
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
all_events_and_contexts (list[(EventBase, EventContext)]): all
events that we were going to persist. This includes events
we've already persisted, etc, that wouldn't appear in
events_and_context.
backfilled (bool): True if the events were backfilled
"""
# Insert all the push actions into the event_push_actions table.
self._set_push_actions_for_event_and_users_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
)
if not events_and_contexts:
# nothing to do here
return
for event, context in events_and_contexts:
if event.type == EventTypes.Redaction and event.redacts is not None:
# Remove the entries in the event_push_actions table for the
# redacted event.
self._remove_push_actions_for_event_id_txn(
txn, event.room_id, event.redacts
)
# Remove from relations table.
self._handle_redaction(txn, event.redacts)
# Update the event_forward_extremities, event_backward_extremities and
# event_edges tables.
self._handle_mult_prev_events(
txn, events=[event for event, _ in events_and_contexts]
)
for event, _ in events_and_contexts:
if event.type == EventTypes.Name:
# Insert into the room_names and event_search tables.
self._store_room_name_txn(txn, event)
elif event.type == EventTypes.Topic:
# Insert into the topics table and event_search table.
self._store_room_topic_txn(txn, event)
elif event.type == EventTypes.Message:
# Insert into the event_search table.
self._store_room_message_txn(txn, event)
elif event.type == EventTypes.Redaction:
# Insert into the redactions table.
self._store_redaction(txn, event)
elif event.type == EventTypes.RoomHistoryVisibility:
# Insert into the event_search table.
self._store_history_visibility_txn(txn, event)
elif event.type == EventTypes.GuestAccess:
# Insert into the event_search table.
self._store_guest_access_txn(txn, event)
self._handle_event_relations(txn, event)
# Insert into the room_memberships table.
self._store_room_members_txn(
txn,
[
event
for event, _ in events_and_contexts
if event.type == EventTypes.Member
],
backfilled=backfilled,
)
# Insert event_reference_hashes table.
self._store_event_reference_hashes_txn(
txn, [event for event, _ in events_and_contexts]
)
state_events_and_contexts = [
ec for ec in events_and_contexts if ec[0].is_state()
]
state_values = []
for event, context in state_events_and_contexts:
vals = {
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"state_key": event.state_key,
}
# TODO: How does this work with backfilling?
if hasattr(event, "replaces_state"):
vals["prev_state"] = event.replaces_state
state_values.append(vals)
self._simple_insert_many_txn(txn, table="state_events", values=state_values)
# Prefill the event cache
self._add_to_cache(txn, events_and_contexts)
def _add_to_cache(self, txn, events_and_contexts):
to_prefill = []
rows = []
N = 200
for i in range(0, len(events_and_contexts), N):
ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
if not ev_map:
break
sql = (
"SELECT "
" e.event_id as event_id, "
" r.redacts as redacts,"
" rej.event_id as rejects "
" FROM events as e"
" LEFT JOIN rejections as rej USING (event_id)"
" LEFT JOIN redactions as r ON e.event_id = r.redacts"
" WHERE e.event_id IN (%s)"
) % (",".join(["?"] * len(ev_map)),)
txn.execute(sql, list(ev_map))
rows = self.cursor_to_dict(txn)
for row in rows:
event = ev_map[row["event_id"]]
if not row["rejects"] and not row["redacts"]:
to_prefill.append(
_EventCacheEntry(event=event, redacted_event=None)
)
def prefill():
for cache_entry in to_prefill:
self._get_event_cache.prefill((cache_entry[0].event_id,), cache_entry)
txn.call_after(prefill)
def _store_redaction(self, txn, event):
# invalidate the cache for the redacted event
txn.call_after(self._invalidate_get_event_cache, event.redacts)
txn.execute(
"INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
(event.event_id, event.redacts),
)
@defer.inlineCallbacks
def count_daily_messages(self):
"""
Returns an estimate of the number of messages sent in the last day.
If it has been significantly less or more than one day since the last
call to this function, it will return None.
"""
def _count_messages(txn):
sql = """
SELECT COALESCE(COUNT(*), 0) FROM events
WHERE type = 'm.room.message'
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_messages", _count_messages)
defer.returnValue(ret)
@defer.inlineCallbacks
def count_daily_sent_messages(self):
def _count_messages(txn):
# This is good enough as if you have silly characters in your own
            # hostname then that's your own fault.
like_clause = "%:" + self.hs.hostname
sql = """
SELECT COALESCE(COUNT(*), 0) FROM events
WHERE type = 'm.room.message'
AND sender LIKE ?
AND stream_ordering > ?
"""
txn.execute(sql, (like_clause, self.stream_ordering_day_ago))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
defer.returnValue(ret)
@defer.inlineCallbacks
def count_daily_active_rooms(self):
def _count(txn):
sql = """
SELECT COALESCE(COUNT(DISTINCT room_id), 0) FROM events
WHERE type = 'm.room.message'
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_daily_active_rooms", _count)
defer.returnValue(ret)
def get_current_backfill_token(self):
"""The current minimum token that backfilled events have reached"""
return -self._backfill_id_gen.get_current_token()
def get_current_events_token(self):
"""The current maximum token that events have reached"""
return self._stream_id_gen.get_current_token()
def get_all_new_forward_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
def get_all_new_forward_event_rows(txn):
sql = (
"SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? < stream_ordering AND stream_ordering <= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
txn.execute(sql, (last_id, current_id, limit))
new_event_updates = txn.fetchall()
if len(new_event_updates) == limit:
upper_bound = new_event_updates[-1][0]
else:
upper_bound = current_id
sql = (
"SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" INNER JOIN ex_outlier_stream USING (event_id)"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? < event_stream_ordering"
" AND event_stream_ordering <= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (last_id, upper_bound))
new_event_updates.extend(txn)
return new_event_updates
return self.runInteraction(
"get_all_new_forward_event_rows", get_all_new_forward_event_rows
)
def get_all_new_backfill_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
def get_all_new_backfill_event_rows(txn):
sql = (
"SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? > stream_ordering AND stream_ordering >= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
txn.execute(sql, (-last_id, -current_id, limit))
new_event_updates = txn.fetchall()
if len(new_event_updates) == limit:
upper_bound = new_event_updates[-1][0]
else:
upper_bound = current_id
sql = (
"SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" INNER JOIN ex_outlier_stream USING (event_id)"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (-last_id, -upper_bound))
new_event_updates.extend(txn.fetchall())
return new_event_updates
return self.runInteraction(
"get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
)
@cached(num_args=5, max_entries=10)
def get_all_new_events(
self,
last_backfill_id,
last_forward_id,
current_backfill_id,
current_forward_id,
limit,
):
"""Get all the new events that have arrived at the server either as
new events or as backfilled events"""
have_backfill_events = last_backfill_id != current_backfill_id
have_forward_events = last_forward_id != current_forward_id
if not have_backfill_events and not have_forward_events:
            return defer.succeed(AllNewEventsResult([], [], [], []))
def get_all_new_events_txn(txn):
sql = (
"SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" WHERE ? < stream_ordering AND stream_ordering <= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
if have_forward_events:
txn.execute(sql, (last_forward_id, current_forward_id, limit))
new_forward_events = txn.fetchall()
if len(new_forward_events) == limit:
upper_bound = new_forward_events[-1][0]
else:
upper_bound = current_forward_id
sql = (
"SELECT event_stream_ordering, event_id, state_group"
" FROM ex_outlier_stream"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (last_forward_id, upper_bound))
forward_ex_outliers = txn.fetchall()
else:
new_forward_events = []
forward_ex_outliers = []
sql = (
"SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" WHERE ? > stream_ordering AND stream_ordering >= ?"
" ORDER BY stream_ordering DESC"
" LIMIT ?"
)
if have_backfill_events:
txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
new_backfill_events = txn.fetchall()
if len(new_backfill_events) == limit:
upper_bound = new_backfill_events[-1][0]
else:
upper_bound = current_backfill_id
sql = (
"SELECT -event_stream_ordering, event_id, state_group"
" FROM ex_outlier_stream"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (-last_backfill_id, -upper_bound))
backward_ex_outliers = txn.fetchall()
else:
new_backfill_events = []
backward_ex_outliers = []
return AllNewEventsResult(
new_forward_events,
new_backfill_events,
forward_ex_outliers,
backward_ex_outliers,
)
return self.runInteraction("get_all_new_events", get_all_new_events_txn)
def purge_history(self, room_id, token, delete_local_events):
"""Deletes room history before a certain point
Args:
room_id (str):
token (str): A topological token to delete events before
delete_local_events (bool):
if True, we will delete local events as well as remote ones
(instead of just marking them as outliers and deleting their
state groups).
"""
return self.runInteraction(
"purge_history",
self._purge_history_txn,
room_id,
token,
delete_local_events,
)
def _purge_history_txn(self, txn, room_id, token_str, delete_local_events):
token = RoomStreamToken.parse(token_str)
# Tables that should be pruned:
# event_auth
# event_backward_extremities
# event_edges
# event_forward_extremities
# event_json
# event_push_actions
# event_reference_hashes
# event_search
# event_to_state_groups
# events
# rejections
# room_depth
# state_groups
# state_groups_state
# we will build a temporary table listing the events so that we don't
# have to keep shovelling the list back and forth across the
# connection. Annoyingly the python sqlite driver commits the
# transaction on CREATE, so let's do this first.
#
# furthermore, we might already have the table from a previous (failed)
# purge attempt, so let's drop the table first.
txn.execute("DROP TABLE IF EXISTS events_to_purge")
txn.execute(
"CREATE TEMPORARY TABLE events_to_purge ("
" event_id TEXT NOT NULL,"
" should_delete BOOLEAN NOT NULL"
")"
)
        # First ensure that we're not about to delete all the forward extremities
txn.execute(
"SELECT e.event_id, e.depth FROM events as e "
"INNER JOIN event_forward_extremities as f "
"ON e.event_id = f.event_id "
"AND e.room_id = f.room_id "
"WHERE f.room_id = ?",
(room_id,),
)
rows = txn.fetchall()
max_depth = max(row[1] for row in rows)
if max_depth < token.topological:
# We need to ensure we don't delete all the events from the database
# otherwise we wouldn't be able to send any events (due to not
            # having any backward extremities)
raise SynapseError(
400, "topological_ordering is greater than forward extremeties"
)
logger.info("[purge] looking for events to delete")
should_delete_expr = "state_key IS NULL"
should_delete_params = ()
if not delete_local_events:
should_delete_expr += " AND event_id NOT LIKE ?"
# We include the parameter twice since we use the expression twice
should_delete_params += ("%:" + self.hs.hostname, "%:" + self.hs.hostname)
should_delete_params += (room_id, token.topological)
# Note that we insert events that are outliers and aren't going to be
# deleted, as nothing will happen to them.
txn.execute(
"INSERT INTO events_to_purge"
" SELECT event_id, %s"
" FROM events AS e LEFT JOIN state_events USING (event_id)"
" WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?"
% (should_delete_expr, should_delete_expr),
should_delete_params,
)
# We create the indices *after* insertion as that's a lot faster.
# create an index on should_delete because later we'll be looking for
# the should_delete / shouldn't_delete subsets
txn.execute(
"CREATE INDEX events_to_purge_should_delete"
" ON events_to_purge(should_delete)"
)
# We do joins against events_to_purge for e.g. calculating state
# groups to purge, etc., so lets make an index.
txn.execute("CREATE INDEX events_to_purge_id" " ON events_to_purge(event_id)")
txn.execute("SELECT event_id, should_delete FROM events_to_purge")
event_rows = txn.fetchall()
logger.info(
"[purge] found %i events before cutoff, of which %i can be deleted",
len(event_rows),
sum(1 for e in event_rows if e[1]),
)
logger.info("[purge] Finding new backward extremities")
        # We calculate the new entries for the backward extremities by finding
# events to be purged that are pointed to by events we're not going to
# purge.
txn.execute(
"SELECT DISTINCT e.event_id FROM events_to_purge AS e"
" INNER JOIN event_edges AS ed ON e.event_id = ed.prev_event_id"
" LEFT JOIN events_to_purge AS ep2 ON ed.event_id = ep2.event_id"
" WHERE ep2.event_id IS NULL"
)
new_backwards_extrems = txn.fetchall()
logger.info("[purge] replacing backward extremities: %r", new_backwards_extrems)
txn.execute(
"DELETE FROM event_backward_extremities WHERE room_id = ?", (room_id,)
)
        # Update backward extremities
txn.executemany(
"INSERT INTO event_backward_extremities (room_id, event_id)"
" VALUES (?, ?)",
[(room_id, event_id) for event_id, in new_backwards_extrems],
)
logger.info("[purge] finding redundant state groups")
# Get all state groups that are referenced by events that are to be
# deleted. We then go and check if they are referenced by other events
# or state groups, and if not we delete them.
txn.execute(
"""
SELECT DISTINCT state_group FROM events_to_purge
INNER JOIN event_to_state_groups USING (event_id)
"""
)
referenced_state_groups = set(sg for sg, in txn)
logger.info(
"[purge] found %i referenced state groups", len(referenced_state_groups)
)
logger.info("[purge] finding state groups that can be deleted")
_ = self._find_unreferenced_groups_during_purge(txn, referenced_state_groups)
state_groups_to_delete, remaining_state_groups = _
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
logger.info(
"[purge] de-delta-ing %i remaining state groups",
len(remaining_state_groups),
)
# Now we turn the state groups that reference to-be-deleted state
        # groups into non-delta versions.
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
curr_state = curr_state[sg]
self._simple_delete_txn(
txn, table="state_groups_state", keyvalues={"state_group": sg}
)
self._simple_delete_txn(
txn, table="state_group_edges", keyvalues={"state_group": sg}
)
self._simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": sg,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in iteritems(curr_state)
],
)
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
)
logger.info("[purge] removing events from event_to_state_groups")
txn.execute(
"DELETE FROM event_to_state_groups "
"WHERE event_id IN (SELECT event_id from events_to_purge)"
)
for event_id, _ in event_rows:
txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
# Delete all remote non-state events
for table in (
"events",
"event_json",
"event_auth",
"event_edges",
"event_forward_extremities",
"event_reference_hashes",
"event_search",
"rejections",
):
logger.info("[purge] removing events from %s", table)
txn.execute(
"DELETE FROM %s WHERE event_id IN ("
" SELECT event_id FROM events_to_purge WHERE should_delete"
")" % (table,)
)
# event_push_actions lacks an index on event_id, and has one on
# (room_id, event_id) instead.
for table in ("event_push_actions",):
logger.info("[purge] removing events from %s", table)
txn.execute(
"DELETE FROM %s WHERE room_id = ? AND event_id IN ("
" SELECT event_id FROM events_to_purge WHERE should_delete"
")" % (table,),
(room_id,),
)
# Mark all state and own events as outliers
logger.info("[purge] marking remaining events as outliers")
txn.execute(
"UPDATE events SET outlier = ?"
" WHERE event_id IN ("
" SELECT event_id FROM events_to_purge "
" WHERE NOT should_delete"
")",
(True,),
)
# synapse tries to take out an exclusive lock on room_depth whenever it
# persists events (because upsert), and once we run this update, we
# will block that for the rest of our transaction.
#
# So, let's stick it at the end so that we don't block event
# persistence.
#
# We do this by calculating the minimum depth of the backwards
# extremities. However, the events in event_backward_extremities
# are ones we don't have yet so we need to look at the events that
# point to it via event_edges table.
txn.execute(
"""
SELECT COALESCE(MIN(depth), 0)
FROM event_backward_extremities AS eb
INNER JOIN event_edges AS eg ON eg.prev_event_id = eb.event_id
INNER JOIN events AS e ON e.event_id = eg.event_id
WHERE eb.room_id = ?
""",
(room_id,),
)
min_depth, = txn.fetchone()
logger.info("[purge] updating room_depth to %d", min_depth)
txn.execute(
"UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
(min_depth, room_id),
)
# finally, drop the temp table. this will commit the txn in sqlite,
# so make sure to keep this actually last.
txn.execute("DROP TABLE events_to_purge")
logger.info("[purge] done")
def _find_unreferenced_groups_during_purge(self, txn, state_groups):
"""Used when purging history to figure out which state groups can be
deleted and which need to be de-delta'ed (due to one of its prev groups
being scheduled for deletion).
Args:
txn
state_groups (set[int]): Set of state groups referenced by events
that are going to be deleted.
Returns:
tuple[set[int], set[int]]: The set of state groups that can be
deleted and the set of state groups that need to be de-delta'ed
"""
# Graph of state group -> previous group
graph = {}
        # Set of state groups that we have found to be referenced by events
referenced_groups = set()
# Set of state groups we've already seen
state_groups_seen = set(state_groups)
# Set of state groups to handle next.
next_to_search = set(state_groups)
while next_to_search:
            # We bound the size of the groups we're looking up at once, to stop the
# SQL query getting too big
if len(next_to_search) < 100:
current_search = next_to_search
next_to_search = set()
else:
current_search = set(itertools.islice(next_to_search, 100))
next_to_search -= current_search
# Check if state groups are referenced
sql = """
SELECT DISTINCT state_group FROM event_to_state_groups
LEFT JOIN events_to_purge AS ep USING (event_id)
WHERE state_group IN (%s) AND ep.event_id IS NULL
""" % (
",".join("?" for _ in current_search),
)
txn.execute(sql, list(current_search))
referenced = set(sg for sg, in txn)
referenced_groups |= referenced
# We don't continue iterating up the state group graphs for state
# groups that are referenced.
current_search -= referenced
rows = self._simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=current_search,
keyvalues={},
retcols=("prev_state_group", "state_group"),
)
prevs = set(row["state_group"] for row in rows)
# We don't bother re-handling groups we've already seen
prevs -= state_groups_seen
next_to_search |= prevs
state_groups_seen |= prevs
for row in rows:
# Note: Each state group can have at most one prev group
graph[row["state_group"]] = row["prev_state_group"]
to_delete = state_groups_seen - referenced_groups
to_dedelta = set()
for sg in referenced_groups:
prev_sg = graph.get(sg)
if prev_sg and prev_sg in to_delete:
to_dedelta.add(sg)
return to_delete, to_dedelta
@defer.inlineCallbacks
def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream
"""
to_1, so_1 = yield self._get_event_ordering(event_id1)
to_2, so_2 = yield self._get_event_ordering(event_id2)
defer.returnValue((to_1, so_1) > (to_2, so_2))
@cachedInlineCallbacks(max_entries=5000)
def _get_event_ordering(self, event_id):
res = yield self._simple_select_one(
table="events",
retcols=["topological_ordering", "stream_ordering"],
keyvalues={"event_id": event_id},
allow_none=True,
)
if not res:
raise SynapseError(404, "Could not find event %s" % (event_id,))
defer.returnValue(
(int(res["topological_ordering"]), int(res["stream_ordering"]))
)
def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
def get_all_updated_current_state_deltas_txn(txn):
sql = """
SELECT stream_id, room_id, type, state_key, event_id
FROM current_state_delta_stream
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC LIMIT ?
"""
txn.execute(sql, (from_token, to_token, limit))
return txn.fetchall()
return self.runInteraction(
"get_all_updated_current_state_deltas",
get_all_updated_current_state_deltas_txn,
)
AllNewEventsResult = namedtuple(
"AllNewEventsResult",
[
"new_forward_events",
"new_backfill_events",
"forward_ex_outliers",
"backward_ex_outliers",
],
)
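# Illustrative sketch (not part of the original module): the current-state delta
# computed by _calculate_state_delta above reduces to a comparison of two
# (type, state_key) -> event_id maps. The dicts shown in the comments below are
# made-up examples used only to show the to_delete / to_insert semantics.
def _example_state_delta(existing_state, current_state):
    # Keys that were in the old current state but not the new one get deleted.
    to_delete = [key for key in existing_state if key not in current_state]
    # Keys that are new, or whose event_id changed, get (re)inserted.
    to_insert = {
        key: ev_id
        for key, ev_id in current_state.items()
        if ev_id != existing_state.get(key)
    }
    return to_delete, to_insert
# _example_state_delta(
#     existing_state={("m.room.member", "@a:hs"): "$ev1", ("m.room.name", ""): "$ev2"},
#     current_state={("m.room.name", ""): "$ev3", ("m.room.topic", ""): "$ev4"},
# )
# -> ([("m.room.member", "@a:hs")],
#     {("m.room.name", ""): "$ev3", ("m.room.topic", ""): "$ev4"})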
| 1.242188 | 1 |
dev/buildtool/metrics.py | premm1983/Spinnaker | 0 | 2112 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics support manager."""
import logging
from buildtool import in_memory_metrics
from buildtool import prometheus_metrics
from buildtool import stackdriver_metrics
from buildtool.util import add_parser_argument
class MetricsManager(object):
"""Acts as factory for specialized BaseMetricsRegistry singleton."""
__metrics_registry = None
@staticmethod
def singleton():
"""Returns the BaseMetricsRegistry once startup_metrics is called."""
if MetricsManager.__metrics_registry is None:
raise Exception('startup_metrics was not called.')
return MetricsManager.__metrics_registry
@staticmethod
def init_argument_parser(parser, defaults):
"""Init argparser with metrics-related options."""
in_memory_metrics.init_argument_parser(parser, defaults)
prometheus_metrics.init_argument_parser(parser, defaults)
stackdriver_metrics.init_argument_parser(parser, defaults)
add_parser_argument(
parser, 'metric_name_scope', defaults, 'buildtool',
help='scope prefix for metrics generated by this tool')
add_parser_argument(
parser, 'monitoring_enabled', defaults, False, type=bool,
help='Enable monitoring to stackdriver.')
add_parser_argument(
parser, 'monitoring_flush_frequency', defaults, 5,
help='Frequency at which to push metrics in seconds.')
add_parser_argument(
parser, 'monitoring_system', defaults, 'file',
choices=['file', 'prometheus', 'stackdriver'],
help='Where to store metrics.')
@staticmethod
def startup_metrics(options):
"""Startup metrics module with concrete system."""
monitoring_systems = {
'file': in_memory_metrics.InMemoryMetricsRegistry,
'prometheus': prometheus_metrics.PrometheusMetricsRegistry,
'stackdriver': stackdriver_metrics.StackdriverMetricsRegistry
}
klas = monitoring_systems[options.monitoring_system]
    logging.info('Initializing monitoring with system="%s"', klas.__name__)
MetricsManager.__metrics_registry = klas(options)
if options.monitoring_enabled and options.monitoring_flush_frequency > 0:
MetricsManager.__metrics_registry.start_pusher_thread()
return MetricsManager.__metrics_registry
@staticmethod
def shutdown_metrics():
"""Write final metrics out to metrics server."""
registry = MetricsManager.singleton()
registry.stop_pusher_thread()
registry.flush_updated_metrics()
registry.flush_final_metrics()
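# A minimal usage sketch, not part of the original file: it assumes the caller
# supplies an argparse parser and a `defaults` dict, which is how the buildtool
# commands appear to wire this class up. Nothing here is invoked automatically.
def _example_metrics_lifecycle(argv=None):
  import argparse
  parser = argparse.ArgumentParser()
  defaults = {}
  MetricsManager.init_argument_parser(parser, defaults)
  options = parser.parse_args(argv or [])
  # With no flags given, monitoring_system defaults to 'file', so this returns
  # an InMemoryMetricsRegistry instance.
  registry = MetricsManager.startup_metrics(options)
  try:
    pass  # ... run build steps that record metrics against `registry` ...
  finally:
    MetricsManager.shutdown_metrics()
  return registry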
| 2.140625 | 2 |
src/python/pants/backend/android/tasks/aapt_builder.py | hythloday/pants | 11 | 2113 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import subprocess
from twitter.common import log
from pants.backend.android.targets.android_binary import AndroidBinary
from pants.backend.android.targets.android_resources import AndroidResources
from pants.backend.android.tasks.aapt_task import AaptTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.util.dirutil import safe_mkdir
class AaptBuilder(AaptTask):
"""Build an android bundle with compiled code and assets.
This class gathers compiled classes (an Android dex archive) and packages it with the
target's resource files. The output is an unsigned .apk, an Android application package file.
"""
@classmethod
def product_types(cls):
return ['apk']
@staticmethod
def is_app(target):
return isinstance(target, AndroidBinary)
def __init__(self, *args, **kwargs):
super(AaptBuilder, self).__init__(*args, **kwargs)
def prepare(self, round_manager):
round_manager.require_data('dex')
def render_args(self, target, resource_dir, inputs):
args = []
# Glossary of used aapt flags. Aapt handles a ton of action, this will continue to expand.
# : 'package' is the main aapt operation (see class docstring for more info).
# : '-M' is the AndroidManifest.xml of the project.
# : '-S' points to the resource_dir to "spider" down while collecting resources.
# : '-I' packages to add to base "include" set, here the android.jar of the target-sdk.
    # : '--ignore-assets' patterns for the aapt to skip. This is the default w/ 'BUILD*' added.
# : '-F' The name and location of the .apk file to output
# : additional positional arguments are treated as input directories to gather files from.
args.extend([self.aapt_tool(target.build_tools_version)])
args.extend(['package', '-M', target.manifest])
args.extend(['-S'])
args.extend(resource_dir)
args.extend(['-I', self.android_jar_tool(target.target_sdk)])
args.extend(['--ignore-assets', self.ignored_assets])
args.extend(['-F', os.path.join(self.workdir, target.app_name + '-unsigned.apk')])
args.extend(inputs)
log.debug('Executing: {0}'.format(args))
return args
def execute(self):
safe_mkdir(self.workdir)
# TODO(mateor) map stderr and stdout to workunit streams (see CR 859)
with self.context.new_workunit(name='apk-bundle', labels=[WorkUnit.MULTITOOL]):
targets = self.context.targets(self.is_app)
with self.invalidated(targets) as invalidation_check:
invalid_targets = []
for vt in invalidation_check.invalid_vts:
invalid_targets.extend(vt.targets)
for target in invalid_targets:
# 'input_dirs' is the folder containing the Android dex file
input_dirs = []
# 'gen_out' holds resource folders (e.g. 'res')
gen_out = []
mapping = self.context.products.get('dex')
for basedir in mapping.get(target):
input_dirs.append(basedir)
def gather_resources(target):
"""Gather the 'resource_dir' of the target"""
if isinstance(target, AndroidResources):
gen_out.append(os.path.join(get_buildroot(), target.resource_dir))
target.walk(gather_resources)
process = subprocess.Popen(self.render_args(target, gen_out, input_dirs))
result = process.wait()
if result != 0:
raise TaskError('Android aapt tool exited non-zero ({code})'.format(code=result))
for target in targets:
self.context.products.get('apk').add(target, self.workdir).append(target.app_name + "-unsigned.apk")
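# Illustrative sketch only: render_args above assembles an aapt command with
# roughly the shape below. Every path, version and pattern here is a made-up
# placeholder rather than a value taken from a real pants workspace.
def _example_aapt_package_command():
  return [
    '/android-sdk/build-tools/19.1.0/aapt',          # hypothetical aapt_tool path
    'package',
    '-M', 'src/android/example/AndroidManifest.xml', # the target's manifest
    '-S', 'src/android/example/res',                 # resource dir(s) to spider
    '-I', '/android-sdk/platforms/android-19/android.jar',
    '--ignore-assets', '<default patterns>:BUILD*',  # placeholder for self.ignored_assets
    '-F', '.pants.d/bundle/example-unsigned.apk',    # output apk
    '.pants.d/dex/example',                          # positional input dir with the dex file
  ]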
| 2.15625 | 2 |
fat/fat_bert_nq/ppr/apr_lib.py | kiss2u/google-research | 1 | 2114 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a class which acts as a wrapper around the PPR algorithm.
This class has the following functionality:
1. Load the KB graph,
2. Given list of seed entities, get topk entities from PPR.
3. Get unique facts between all extracted entities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank
from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor
from fat.fat_bert_nq.ppr.kb_csr_io import CsrData
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'verbose_logging', False,
'If true, all of the warnings related to data processing will be printed. '
'A number of warnings are expected for a normal NQ evaluation.')
class ApproximatePageRank(object):
"""APR main lib which is used to wrap functions around ppr algo."""
def __init__(self):
self.data = CsrData()
self.data.load_csr_data(
full_wiki=FLAGS.full_wiki, files_dir=FLAGS.apr_files_dir)
def get_topk_extracted_ent(self, seeds, alpha, topk):
"""Extract topk entities given seeds.
Args:
seeds: An Ex1 vector with weight on every seed entity
alpha: probability for PPR
topk: max top entities to extract
Returns:
extracted_ents: list of selected entities
extracted_scores: list of scores of selected entities
"""
ppr_scores = csr_personalized_pagerank(seeds, self.data.adj_mat_t_csr,
alpha)
sorted_idx = np.argsort(ppr_scores)[::-1]
extracted_ents = sorted_idx[:topk]
extracted_scores = ppr_scores[sorted_idx[:topk]]
# Check for really low values
# Get idx of First value < 1e-6, limit extracted ents till there
zero_idx = np.where(ppr_scores[extracted_ents] < 1e-6)[0]
if zero_idx.shape[0] > 0:
extracted_ents = extracted_ents[:zero_idx[0]]
return extracted_ents, extracted_scores
def get_facts(self, entities, topk, alpha, seed_weighting=True):
"""Get subgraph describing a neighbourhood around given entities.
Args:
entities: A list of Wikidata entities
topk: Max entities to extract from PPR
alpha: Node probability for PPR
seed_weighting: Boolean for performing weighting seeds by freq in passage
Returns:
unique_facts: A list of unique facts around the seeds.
"""
if FLAGS.verbose_logging:
tf.logging.info('Getting subgraph')
entity_ids = [
int(self.data.ent2id[x]) for x in entities if x in self.data.ent2id
]
if FLAGS.verbose_logging:
tf.logging.info(
str([self.data.entity_names['e'][str(x)]['name'] for x in entity_ids
]))
freq_dict = {x: entity_ids.count(x) for x in entity_ids}
seed = np.zeros((self.data.adj_mat.shape[0], 1))
if not seed_weighting:
seed[entity_ids] = 1. / len(set(entity_ids))
else:
for x, y in freq_dict.items():
seed[x] = y
seed = seed / seed.sum()
extracted_ents, extracted_scores = self.get_topk_extracted_ent(
seed, alpha, topk)
if FLAGS.verbose_logging:
tf.logging.info('Extracted ents: ')
tf.logging.info(
str([
self.data.entity_names['e'][str(x)]['name']
for x in extracted_ents
]))
facts = csr_topk_fact_extractor(self.data.adj_mat_t_csr, self.data.rel_dict,
freq_dict, self.data.entity_names,
extracted_ents, extracted_scores)
if FLAGS.verbose_logging:
tf.logging.info('Extracted facts: ')
tf.logging.info(str(facts))
# Extract 1 unique fact per pair of entities (fact with highest score)
# Sort by scores
unique_facts = {}
for (sub, obj, rel, score) in facts:
fwd_dir = (sub, obj)
rev_dir = (obj, sub)
if fwd_dir in unique_facts and score > unique_facts[fwd_dir][1]:
unique_facts[fwd_dir] = (rel, score)
elif rev_dir in unique_facts and score > unique_facts[rev_dir][1]:
unique_facts[fwd_dir] = (rel, score)
del unique_facts[rev_dir] # Remove existing entity pair
else:
unique_facts[(sub, obj)] = (rel, score)
unique_facts = list(unique_facts.items())
return unique_facts
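# Illustrative sketch (not from the original library): how the frequency-weighted
# seed vector used by get_facts above is built. `num_graph_nodes` and the entity
# ids are made-up values; the real code sizes the vector from the adjacency matrix.
def _example_seed_vector(entity_ids, num_graph_nodes):
  freq_dict = {x: entity_ids.count(x) for x in entity_ids}
  seed = np.zeros((num_graph_nodes, 1))
  for ent_id, freq in freq_dict.items():
    seed[ent_id] = freq
  # Normalise so the seed is a probability distribution over graph nodes.
  return seed / seed.sum()
# e.g. _example_seed_vector([7, 3, 7], 10) gives entity 7 twice the restart
# probability of entity 3.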
| 2.0625 | 2 |
src/optimal_gardening.py | evanlynch/optimal-gardening | 0 | 2115 |
import os
import sys
import time
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
sb.set_style("dark")
#### Initial Setup ####
#plant info
plant_info = pd.read_csv('../data/plant_data.csv')
plant_info.index.name = 'plant_index'
plants = plant_info.name.to_numpy()
plant_index = plant_info.index.to_numpy()
num_plants = len(plants)
plant_sun_req = plant_info.sun.to_numpy()
perennials = plant_info[plant_info.perennial==1].index.to_list()
problem_plants = plant_info[plant_info.problem_plant==1].index.to_list()
#calculate weighted average preference for each plant
family = ['evan','gina','liesse','lizzie','jack']
plant_info['avg_pref'] = np.average(plant_info[family],axis=1,weights=[.5,.5,0,0,0])
plant_info.drop(family,axis=1,inplace=True)
preferences = plant_info.avg_pref.to_numpy()
#bed info
bed_info = pd.read_csv('../data/bed_data.csv')
bed_info.index.name = 'bed_index'
beds = bed_info.bed.to_numpy()
bed_index = bed_info.index.to_numpy()
bed_sun_req = bed_info.sun.to_numpy()
num_beds = len(beds)
#time dimension
num_years = 3
years = np.array(range(1,num_years+1))
year_index = np.array(range(num_years))
#for keeping track of what axis is which
plant_axis = 0
bed_axis = 1
year_axis = 2
##### Constraints #####
#initialize sun constraint. 1 where plant can feasibly be planted in bed. 0 where sun requirements do not match.
sun_constraint = np.ones(shape=(num_plants,num_beds,num_years))
for p in plant_index:
for b in bed_index:
p_sun = plant_sun_req[p]
b_sun = bed_sun_req[b]
if p_sun != b_sun:
sun_constraint[p,b,:] = 0
def enforce_sun_constraint(plan,sun_constraint):
"""
Force plan to be 0 where sun requirements for plant and bed do not match.
"""
return plan*sun_constraint
def enforce_perennial_constraint(plan,plant,bed,year,perennials):
"""Forward fill plan for perennial plants. If 1 in a given bed/year, it will be 1 in same bed thereafter."""
perennial_plan = plan.copy()
#what was planted the year before
plant_last_year = perennial_plan[:,bed,year-1].argmax()
#if the plant is a perennial, plant it this year and every year thereafter
if plant in perennials:
perennial_plan[:,bed,year:] = 0 # zeros out anything else that may have been planted in bed in current and subsequent years during a previous make_neighbor call
perennial_plan[plant,bed,year:] = 1 #sets plant to 1 in bed every year after the current year
#if what was planted already in this bed was a perennial, remove it from previous years
elif plant_last_year in perennials:
perennial_plan[plant_last_year,bed,:year] = 0
return perennial_plan
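# Tiny worked example (illustrative only; the real plan arrays are built from the
# CSV data above). A 2-plant x 1-bed x 3-year plan where plant 0 is a perennial
# and the caller has just placed it in bed 0 in year 1.
_demo_perennial_plan = np.zeros((2, 1, 3))
_demo_perennial_plan[1, 0, 0] = 1  # an annual occupied the bed in year 0
_demo_perennial_plan[0, 0, 1] = 1  # proposed move: perennial into the bed in year 1
_demo_perennial_plan = enforce_perennial_constraint(_demo_perennial_plan, plant=0, bed=0, year=1, perennials=[0])
# The perennial is forward-filled: _demo_perennial_plan[0, 0, 1:] is now [1, 1].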
def enforce_disease_constraint(plan,problem_plants):
"""Creates a mask to determine if the same veg was planted in the same bed over multiple years.
Multiplies the plan for problem plants by 0 in subsequent years where we planned to put them in the same bed
"""
disease_plan = plan.copy()
#mask to determine cases where same thing was planted in the same bed yoy
same_veg_in_bed_yoy = disease_plan.cumsum(axis=year_axis)>1
#multiply plan for specific problem plants by 0
disease_plan[problem_plants] = disease_plan[problem_plants]*(abs(1-same_veg_in_bed_yoy)[problem_plants])
return disease_plan
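# Tiny worked example (illustrative only): a disease-prone plant cannot stay in
# the same bed two years running. With a 1-plant x 1-bed x 3-year plan that
# repeats plant 0 every year, only the first year survives the mask.
_demo_disease_plan = np.ones((1, 1, 3))
_demo_disease_plan = enforce_disease_constraint(_demo_disease_plan, problem_plants=[0])
# _demo_disease_plan[0, 0, :] is now [1, 0, 0].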
##### Objectives #####
#the most satisfied you could be (planting fruit or vegetable with highest preference in all beds every year)
max_yums = num_beds*num_years*np.max(preferences)
def compute_yummy_score(plan,preferences,max_yums):
"""Takes the weighted average of the preferences of each plant, weighted by the total qty of plants
in the current plan for each plant. Maximization encourages plants with higher preferences to be planted in higher quantities."""
plan_yummy = plan.copy()
plan_by_plant = plan_yummy.sum(axis=(bed_axis,year_axis))
yums = round(np.dot(preferences,plan_by_plant)/max_yums*100,1)
return yums
def compute_variety_score(plan,num_plants):
"""Sums the number of unique plants that are actually planted in the garden. Counts the number of plants that are being planted across all beds.
Then counts the number of plants with non-zero planting plan.
Maximization encourages more unique plants to be planted."""
plan_variety = plan.copy()
num_plants_in_plan = (plan_variety.sum(axis=(bed_axis,year_axis)) > 0).sum()
variety_score = round(num_plants_in_plan/num_plants*100,1)
return variety_score
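# Tiny worked example (illustrative only): two plants, two beds, one year, with
# preferences 4 and 2. Planting the favorite everywhere maxes the yummy score
# (100.0) but only uses half of the available variety (50.0).
_demo_prefs = np.array([4.0, 2.0])
_demo_score_plan = np.zeros((2, 2, 1))
_demo_score_plan[0, :, 0] = 1  # plant 0 in both beds
_demo_max_yums = 2 * 1 * np.max(_demo_prefs)  # beds * years * best preference
# compute_yummy_score(_demo_score_plan, _demo_prefs, _demo_max_yums) -> 100.0
# compute_variety_score(_demo_score_plan, num_plants=2) -> 50.0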
#### Analysis & Visualization ####
def visualize_garden(bed_info):
garden_layout = bed_info.sun.map({'Full sun':1,'Partial sun':2,'Partial shade':3}).to_numpy().reshape(14,3)
palette = ["#ffa200","#fcbd53","#ffd58f"]
f, ax = plt.subplots(figsize=(10, 6))
ax = sb.heatmap(garden_layout,linewidths=5,linecolor='white',cmap=sb.color_palette(palette),cbar=False)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
plt.rcParams.update({'font.size': 13})
return ax
def visualize_plan(bed_info,bed_index,years):
for year in years:
garden_viz = visualize_garden(bed_info)
garden_viz.set_title(f'Year {year}')
for bed in bed_index:
x = bed_info.iloc[bed].x
y = bed_info.iloc[bed].y
plt.text(x + 0.5, y + 0.5, bed_info.loc[(bed_info.x==x)&(bed_info.y==y)][f'year_{year}'].iloc[0],
horizontalalignment='center',verticalalignment='center')
def annual_bed_plan(best_plan,bed_info,plant_info,bed_index,year_index):
for t in year_index:
bed_plan = []
for b in bed_index:
plant_idx = np.argmax(best_plan[:,b,t])
plant = plant_info.iloc[plant_idx]['name']
bed_plan.append(plant)
bed_info[f'year_{t+1}'] = pd.Series(bed_plan)
return bed_info
def visualize_obj_iters(current_plan_obj_values):
objectives = []
yummy_scores = []
variety_scores = []
for i in current_plan_obj_values:
objectives.append(i[1]['objective'])
yummy_scores.append(i[1]['yummy_score'])
variety_scores.append(i[1]['variety_score'])
df = pd.DataFrame([objectives,yummy_scores,variety_scores]).T#,yummy_scores,variety_scores]).T
df.columns = ['obj_value','yummy_scores','variety_scores']#,'yummy_score','variety_score']
df.reset_index(inplace=True)
df = df.melt(id_vars=['index'],var_name='objective')
fig, ax = plt.subplots(figsize=(20,8))
sb.scatterplot(data=df,x='index',y='value',hue='objective',edgecolor=None,s=5)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
ax.set_title('Objective Values of Current Solution by Iteration')
# ax2 = plt.twinx()
# sb.scatterplot(data=df.drop_duplicates(['index','total_plants']),x='index',y='objective',edgecolor=None,ax=ax2,color='black',s=5) | 2.71875 | 3 |
adv_lib/utils/attack_utils.py | Daulbaev/adversarial-library | 55 | 2116 | import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
from functools import partial
from inspect import isclass
from typing import Callable, Optional, Dict, Union
import numpy as np
import torch
import tqdm
from torch import Tensor, nn
from torch.nn import functional as F
from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances
from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
    Generates, for each label, one random target drawn from the (num_classes - 1) classes that differ from the
    original label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1)
def get_all_targets(labels: Tensor, num_classes: int):
"""
Generates all possible targets that are different from the original labels.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
        Number of classes to generate the targets from.
Returns
-------
targets: Tensor
        All possible targets for each label. Shape: (len(labels), num_classes - 1).
"""
all_possible_targets = torch.zeros(len(labels), num_classes - 1, dtype=torch.long)
all_classes = set(range(num_classes))
for i in range(len(labels)):
this_label = labels[i].item()
other_labels = list(all_classes.difference({this_label}))
all_possible_targets[i] = torch.tensor(other_labels)
return all_possible_targets
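# Illustrative usage (a sketch, not part of the original module):
#   labels = torch.tensor([0, 2])
#   generate_random_targets(labels, num_classes=3)  # e.g. tensor([1, 0]); never equal to labels
#   get_all_targets(labels, num_classes=3)          # tensor([[1, 2], [0, 1]])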
def run_attack(model: nn.Module,
inputs: Tensor,
labels: Tensor,
attack: Callable,
targets: Optional[Tensor] = None,
batch_size: Optional[int] = None) -> dict:
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
targeted, adv_labels = False, labels
if targets is not None:
targeted, adv_labels = True, targets
batch_size = batch_size or len(inputs)
# run attack only on non already adversarial samples
already_adv = []
chunks = [tensor.split(batch_size) for tensor in [inputs, adv_labels]]
for (inputs_chunk, label_chunk) in zip(*chunks):
batch_chunk_d, label_chunk_d = [to_device(tensor) for tensor in [inputs_chunk, label_chunk]]
preds = model(batch_chunk_d).argmax(1)
is_adv = (preds == label_chunk_d) if targeted else (preds != label_chunk_d)
already_adv.append(is_adv.cpu())
not_adv = ~torch.cat(already_adv, 0)
start, end = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
forward_counter, backward_counter = ForwardCounter(), BackwardCounter()
model.register_forward_pre_hook(forward_counter)
if LooseVersion(torch.__version__) >= LooseVersion('1.8'):
model.register_full_backward_hook(backward_counter)
else:
model.register_backward_hook(backward_counter)
average_forwards, average_backwards = [], [] # number of forward and backward calls per sample
advs_chunks = []
chunks = [tensor.split(batch_size) for tensor in [inputs[not_adv], adv_labels[not_adv]]]
total_time = 0
for (inputs_chunk, label_chunk) in tqdm.tqdm(zip(*chunks), ncols=80, total=len(chunks[0])):
batch_chunk_d, label_chunk_d = [to_device(tensor.clone()) for tensor in [inputs_chunk, label_chunk]]
start.record()
advs_chunk_d = attack(model, batch_chunk_d, label_chunk_d, targeted=targeted)
# performance monitoring
end.record()
torch.cuda.synchronize()
total_time += (start.elapsed_time(end)) / 1000 # times for cuda Events are in milliseconds
average_forwards.append(forward_counter.num_samples_called / len(batch_chunk_d))
average_backwards.append(backward_counter.num_samples_called / len(batch_chunk_d))
forward_counter.reset(), backward_counter.reset()
advs_chunks.append(advs_chunk_d.cpu())
if isinstance(attack, partial) and (callback := attack.keywords.get('callback')) is not None:
callback.reset_windows()
adv_inputs = inputs.clone()
adv_inputs[not_adv] = torch.cat(advs_chunks, 0)
data = {
'inputs': inputs,
'labels': labels,
'targets': adv_labels if targeted else None,
'adv_inputs': adv_inputs,
'time': total_time,
'num_forwards': sum(average_forwards) / len(chunks[0]),
'num_backwards': sum(average_backwards) / len(chunks[0]),
}
return data
_default_metrics = OrderedDict([
('linf', linf_distances),
('l0', l0_distances),
('l1', l1_distances),
('l2', l2_distances),
])
def compute_attack_metrics(model: nn.Module,
attack_data: Dict[str, Union[Tensor, float]],
batch_size: Optional[int] = None,
metrics: Dict[str, Callable] = _default_metrics) -> Dict[str, Union[Tensor, float]]:
inputs, labels, targets, adv_inputs = map(attack_data.get, ['inputs', 'labels', 'targets', 'adv_inputs'])
if adv_inputs.min() < 0 or adv_inputs.max() > 1:
warnings.warn('Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1].')
adv_inputs.clamp_(min=0, max=1)
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
batch_size = batch_size or len(inputs)
chunks = [tensor.split(batch_size) for tensor in [inputs, labels, adv_inputs]]
all_predictions = [[] for _ in range(6)]
distances = {k: [] for k in metrics.keys()}
metrics = {k: v().to(device) if (isclass(v.func) if isinstance(v, partial) else False) else v for k, v in
metrics.items()}
append = lambda list, data: list.append(data.cpu())
for inputs_chunk, labels_chunk, adv_chunk in zip(*chunks):
inputs_chunk, adv_chunk = map(to_device, [inputs_chunk, adv_chunk])
clean_preds, adv_preds = [predict_inputs(model, chunk.to(device)) for chunk in [inputs_chunk, adv_chunk]]
list(map(append, all_predictions, [*clean_preds, *adv_preds]))
for metric, metric_func in metrics.items():
distances[metric].append(metric_func(adv_chunk, inputs_chunk).detach().cpu())
logits, probs, preds, logits_adv, probs_adv, preds_adv = [torch.cat(l) for l in all_predictions]
for metric in metrics.keys():
distances[metric] = torch.cat(distances[metric], 0)
accuracy_orig = (preds == labels).float().mean().item()
if targets is not None:
success = (preds_adv == targets)
labels = targets
else:
success = (preds_adv != labels)
prob_orig = probs.gather(1, labels.unsqueeze(1)).squeeze(1)
prob_adv = probs_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
labels_infhot = torch.zeros_like(logits_adv).scatter_(1, labels.unsqueeze(1), float('inf'))
real = logits_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
other = (logits_adv - labels_infhot).max(1).values
diff_vs_max_adv = (real - other)
nll = F.cross_entropy(logits, labels, reduction='none')
nll_adv = F.cross_entropy(logits_adv, labels, reduction='none')
data = {
'time': attack_data['time'],
'num_forwards': attack_data['num_forwards'],
'num_backwards': attack_data['num_backwards'],
'targeted': targets is not None,
'preds': preds,
'adv_preds': preds_adv,
'accuracy_orig': accuracy_orig,
'success': success,
'probs_orig': prob_orig,
'probs_adv': prob_adv,
'logit_diff_adv': diff_vs_max_adv,
'nll': nll,
'nll_adv': nll_adv,
'distances': distances,
}
return data
def print_metrics(metrics: dict) -> None:
np.set_printoptions(formatter={'float': '{:0.3f}'.format}, threshold=16, edgeitems=3,
linewidth=120) # To print arrays with less precision
print('Original accuracy: {:.2%}'.format(metrics['accuracy_orig']))
print('Attack done in: {:.2f}s with {:.4g} forwards and {:.4g} backwards.'.format(
metrics['time'], metrics['num_forwards'], metrics['num_backwards']))
success = metrics['success'].numpy()
fail = bool(success.mean() != 1)
print('Attack success: {:.2%}'.format(success.mean()) + fail * ' - {}'.format(success))
for distance, values in metrics['distances'].items():
data = values.numpy()
print('{}: {} - Average: {:.3f} - Median: {:.3f}'.format(distance, data, data.mean(), np.median(data)) +
fail * ' | Avg over success: {:.3f}'.format(data[success].mean()))
attack_type = 'targets' if metrics['targeted'] else 'correct'
print('Logit({} class) - max_Logit(other classes): {} - Average: {:.2f}'.format(
attack_type, metrics['logit_diff_adv'].numpy(), metrics['logit_diff_adv'].numpy().mean()))
print('NLL of target/pred class: {:.3f}'.format(metrics['nll_adv'].numpy().mean()))
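# Hedged end-to-end sketch (`model`, `images`, `labels` and `some_attack_fn` are assumptions,
# not part of this module):
#   attack = partial(some_attack_fn, steps=100)               # extra kwargs travel with the partial
#   attack_data = run_attack(model, images, labels, attack=attack, batch_size=64)
#   metrics = compute_attack_metrics(model, attack_data, batch_size=64)
#   print_metrics(metrics)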
| 2.1875 | 2 |
thawSlumpChangeDet/polygons_compare.py | Summer0328/ChangeDet_DL-1 | 3 | 2117 | #!/usr/bin/env python
# Filename: polygons_cd
"""
introduction: compare two groups of polygons stored in two shape files
authors: <NAME>
email:<EMAIL>
add time: 26 February, 2020
"""
import sys,os
from optparse import OptionParser
# added path of DeeplabforRS
sys.path.insert(0, os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS'))
import basic_src.io_function as io_function
import basic_src.basic as basic
import basic_src.map_projection as map_projection
import parameters
import polygons_cd_multi
import polygons_cd
def main(options, args):
old_shp_path = args[0]
new_shp_path = args[1]
# check files do exist
assert io_function.is_file_exist(new_shp_path)
assert io_function.is_file_exist(old_shp_path)
# check projection of the shape file, should be the same
old_shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(old_shp_path)
new_shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(new_shp_path)
if old_shp_proj4 != new_shp_proj4:
        raise ValueError('error, projection inconsistency between %s and %s' % (old_shp_proj4, new_shp_proj4))
main_shp_name = polygons_cd_multi.get_main_shp_name(old_shp_path,new_shp_path)
# conduct change detection
if options.output is not None:
main_shp_name = options.output
# get expanding and shrinking parts
output_path_expand = 'expand_' + main_shp_name
output_path_shrink = 'shrink_' + main_shp_name
polygons_cd.polygons_change_detection(old_shp_path, new_shp_path, output_path_expand,output_path_shrink)
if __name__ == "__main__":
usage = "usage: %prog [options] old_shape_file new_shape_file "
parser = OptionParser(usage=usage, version="1.0 2020-02-26")
parser.description = 'Introduction: compare two groups of polygons '
parser.add_option("-p", "--para",
action="store", dest="para_file",
help="the parameters file")
parser.add_option('-o', '--output',
action="store", dest = 'output',
help='the path to save the change detection results')
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(2)
# # set parameters files
# if options.para_file is None:
# print('error, no parameters file')
# parser.print_help()
# sys.exit(2)
# else:
# parameters.set_saved_parafile_path(options.para_file)
basic.setlogfile('polygons_changeDetection.log')
main(options, args)
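    # Example invocation (hypothetical shapefile names):
    #   python polygons_compare.py polygons_2018.shp polygons_2019.shp -o changes.shp
    # which writes the results to expand_changes.shp and shrink_changes.shp.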
| 2.859375 | 3 |
andela_labs/Car Class Lab (OOP)/car.py | brotich/andela_bootcamp_X | 0 | 2118 | <reponame>brotich/andela_bootcamp_X
class Car(object):
"""
Car class that can be used to instantiate various vehicles.
It takes in arguments that depict the type, model, and name
of the vehicle
"""
def __init__(self, name="General", model="GM", car_type="saloon"):
num_of_wheels = 4
num_of_doors = 4
if car_type == "trailer":
num_of_wheels = 8
if name == "Porshe" or name == "Koenigsegg":
num_of_doors = 2
self.name = name
self.model = model
self.type = car_type
self.num_of_doors = num_of_doors
self.num_of_wheels = num_of_wheels
self.speed = 0
def drive(self, gear):
if self.type == "trailer":
self.speed = gear * 77 / 7
elif self.type == "saloon":
self.speed = gear * 1000 / 3
return self
def is_saloon(self):
return self.type == 'saloon'
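# Illustrative usage (not part of the original lab file):
#   koenigsegg = Car("Koenigsegg", "Agera R")
#   koenigsegg.num_of_doors        # 2
#   koenigsegg.drive(3).speed      # 1000.0 for a saloon in 3rd gear
#   koenigsegg.is_saloon()         # True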
| 3.875 | 4 |
CV Model/Model - JupyterNotebook/mrcnn/tfliteconverter.py | fcsiba/Smart-Cart | 0 | 2119 | <filename>CV Model/Model - JupyterNotebook/mrcnn/tfliteconverter.py
import tensorflow as tf
# Convert the model.
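# Note: from_saved_model expects the path to a SavedModel *directory* (as produced by
# tf.saved_model.save or model.save), so 'model.py' here would need to point at such a directory.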
converter = tf.lite.TFLiteConverter.from_saved_model('model.py')
tflite_model = converter.convert()
open("trash_ai.tflite", "wb").write(tflite_model) | 2.46875 | 2 |
basicapp/cron.py | shivamsinghal212/Url-Shortener | 0 | 2120 | from django_cron import CronJobBase, Schedule
from .models import Link
from django.utils import timezone
class MyCronJob(CronJobBase):
    RUN_EVERY_MINS = 1 # run every minute
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'basicapp.cron' # a unique code
def do(self):
current_time = timezone.now()
links = Link.objects.all()
for obj in links:
print("Checking last hit date for: ", obj.shortenURL)
delta = current_time - obj.last_hit
if delta.days > 2:
print('link is older than 2 days, DELETING!')
obj.delete()
else:
print('link was recently hit, Wont Delete.')
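# To schedule this job, django-cron expects it to be listed in settings (assumed project layout):
#   CRON_CLASSES = ["basicapp.cron.MyCronJob"]
# and the registered jobs are then run via `python manage.py runcrons`, typically from a system crontab.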
| 2.84375 | 3 |
weasyprint/tests/test_stacking.py | Smylers/WeasyPrint | 0 | 2121 | # coding: utf8
"""
weasyprint.tests.stacking
-------------------------
:copyright: Copyright 2011-2012 <NAME> and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from ..stacking import StackingContext
from .test_boxes import serialize
from .test_layout import parse
from .testing_utils import assert_no_logs
def to_lists(page):
html, = page.children
return serialize_stacking(StackingContext.from_box(html, page))
def serialize_box(box):
return '%s %s' % (box.element_tag, box.sourceline)
def serialize_stacking(context):
return (
serialize_box(context.box),
[serialize_box(b) for b in context.blocks_and_cells],
[serialize_stacking(c) for c in context.zero_z_contexts],
)
@assert_no_logs
def test_nested():
page, = parse('''\
<p id=lorem></p>
<div style="position: relative">
<p id=lipsum></p>
</p>
''')
assert to_lists(page) == (
'html 1',
['body 1', 'p 1'],
[(
'div 2',
['p 3'],
[])])
page, = parse('''\
<div style="position: relative">
<p style="position: relative"></p>
</div>
''')
assert to_lists(page) == (
'html 1',
['body 1'],
[('div 1', [], []), # In this order
('p 2', [], [])])
@assert_no_logs
def test_image_contexts():
page, = parse('''
<body>Some text: <img style="position: relative" src=pattern.png>''')
html, = page.children
context = StackingContext.from_box(html, page)
# The image is *not* in this context:
assert serialize([context.box]) == [
('html', 'Block', [
('body', 'Block', [
('body', 'Line', [
('body', 'Text', 'Some text: ')])])])]
# ... but in a sub-context:
assert serialize(c.box for c in context.zero_z_contexts) == [
('img', 'InlineReplaced', '<replaced>')]
| 2.25 | 2 |
django-magic-link/customers/views.py | industrydive/sourcelist | 5 | 2122 | from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from sesame import utils
from django.core.mail import send_mail
def login_page(request):
if request.method == "POST":
email = request.POST.get("emailId")
user = User.objects.get(email=email)
login_token = utils.get_query_string(user)
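        # get_query_string returns a ready-made query string (e.g. "?url_auth_token=...";
        # the exact parameter name depends on the django-sesame version) that logs `user` in
        # when the link is opened, provided sesame's authentication backend is enabled in settings.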
login_link = "http://127.0.0.1:8000/customers/{}".format(login_token)
html_message = """
<p>Hi there,</p>
<p>Here is your <a href="{}">magic link</a> </p>
<p>Thanks,</p>
<p>Django Admin</p>
""".format(login_link)
send_mail(
'Django Magic Link',
html_message,
'<EMAIL>',
[email],
fail_silently=False,
html_message = html_message
)
return render(request, "login.html", context={"message":"Please check your email for magic link."})
return render(request, "login.html")
@login_required
def customers_home_page(request):
return render(request, "customers/index.html") | 2.140625 | 2 |
python-lib/config/dss_parameter.py | dataiku/dss-plugin-nlp-analysis | 1 | 2123 | from .custom_check import CustomCheck, CustomCheckError
from typing import Any, List
import logging
logger = logging.getLogger(__name__)
class DSSParameterError(Exception):
"""Exception raised when at least one CustomCheck fails."""
pass
class DSSParameter:
"""Object related to one parameter. It is mainly used for checks to run in backend for custom forms.
Attributes:
name(str): Name of the parameter
value(Any): Value of the parameter
checks(list[dict], optional): Checks to run on provided value
required(bool, optional): Whether the value can be None
"""
def __init__(
self, name: str, value: Any, checks: List[dict] = None, required: bool = False
):
"""Initialization method for the DSSParameter class
Args:
name(str): Name of the parameter
value(Any): Value of the parameter
checks(list[dict], optional): Checks to run on provided value
required(bool, optional): Whether the value can be None
"""
if checks is None:
checks = []
self.name = name
self.value = value
self.checks = [CustomCheck(**check) for check in checks]
if required:
self.checks.append(CustomCheck(type="exists"))
self.run_checks()
def run_checks(self):
"""Runs all checks provided for this parameter"""
errors = []
for check in self.checks:
try:
check.run(self.value)
except CustomCheckError as err:
errors.append(err)
if errors:
self.handle_failure(errors)
self.handle_success()
def handle_failure(self, errors: List[CustomCheckError]):
"""Is called when at least one test fails. It will raise an Exception with understandable text
Args:
errors(list[CustomCheckError]: Errors met when running checks
Raises:
DSSParameterError: Raises if at least on check fails
"""
raise DSSParameterError(self.format_failure_message(errors))
def format_failure_message(self, errors: List[CustomCheckError]) -> str:
"""Format failure text
Args:
            errors(list[CustomCheckError]): Errors met when running checks
Returns:
str: Formatted error message
"""
return """
Error for parameter \"{name}\" :
{errors}
""".format(
name=self.name, errors="\n".join(["\t {}".format(e) for e in errors])
)
def handle_success(self):
"""Called if all checks are successful. Prints a success message"""
self.print_success_message()
def print_success_message(self):
"""Formats the succee message"""
logger.info("All checks have been successfully done for {}.".format(self.name))
def __repr__(self):
return "DSSParameter(name={}, value={})".format(self.name, self.value)
def __str__(self):
return "DSSParameter(name={}, value={})".format(self.name, self.value)
| 3.4375 | 3 |
misc/import_ch_zurich.py | mstarikov/transitfeed | 0 | 2124 | <filename>misc/import_ch_zurich.py
#!/usr/bin/python2.4
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""imports Zurich timetables, converting them from DIVA export format
to Google Transit format."""
from __future__ import print_function
# This was written before transitfeed.py and we haven't yet found the
# motivation to port it. Please see the examples directory for better
# examples.
try:
from io import StringIO as cStringIO
except ImportError:
import cStringIO
import csv
import datetime
import optparse
import sys
import urllib
import zipfile
# Zurich tram lines
TRAM_LINES = {'2': ['FF3300', 'FFFFFF'],
'3': ['009933', 'FFFFFF'],
'4': ['333399', 'FFFFFF'],
'5': ['996600', 'FFFFFF'],
'6': ['CC9933', 'FFFFFF'],
'7': ['000000', 'FFFFFF'],
'8': ['99CC00', '000000'],
'9': ['333399', 'FFFFFF'],
'10': ['FF6699', 'FFFFFF'],
'11': ['009933', 'FFFFFF'],
'12': ['FFFFFF', '000000'],
'13': ['FFCC33', '000000'],
'14': ['3399CC', 'FFFFFF'],
'15': ['FF3300', 'FFFFFF']}
# Terms that indicate points of interest. Used to split station names
# to (name, city).
POI_TERMS = {'Bahnhof': 1, 'Dorfzentrum': 1, 'Schiffstation': 1,
'Station': 1, u'Zentrum': 1,
'Dorfplatz': 1, 'Zentrum/Bahnhof': 1, 'Dorf': 1}
# Maps station names to (name, city). Used as exception list where our
# simple heuristcs doesn't work.
SPECIAL_NAMES = {
'Freienbach SOB, Bahnhof': ('Freienbach SOB', 'Freienbach'),
'Herrliberg-Feldmeilen,Bhf West': ('Bahnhof West', 'Herrliberg-Feldmeilen'),
'Neue Forch': ('Neue Forch', u'Z\u00fcrich'),
'O<NAME>': ('O<NAME>', 'Oberrieden'),
'Spital Zollikerberg': ('Spital', 'Zollikerberg'),
'Triemli': ('Triemli', u'Z\u00fcrich'),
'Zentrum Glatt': ('Zentrum Glatt', 'Wallisellen'),
}
# Cities whose names we want to prettify/correct at import time.
SPECIAL_CITIES = {
'Affoltern a. A.': 'Affoltern am Albis',
'Wangen b. D.': 'Wangen'
}
def read_csv(s, cols):
csv_dialect = csv.Sniffer().sniff(s[0])
reader = csv.reader(s, csv_dialect)
header = next(reader)
col_index = [-1] * len(cols)
for i in range(len(cols)):
if cols[i] in header:
col_index[i] = header.index(cols[i])
for row in reader:
result = [None] * len(cols)
for i in range(len(cols)):
ci = col_index[i]
if ci >= 0:
result[i] = row[ci].decode('iso-8859-1').strip()
yield result
def convert_c_h1903(x, y):
"Converts coordinates from the 1903 Swiss national grid system to WGS-84."
yb = (x - 600000.0) / 1e6;
xb = (y - 200000.0) / 1e6;
lam = 2.6779094 \
+ 4.728982 * yb \
+ 0.791484 * yb * xb \
+ 0.1306 * yb * xb * xb \
- 0.0436 * yb * yb * yb
phi = 16.9023892 \
+ 3.238372 * xb \
- 0.270978 * yb * yb \
- 0.002582 * xb * xb \
- 0.0447 * yb * yb * xb \
- 0.0140 * xb * xb * xb
return phi * 100.0 / 36.0, lam * 100.0 / 36.0
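# Worked example: the CH1903 grid origin (the old Bern observatory) is at x=600000, y=200000,
# so yb = xb = 0 and the series reduce to lam = 2.6779094, phi = 16.9023892, giving roughly
# (46.95108, 7.43864) degrees -- i.e. Bern, as expected.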
def encode_for_csv(x):
"Encodes one value for CSV."
k = x.encode('utf-8')
if ',' in k or '"' in k:
return '"%s"' % k.replace('"', '""')
else:
return k
def write_row(stream, values):
"writes one row of comma-separated values to stream."
stream.write(','.join([encode_for_csv(val) for val in values]))
stream.write('\n')
class Station:
pass
class Route:
pass
class Pattern:
pass
class Trip:
pass
# https://developers.google.com/transit/gtfs/
TYPE_TRAM = 0
TYPE_BUS = 3
class Divaimporter:
def __init__(self, coord_converter, drop_unadvertised_lines):
self.coord_converter = coord_converter
self.stations = {} # id --> Station
self.routes = {} # id --> Route
self.patterns = {} # id --> Pattern
self.services = {} # id --> [date, date, ...] (sorted)
self.pickup_type = {} # (trip_id, stop_seq) --> '0'=normal/'1'=no pickup
self.drop_off_type = {} # (trip_id, stop_seq) --> '0'/'1', '1'=no drop-off
self.trips = {} # id --> Trip
self.goodTrips = {}
self._drop_unadvertised_lines = drop_unadvertised_lines
@staticmethod
def demangle_name(name):
"Applies some simple heuristics to split names into (city, name)."
# Handle special cases where our heuristcs doesn't work.
# Example:"Triemli" --> ("Triemli", "Zurich").
if name in SPECIAL_NAMES:
return SPECIAL_NAMES[name]
# Expand abbreviations.
for abbrev, expanded in [('str.', 'strasse'),
('Schiffst.', 'Schiffstation')]:
suffix_pos = name.rfind(abbrev)
if suffix_pos > 0:
name = name[:suffix_pos] + expanded
# end for
names = name.split(", ", 1)
if len(names) == 2:
if names[1] in POI_TERMS:
nam = u'%s %s' % (names[0], names[1])
else:
nam = names[1]
city = names[0]
else:
# "Zurich Enge": First word of station name designates the city
nam = names[0]
city = nam.split(' ')[0]
return nam, SPECIAL_CITIES.get(city, city)
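    # Traced examples of the heuristics above (station names other than 'Triemli' are illustrative):
    #   'Triemli'             -> ('Triemli', u'Z\u00fcrich') via SPECIAL_NAMES
    #   'Uster, Bahnhof'      -> ('Uster Bahnhof', 'Uster'), since 'Bahnhof' is a POI term
    #   'Uster, Bahnhofstr.'  -> ('Bahnhofstrasse', 'Uster') after the abbreviation expansion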
def import_feeds(self, inpath):
inzip = zipfile.ZipFile(inpath, mode="r")
read = lambda name, prefix="": (prefix + inzip.read(name)).splitlines()
# The advertised lines file has no column headers.
self.import_stations(read('rec_ort.mdv'), read('bedienendeLinien_google.csv',
"ORT_NR;LI_NR;;;;"))
self.import_routes(read('rec_lin_ber.mdv'))
self.import_patterns(read('lid_verlauf.mdv'))
self.import_services(read('tagesart_merkmal.mdv'),
read('firmenkalender.mdv'))
self.import_traffic_restrictions(read('vb_regio.mdv'))
self.import_boarding(read('bedverb.mdv'))
self.import_stop_times(read('lid_fahrzeitart.mdv'))
self.import_trips(read('rec_frt.mdv'))
def import_stations(self, station_file, adv_file):
"imports the rec_ort.mdv file."
for id, name, x, y, uic_code in \
read_csv(station_file, ['ORT_NR', 'ORT_NAME',
'ORT_POS_X', 'ORT_POS_Y', 'ORT_NR_NATIONAL']):
station = Station()
station.id = id
station.position = self.coord_converter(float(x), float(y))
station.uic_code = ''
if uic_code and len(uic_code) == 7 and uic_code[:2] == '85':
station.uic_code = uic_code
station.name, station.city = self.demangle_name(name)
station.country = 'CH'
station.url = 'http://fahrplan.zvv.ch/?to.0=' + \
urllib.quote(name.encode('iso-8859-1'))
station.advertised_lines = set()
self.stations[id] = station
for station_id, line_id in read_csv(adv_file, ['ORT_NR', 'LI_NR']):
if station_id in self.stations:
# Line ids in this file have leading zeroes, remove.
self.stations[station_id].advertised_lines.add(line_id.lstrip("0"))
else:
print("Warning, advertised lines file references " \
"unknown station, id " + station_id)
def import_routes(self, s):
"imports the rec_lin_ber.mdv file."
# the line id is really qualified with an area_id (BEREICH_NR), but the
# table of advertised lines does not include area. Fortunately, it seems
# that line ids are unique across all areas, so we can just throw it away.
for line_id, name in \
read_csv(s, ['LI_NR', 'LINIEN_BEZ_DRUCK']):
route = Route()
route.id = line_id
route.name = name
route.color = "FFFFFF"
route.color_text = "000000"
if name in TRAM_LINES:
route.type = TYPE_TRAM
route.color = TRAM_LINES[name][0]
route.color_text = TRAM_LINES[name][1]
else:
route.type = TYPE_BUS
if route.name[0:1] == "N":
route.color = "000000"
route.color_text = "FFFF00"
self.routes[route.id] = route
def import_patterns(self, s):
"imports the lid_verlauf.mdv file."
for line, strli, direction, seq, station_id in \
read_csv(s, ['LI_NR', 'STR_LI_VAR', 'LI_RI_NR', 'LI_LFD_NR', 'ORT_NR']):
pattern_id = u'Pat.%s.%s.%s' % (line, strli, direction)
pattern = self.patterns.get(pattern_id, None)
if not pattern:
pattern = Pattern()
pattern.id = pattern_id
pattern.stops = []
pattern.stoptimes = {}
self.patterns[pattern_id] = pattern
seq = int(seq) - 1
if len(pattern.stops) <= seq:
pattern.stops.extend([None] * (seq - len(pattern.stops) + 1))
pattern.stops[seq] = station_id
def import_boarding(self, drop_off_file):
"Reads the bedverb.mdv file."
for trip_id, seq, code in \
read_csv(drop_off_file, ['FRT_FID', 'LI_LFD_NR', 'BEDVERB_CODE']):
key = (trip_id, int(seq) - 1)
if code == 'A':
self.pickup_type[key] = '1' # '1' = no pick-up
elif code == 'E':
self.drop_off_type[key] = '1' # '1' = no drop-off
elif code == 'B':
# 'B' just means that rider needs to push a button to have the driver
# stop. We don't encode this for now.
pass
else:
raise ValueError('Unexpected code in bedverb.mdv; '
'FRT_FID=%s BEDVERB_CODE=%s' % (trip_id, code))
def import_services(self, daytype_file, days_file):
daytypes = {} # 'j06' --> {20060713:1, 20060714:1, ...}
schedules = {} # {'j06':1, 'p27':1}
for schedule, daytype, date in \
read_csv(days_file, ['FPL_KUERZEL', 'TAGESART_NR', 'BETRIEBSTAG']):
schedule = schedule.strip()
daytypes.setdefault('%s.%s' % (schedule, daytype), {})[int(date)] = 1
schedules[schedule] = 1
schedules = schedules.keys()
service_days = {} # 'Cj06.H9' --> {20060713:1, 20060714:1, ...}
for daytype, service_id in \
read_csv(daytype_file, ['TAGESART_NR', 'TAGESMERKMAL_NR']):
for schedule in schedules:
service = 'C%s.%s' % (schedule, service_id)
for date in daytypes['%s.%s' % (schedule, daytype)].iterkeys():
service_days.setdefault(service, {})[date] = 1
for k in service_days.iterkeys():
self.services[k] = service_days[k].keys()
self.services[k].sort()
def import_traffic_restrictions(self, restrictions_file):
"Reads the vb_regio.mdv file."
ParseDate = lambda x: datetime.date(int(x[:4]), int(x[4:6]), int(x[6:8]))
MonthNr = lambda x: int(x[:4]) * 12 + int(x[4:6])
for schedule, id, bitmask, start_date, end_date in \
read_csv(restrictions_file,
['FPL_KUERZEL', 'VB', 'VB_DATUM', 'DATUM_VON', 'DATUM_BIS']):
id = u"VB%s.%s" % (schedule, id)
bitmask = bitmask.strip()
dates = {}
# This is ugly as hell, I know. I briefly explain what I do:
# 8 characters in the bitmask equal a month ( 8 * 4bits = 32, no month has
# more than 31 days, so it's ok).
# Then I check if the current day of the month is in the bitmask (by
# shifting the bit by x days and comparing it to the bitmask).
# If so I calculate back what year month and actual day I am in
# (very disgusting) and mark that date...
for i in range(MonthNr(end_date) - MonthNr(start_date) + 1):
mask = int(bitmask[i * 8:i * 8 + 8], 16)
for d in range(32):
if 1 << d & mask:
year = int(start_date[0:4]) + ((int(start_date[4:6]) + i - 1)) / 12
month = ((int(start_date[4:6]) + i - 1) % 12) + 1
day = d + 1
cur_date = str(year) + ("0" + str(month))[-2:] + ("0" + str(day))[-2:]
dates[int(cur_date)] = 1
self.services[id] = dates.keys()
self.services[id].sort()
def import_stop_times(self, stoptimes_file):
"imports the lid_fahrzeitart.mdv file."
for line, strli, direction, seq, stoptime_id, drive_secs, wait_secs in \
read_csv(stoptimes_file,
['LI_NR', 'STR_LI_VAR', 'LI_RI_NR', 'LI_LFD_NR',
'FGR_NR', 'FZT_REL', 'HZEIT']):
pattern = self.patterns[u'Pat.%s.%s.%s' % (line, strli, direction)]
stoptimes = pattern.stoptimes.setdefault(stoptime_id, [])
seq = int(seq) - 1
drive_secs = int(drive_secs)
wait_secs = int(wait_secs)
assert len(stoptimes) == seq # fails if seq not in order
stoptimes.append((drive_secs, wait_secs))
def import_trips(self, trips_file):
"imports the rec_frt.mdv file."
for trip_id, trip_starttime, line, strli, direction, \
stoptime_id, schedule_id, daytype_id, restriction_id, \
dest_station_id, dest_stop_id, trip_type in \
read_csv(trips_file,
['FRT_FID', 'FRT_START', 'LI_NR', 'STR_LI_VAR', 'LI_RI_NR',
'FGR_NR', 'FPL_KUERZEL', 'TAGESMERKMAL_NR', 'VB',
'FRT_HP_AUS', 'HALTEPUNKT_NR_ZIEL', 'FAHRTART_NR']):
if trip_type != '1':
print("skipping Trip ", trip_id, line, direction, \
dest_station_id, trip_type)
continue # 1=normal, 2=empty, 3=from depot, 4=to depot, 5=other
trip = Trip()
            # The trip_id (FRT_FID) field is not unique in the VBZ data as of Dec 2009.
            # To prevent overwriting imported trips when we key them by trip.id,
            # we make trip.id unique by combining trip_id and line.
trip.id = ("%s_%s") % (trip_id, line)
trip.starttime = int(trip_starttime)
trip.route = self.routes[line]
dest_station = self.stations[dest_station_id]
pattern_id = u'Pat.%s.%s.%s' % (line, strli, direction)
trip.pattern = self.patterns[pattern_id]
trip.stoptimes = trip.pattern.stoptimes[stoptime_id]
if restriction_id:
service_id = u'VB%s.%s' % (schedule_id, restriction_id)
else:
service_id = u'C%s.%s' % (schedule_id, daytype_id)
trip.service_id = service_id
assert len(self.services[service_id]) > 0
assert not trip.id in self.trips
self.trips[trip.id] = trip
def write(self, outpath):
"writes a .zip file in Google Transit format."
out = zipfile.ZipFile(outpath, mode="w", compression=zipfile.ZIP_DEFLATED)
for filename, func in [('agency.txt', self.write_agency),
('calendar.txt', self.write_calendar),
('calendar_dates.txt', self.write_calendarDates),
('routes.txt', self.write_routes),
('trips.txt', self.write_trips),
('stops.txt', self.write_stations),
('stop_times.txt', self.write_stop_times)]:
s = cStringIO.StringIO()
func(s)
out.writestr(filename, s.getvalue())
out.close()
@staticmethod
def write_agency(out):
out.write('agency_name,agency_url,agency_lang,agency_timezone\n')
out.write('VBZ,http://www.vbz.ch/,de,Europe/Zurich\n')
def write_routes(self, out):
out.write('route_id,route_short_name,route_long_name,route_type,'
'route_color,route_text_color\n')
k = [(r.id, r) for r in self.routes.itervalues()]
k.sort()
for id, route in k:
name = encode_for_csv(route.name)
out.write('%s,%s,%s,%s,%s,%s\n' % (
id, name, name, route.type, route.color, route.color_text))
def write_stations(self, out):
out.write('stop_id,stop_uic_code,stop_name,stop_city,stop_country,'
'stop_lat,stop_lon,stop_url\n')
stations = [(s.id, s) for s in self.stations.itervalues()]
stations.sort()
for id, s in stations:
write_row(out,
[id, s.uic_code, s.name, s.city, s.country,
str(s.position[0]), str(s.position[1]), s.url])
def write_calendar(self, out):
out.write('service_id,monday,tuesday,wednesday,thursday,'
'friday,saturday,sunday,start_date,end_date\n')
for service_id, service in self.services.iteritems():
out.write('%s,0,0,0,0,0,0,0,%d,%d\n' %
(encode_for_csv(service_id), service[0], service[-1]))
def write_calendarDates(self, out):
out.write('service_id,date,exception_type\n')
for service_id, service in self.services.iteritems():
encoded_service_id = encode_for_csv(service_id)
for date in service:
out.write('%s,%d,1\n' % (encoded_service_id, date))
def write_trips(self, out):
out.write('trip_id,route_id,service_id,trip_headsign\n')
trips = [(t.id, t) for t in self.trips.itervalues()]
trips.sort()
for (trip_id, trip) in trips:
if (not len(trip.pattern.stops)) or (None in trip.pattern.stops):
print("*** Skipping bad trip: ", [trip.id])
continue
self.goodTrips[trip_id] = True
headsign = self.stations[trip.pattern.stops[-1]].name
write_row(out, [trip.id, trip.route.id, trip.service_id, headsign])
@staticmethod
def format_time(t):
return "%02d:%02d:%02d" % (t / 3600, (t % 3600) / 60, t % 60)
def write_stop_times(self, out):
out.write('trip_id,stop_sequence,stop_id,arrival_time,departure_time,'
'pickup_type,drop_off_type\n')
trips = [(t.id, t) for t in self.trips.itervalues()]
trips.sort()
for (trip_id, trip) in trips:
if trip_id not in self.goodTrips:
continue
assert len(trip.stoptimes) == len(trip.pattern.stops)
time = trip.starttime
for seq in range(len(trip.stoptimes)):
drive_time, wait_time = trip.stoptimes[seq]
time += drive_time
station = self.stations[trip.pattern.stops[seq]]
if not self._drop_unadvertised_lines or \
trip.route.id in station.advertised_lines:
write_row(out, [trip.id, str(seq + 1), station.id,
self.format_time(time),
self.format_time(time + wait_time),
self.pickup_type.get((trip.id, seq), '0'),
self.drop_off_type.get((trip.id, seq), '0')])
time += wait_time
def main(argv):
# It's hard to replicate the old behavior of --drop_unadvertised_lines, so we
# don't. Instead, there are only two options without arguments:
# nothing drop
# --nodrop_unadvertised_lines do not drop
# --drop_unadvertised_lines drop
opt_parser = optparse.OptionParser()
# drop_unadvertised_lines: Only export the departures of lines that
# are advertised at the station in question. This is used to remove
# depot trips etc, to not confuse the data in schedule bubbles. Use
# --nodrop_unadvertised_lines to disable that.
opt_parser.add_option('--drop_unadvertised_lines', action='store_true',
dest='drop_unadvertised_lines', default=True)
opt_parser.add_option('--nodrop_unadvertised_lines', action='store_false',
dest='drop_unadvertised_lines')
opt_parser.add_option('--in_file', action='store', type='string')
opt_parser.add_option('--out_file', action='store', type='string')
options, unused_arguments = opt_parser.parse_args(argv[1:])
if options.in_file is None:
raise SystemExit('Please provide a value to the --in_file flag.')
if options.out_file is None:
raise SystemExit('Please provide a value to the --out_file flag.')
importer = Divaimporter(convert_c_h1903, options.drop_unadvertised_lines)
    importer.import_feeds(options.in_file)
importer.write(options.out_file)
print('Wrote output to', options.out_file)
if __name__ == '__main__':
main(sys.argv)
| 2.25 | 2 |
modules/documents.py | rotsee/protokollen | 4 | 2125 | # -*- coding: utf-8 -*-
"""This module contains classes for documents, and lists of documents.
Documents are defined by the document rules in settings.py
A file can contain one or more documents. However, a document cannot
be constructed from more than one file. This is a limitation,
obvious in cases like Gotlands kommun, where meeting minutes are
split up in a large number of files.
"""
import settings
from modules.utils import make_unicode, last_index
from modules.extractors.documentBase import ExtractionNotAllowed
document_headers = {
"Content-Type": "text/plain",
"Content-Disposition": "attachment",
"Cache-Control": "public"
}
class DocumentList(object):
"""Contains a list of documents, extracted from a file.
"""
def __init__(self, extractor):
"""Create a list of documents, using `extractor`
"""
self._documents = []
page_types_and_dates = []
"""Keep track of documents by type and date, to be able to merge
documents depending on `settings.document_type_settings`
"""
# Loop through pages, and add pages of the same type and date together
last_page_type = None
last_page_date = None
documents = []
try:
for page in extractor.get_next_page():
temp_doc = Document(page, extractor)
if (len(documents) > 0 and
temp_doc.type_ == last_page_type and
temp_doc.date == last_page_date):
documents[-1].merge_with(temp_doc)
else:
documents.append(temp_doc)
page_types_and_dates.append((temp_doc.type_, temp_doc.date))
last_page_type = temp_doc.type_
last_page_date = temp_doc.date
except ExtractionNotAllowed:
raise ExtractionNotAllowed
# merge documents, if disallow_infixes == True
doc_settings = settings.document_type_settings
disallow_infixes = [d for d in doc_settings
if doc_settings[d]["disallow_infixes"] is True]
"""Document types that disallow holes"""
num_docs = len(page_types_and_dates)
i = 0
while i < num_docs:
(type_, date) = page_types_and_dates[i]
last_match = last_index(page_types_and_dates, (type_, date))
if type_ in disallow_infixes and last_match > i:
num_docs_to_merge = last_match - i + 1
new_doc = documents.pop(0)
for j in range(i, last_match):
new_doc.merge_with(documents.pop(0))
self._documents.append(new_doc)
i += num_docs_to_merge
else:
doc_to_merge = documents.pop(0)
self._documents.append(doc_to_merge)
i += 1
def get_next_document(self):
for document in self._documents:
yield document
def __len__(self):
"""len is the number of documents"""
return len(self._documents)
class Document(object):
"""Represents a single document
"""
text = ""
header = ""
date = None
type_ = None
def __init__(self, page, extractor):
"""Create a document stub from a page. Use add_page
to keep extending this document.
"""
self.text = page.get_text()
self.header = page.get_header() or extractor.get_header()
self.date = page.get_date() or extractor.get_date()
self.type_ = self.get_document_type()
def append_page(self, page):
"""Append content from a page to this document.
"""
pass
def append_text(self, text):
"""Append content to this document.
"""
self.text += text
def merge_with(self, document):
"""Merge this document with another one"""
try:
self.text += document.text
except UnicodeDecodeError:
self.text = make_unicode(self.text) + make_unicode(document.text)
def __len__(self):
"""len is the length of the total plaintext"""
return len(self.text)
def get_document_type(self):
"""
Return the first matching document type, based on this
header text.
"""
for document_type in settings.document_rules:
if self.parse_rules(document_type[1], self.header):
return document_type[0]
return None
def parse_rules(self, tuple_, header):
"""Parse document rules. See settings.py for syntax"""
rule_key = tuple_[0].upper()
rule_val = tuple_[1]
header = header.upper()
# --------- Logical separators --------
if rule_key == "AND":
hit = True
for rule in rule_val:
hit = hit and self.parse_rules(rule, header)
return hit
elif rule_key == "OR":
hit = False
for rule in rule_val:
hit = hit or self.parse_rules(rule, header)
return hit
elif rule_key == "NOT":
hit = not self.parse_rules(rule_val, header)
return hit
# -------------- Rules ----------------
elif rule_key == "HEADER_CONTAINS":
try:
pos = make_unicode(header).find(rule_val.upper())
except UnicodeDecodeError:
pos = -1
return pos > -1
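    # Example of the rule syntax that parse_rules handles (an illustration, not copied from settings.py):
    #   ("AND", [("HEADER_CONTAINS", "protokoll"),
    #            ("NOT", ("HEADER_CONTAINS", "bilaga"))])
    # matches headers that contain "protokoll" but not "bilaga".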
if __name__ == "__main__":
print "This module is only intended to be called from other scripts."
import sys
sys.exit()
| 2.796875 | 3 |
tools/amp_segment/ina_speech_segmenter.py | saratkumar/galaxy | 1 | 2126 | #!/usr/bin/env python3
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import uuid
import mgm_utils
def main():
(root_dir, input_file, json_file) = sys.argv[1:4]
tmpName = str(uuid.uuid4())
tmpdir = "/tmp"
temp_input_file = f"{tmpdir}/{tmpName}.dat"
temp_output_file = f"{tmpdir}/{tmpName}.json"
shutil.copy(input_file, temp_input_file)
sif = mgm_utils.get_sif_dir(root_dir) + "/ina_segmentation.sif"
r = subprocess.run(["singularity", "run", sif, temp_input_file, temp_output_file])
shutil.copy(temp_output_file, json_file)
if os.path.exists(temp_input_file):
os.remove(temp_input_file)
if os.path.exists(temp_output_file):
os.remove(temp_output_file)
exit(r.returncode)
if __name__ == "__main__":
main()
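# Invoked as: ina_speech_segmenter.py <root_dir> <input_audio_file> <output_json_file>
# (argument order taken from sys.argv[1:4] in main(); the caller supplies the actual paths)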
| 2.234375 | 2 |
csat/django/fields.py | GaretJax/csat | 0 | 2127 | <filename>csat/django/fields.py<gh_stars>0
from lxml import etree
from django import forms
from django.db import models
class XMLFileField(models.FileField):
def __init__(self, *args, **kwargs):
self.schema = kwargs.pop('schema')
super(XMLFileField, self).__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
data = super(XMLFileField, self).clean(*args, **kwargs)
with data as fh:
doc = etree.parse(fh)
with open(self.schema) as fh:
schema = etree.XMLSchema(etree.parse(fh))
if not schema.validate(doc):
raise forms.ValidationError('The XML file failed to validate '
'against the supplied schema.')
return data
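# Illustrative model usage (field name and schema path are assumptions):
#   class GraphUpload(models.Model):
#       graphml = XMLFileField(upload_to='graphs/', schema='schemas/graphml.xsd')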
| 2.296875 | 2 |
keras_cv_attention_models/yolox/yolox.py | RishabhSehgal/keras_cv_attention_models | 0 | 2128 | import tensorflow as tf
from tensorflow import keras
from keras_cv_attention_models.attention_layers import (
activation_by_name,
batchnorm_with_activation,
conv2d_no_bias,
depthwise_conv2d_no_bias,
add_pre_post_process,
)
from keras_cv_attention_models import model_surgery
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.coco.eval_func import DecodePredictions
PRETRAINED_DICT = {
"yolox_nano": {"coco": "7c97d60d4cc9d54321176f844acee627"},
"yolox_tiny": {"coco": "f9b51ff24290090c86a10a45f811140b"},
"yolox_s": {"coco": "a989f5a808ddc4a8242157a6a3e64977"},
"yolox_m": {"coco": "5c2333d2f12b2f48e3ec8555b29d242f"},
"yolox_l": {"coco": "a07c48994b7a67dba421025ef39b858b"},
"yolox_x": {"coco": "de9741d3f67f50c54856bcae0f07b7ef"},
}
""" CSPDarknet backbone """
BATCH_NORM_EPSILON = 1e-3
BATCH_NORM_MOMENTUM = 0.03
def conv_dw_pw_block(inputs, filters, kernel_size=1, strides=1, use_depthwise_conv=False, activation="swish", name=""):
nn = inputs
if use_depthwise_conv:
nn = depthwise_conv2d_no_bias(nn, kernel_size, strides, padding="SAME", name=name)
nn = batchnorm_with_activation(nn, activation=activation, epsilon=BATCH_NORM_EPSILON, momentum=BATCH_NORM_MOMENTUM, name=name + "dw_")
kernel_size, strides = 1, 1
nn = conv2d_no_bias(nn, filters, kernel_size, strides, padding="SAME", name=name)
nn = batchnorm_with_activation(nn, activation=activation, epsilon=BATCH_NORM_EPSILON, momentum=BATCH_NORM_MOMENTUM, name=name)
return nn
def csp_block(inputs, expansion=0.5, use_shortcut=True, use_depthwise_conv=False, activation="swish", name=""):
input_channels = inputs.shape[-1]
nn = conv_dw_pw_block(inputs, int(input_channels * expansion), activation=activation, name=name + "1_")
nn = conv_dw_pw_block(nn, input_channels, kernel_size=3, strides=1, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "2_")
if use_shortcut:
nn = keras.layers.Add()([inputs, nn])
return nn
def csp_stack(inputs, depth, out_channels=-1, expansion=0.5, use_shortcut=True, use_depthwise_conv=False, activation="swish", name=""):
out_channels = inputs.shape[-1] if out_channels == -1 else out_channels
hidden_channels = int(out_channels * expansion)
short = conv_dw_pw_block(inputs, hidden_channels, kernel_size=1, activation=activation, name=name + "short_")
deep = conv_dw_pw_block(inputs, hidden_channels, kernel_size=1, activation=activation, name=name + "deep_")
for id in range(depth):
block_name = name + "block{}_".format(id + 1)
deep = csp_block(deep, 1, use_shortcut=use_shortcut, use_depthwise_conv=use_depthwise_conv, activation=activation, name=block_name)
out = tf.concat([deep, short], axis=-1)
out = conv_dw_pw_block(out, out_channels, kernel_size=1, activation=activation, name=name + "output_")
return out
def spatial_pyramid_pooling(inputs, pool_sizes=(5, 9, 13), activation="swish", name=""):
input_channels = inputs.shape[-1]
nn = conv_dw_pw_block(inputs, input_channels // 2, kernel_size=1, activation=activation, name=name + "1_")
pp = [keras.layers.MaxPooling2D(pool_size=ii, strides=1, padding="SAME")(nn) for ii in pool_sizes]
nn = tf.concat([nn, *pp], axis=-1)
nn = conv_dw_pw_block(nn, input_channels, kernel_size=1, activation=activation, name=name + "2_")
return nn
def focus_stem(inputs, filters, kernel_size=3, strides=1, padding="valid", activation="swish", name=""):
if padding.lower() == "same": # Handling odd input_shape
inputs = tf.pad(inputs, [[0, 0], [0, 1], [0, 1], [0, 0]])
patch_top_left = inputs[:, :-1:2, :-1:2]
patch_top_right = inputs[:, :-1:2, 1::2]
        patch_bottom_left = inputs[:, 1::2, :-1:2]
        patch_bottom_right = inputs[:, 1::2, 1::2]
else:
        patch_top_left = inputs[:, ::2, ::2]
        patch_top_right = inputs[:, ::2, 1::2]
        patch_bottom_left = inputs[:, 1::2, ::2]
        patch_bottom_right = inputs[:, 1::2, 1::2]
nn = tf.concat([patch_top_left, patch_bottom_left, patch_top_right, patch_bottom_right], axis=-1)
nn = conv_dw_pw_block(nn, filters, kernel_size=kernel_size, strides=strides, activation=activation, name=name)
return nn
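# focus_stem implements the YOLOX "Focus" (space-to-depth) stem: each 2x2 pixel block is split into
# four half-resolution slices that are stacked on the channel axis, so an (H, W, 3) input becomes
# (H/2, W/2, 12) before the first convolution.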
def CSPDarknet(width_mul=1, depth_mul=1, out_features=[-3, -2, -1], use_depthwise_conv=False, input_shape=(512, 512, 3), activation="swish", model_name=""):
base_channels, base_depth = int(width_mul * 64), max(round(depth_mul * 3), 1)
inputs = keras.layers.Input(input_shape)
""" Stem """
nn = focus_stem(inputs, base_channels, activation=activation, name="stem_")
features = [nn]
""" dark blocks """
depthes = [base_depth, base_depth * 3, base_depth * 3, base_depth]
channels = [base_channels * 2, base_channels * 4, base_channels * 8, base_channels * 16]
use_spps = [False, False, False, True]
use_shortcuts = [True, True, True, False]
for id, (channel, depth, use_spp, use_shortcut) in enumerate(zip(channels, depthes, use_spps, use_shortcuts)):
stack_name = "stack{}_".format(id + 1)
nn = conv_dw_pw_block(nn, channel, kernel_size=3, strides=2, use_depthwise_conv=use_depthwise_conv, activation=activation, name=stack_name)
if use_spp:
nn = spatial_pyramid_pooling(nn, activation=activation, name=stack_name + "spp_")
# nn = SPPBottleneck(base_channels * 16, base_channels * 16, activation=act)
nn = csp_stack(nn, depth, use_shortcut=use_shortcut, use_depthwise_conv=use_depthwise_conv, activation=activation, name=stack_name)
features.append(nn)
nn = [features[ii] for ii in out_features]
model = keras.models.Model(inputs, nn, name=model_name)
return model
""" path aggregation fpn """
def upsample_merge(inputs, csp_depth, use_depthwise_conv=False, activation="swish", name=""):
# print(f">>>> upsample_merge inputs: {[ii.shape for ii in inputs] = }")
target_channel = inputs[-1].shape[-1]
fpn_out = conv_dw_pw_block(inputs[0], target_channel, activation=activation, name=name + "fpn_")
# inputs[0] = keras.layers.UpSampling2D(size=(2, 2), interpolation="nearest", name=name + "up")(fpn_out)
inputs[0] = tf.image.resize(fpn_out, tf.shape(inputs[-1])[1:-1], method="nearest")
nn = tf.concat(inputs, axis=-1)
nn = csp_stack(nn, csp_depth, target_channel, 0.5, False, use_depthwise_conv, activation=activation, name=name)
return fpn_out, nn
def downsample_merge(inputs, csp_depth, use_depthwise_conv=False, activation="swish", name=""):
# print(f">>>> downsample_merge inputs: {[ii.shape for ii in inputs] = }")
inputs[0] = conv_dw_pw_block(inputs[0], inputs[-1].shape[-1], 3, 2, use_depthwise_conv, activation=activation, name=name + "down_")
nn = tf.concat(inputs, axis=-1)
nn = csp_stack(nn, csp_depth, nn.shape[-1], 0.5, False, use_depthwise_conv, activation=activation, name=name)
return nn
def path_aggregation_fpn(features, depth_mul=1, use_depthwise_conv=False, activation="swish", name=""):
# p5 ─> fpn_out0 ───────────> pan_out0
# ↓ ↑
# p4 ─> f_out0 ─> fpn_out1 ─> pan_out1
# ↓ ↑
# p3 ───────────> pan_out2 ──────┘
csp_depth = max(round(depth_mul * 3), 1)
p3, p4, p5 = features # p3: [64, 64, 256], p4: [32, 32, 512], p5: [16, 16, 1024]
# fpn_out0: [16, 16, 512], f_out0: [32, 32, 512]
fpn_out0, f_out0 = upsample_merge([p5, p4], csp_depth, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "c3p4_")
# fpn_out1: [32, 32, 256], pan_out2: [64, 64, 256]
fpn_out1, pan_out2 = upsample_merge([f_out0, p3], csp_depth, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "c3p3_")
# pan_out1: [32, 32, 512]
pan_out1 = downsample_merge([pan_out2, fpn_out1], csp_depth, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "c3n3_")
# pan_out0: [16, 16, 1024]
pan_out0 = downsample_merge([pan_out1, fpn_out0], csp_depth, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "c3n4_")
return [pan_out2, pan_out1, pan_out0]
""" YOLOXHead """
def yolox_head_single(inputs, out_channels, num_classes=80, num_anchors=1, use_depthwise_conv=False, use_object_scores=True, activation="swish", name=""):
bias_init = tf.constant_initializer(-tf.math.log((1 - 0.01) / 0.01).numpy())
# stem
stem = conv_dw_pw_block(inputs, out_channels, activation=activation, name=name + "stem_")
# cls_convs, cls_preds
cls_nn = conv_dw_pw_block(stem, out_channels, kernel_size=3, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "cls_1_")
cls_nn = conv_dw_pw_block(cls_nn, out_channels, kernel_size=3, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "cls_2_")
cls_out = keras.layers.Conv2D(num_classes * num_anchors, kernel_size=1, bias_initializer=bias_init, name=name + "class_out")(cls_nn)
cls_out = activation_by_name(cls_out, "sigmoid", name=name + "class_out_")
cls_out = keras.layers.Reshape([-1, num_classes], name=name + "class_out_reshape")(cls_out)
# reg_convs, reg_preds
reg_nn = conv_dw_pw_block(stem, out_channels, kernel_size=3, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "reg_1_")
reg_nn = conv_dw_pw_block(reg_nn, out_channels, kernel_size=3, use_depthwise_conv=use_depthwise_conv, activation=activation, name=name + "reg_2_")
reg_out = keras.layers.Conv2D(4 * num_anchors, kernel_size=1, name=name + "regression_out")(reg_nn)
reg_out = keras.layers.Reshape([-1, 4], name=name + "regression_out_reshape")(reg_out)
# obj_preds
if use_object_scores:
obj_out = keras.layers.Conv2D(1 * num_anchors, kernel_size=1, bias_initializer=bias_init, name=name + "object_out")(reg_nn)
obj_out = activation_by_name(obj_out, "sigmoid", name=name + "object_out_")
obj_out = keras.layers.Reshape([-1, 1], name=name + "object_out_reshape")(obj_out)
return tf.concat([reg_out, cls_out, obj_out], axis=-1)
else:
return tf.concat([reg_out, cls_out], axis=-1)
def yolox_head(inputs, width_mul=1.0, num_classes=80, num_anchors=1, use_depthwise_conv=False, use_object_scores=True, activation="swish", name=""):
out_channel = int(256 * width_mul)
outputs = []
for id, input in enumerate(inputs):
cur_name = name + "{}_".format(id + 1)
out = yolox_head_single(input, out_channel, num_classes, num_anchors, use_depthwise_conv, use_object_scores, activation=activation, name=cur_name)
outputs.append(out)
# outputs = tf.concat([keras.layers.Reshape([-1, ii.shape[-1]])(ii) for ii in outputs], axis=1)
outputs = tf.concat(outputs, axis=1)
return outputs
""" YOLOX models """
def YOLOX(
backbone=None,
features_pick=[-3, -2, -1],
depth_mul=1,
width_mul=-1, # -1 means: `min([ii.shape[-1] for ii in features]) / 256` for custom backbones.
use_depthwise_conv=False,
use_anchor_free_mode=True,
num_anchors="auto", # "auto" means 1 if use_anchor_free_mode else 9
use_object_scores="auto", # "auto" means same with use_anchor_free_mode
input_shape=(640, 640, 3),
num_classes=80,
activation="swish",
freeze_backbone=False,
pretrained=None,
model_name="yolox",
pyramid_levels_min=3, # Init anchors for model prediction.
anchor_scale="auto", # Init anchors for model prediction. "auto" means 1 if use_anchor_free_mode else 4
rescale_mode="raw", # For decode predictions, raw means input value in range [0, 255].
kwargs=None, # Not using, recieving parameter
):
if backbone is None:
width_mul = width_mul if width_mul > 0 else 1
backbone = CSPDarknet(width_mul, depth_mul, features_pick, use_depthwise_conv, input_shape, activation=activation, model_name="darknet")
features = backbone.outputs
else:
if isinstance(features_pick[0], str):
features = [backbone.get_layer(layer_name) for layer_name in features_pick]
else:
features = model_surgery.get_pyramide_feture_layers(backbone)
features = [features[id] for id in features_pick]
print(">>>> features:", {ii.name: ii.output_shape for ii in features})
features = [ii.output for ii in features]
width_mul = width_mul if width_mul > 0 else min([ii.shape[-1] for ii in features]) / 256
print(">>>> width_mul:", width_mul)
if freeze_backbone:
backbone.trainable = False
else:
backbone.trainable = True
inputs = backbone.inputs[0]
use_object_scores = use_anchor_free_mode if use_object_scores == "auto" else use_object_scores
num_anchors = (1 if use_anchor_free_mode else 9) if num_anchors == "auto" else num_anchors
fpn_features = path_aggregation_fpn(features, depth_mul=depth_mul, use_depthwise_conv=use_depthwise_conv, activation=activation, name="pafpn_")
outputs = yolox_head(fpn_features, width_mul, num_classes, num_anchors, use_depthwise_conv, use_object_scores, activation=activation, name="head_")
outputs = keras.layers.Activation("linear", dtype="float32", name="outputs_fp32")(outputs)
model = keras.models.Model(inputs, outputs, name=model_name)
reload_model_weights(model, PRETRAINED_DICT, "yolox", pretrained)
# AA = {"aspect_ratios": anchor_aspect_ratios, "num_scales": anchor_num_scales, "anchor_scale": anchor_scale, "grid_zero_start": anchor_grid_zero_start}
pyramid_levels = [pyramid_levels_min, pyramid_levels_min + len(features_pick) - 1] # -> [3, 5]
anchor_scale = (1 if use_anchor_free_mode else 4) if anchor_scale == "auto" else anchor_scale
post_process = DecodePredictions(backbone.input_shape[1:], pyramid_levels, anchor_scale, use_anchor_free_mode, use_object_scores)
add_pre_post_process(model, rescale_mode=rescale_mode, post_process=post_process)
return model
def YOLOXNano(input_shape=(416, 416, 3), freeze_backbone=False, num_classes=80, backbone=None, activation="swish", pretrained="coco", **kwargs):
return YOLOX(**locals(), depth_mul=0.33, width_mul=0.25, use_depthwise_conv=True, model_name=kwargs.pop("model_name", "yolox_nano"), **kwargs)
def YOLOXTiny(input_shape=(416, 416, 3), freeze_backbone=False, num_classes=80, backbone=None, activation="swish", pretrained="coco", **kwargs):
return YOLOX(**locals(), depth_mul=0.33, width_mul=0.375, model_name=kwargs.pop("model_name", "yolox_tiny"), **kwargs)
def YOLOXS(input_shape=(640, 640, 3), freeze_backbone=False, num_classes=80, backbone=None, activation="swish", pretrained="coco", **kwargs):
return YOLOX(**locals(), depth_mul=0.33, width_mul=0.5, model_name=kwargs.pop("model_name", "yolox_s"), **kwargs)
def YOLOXM(input_shape=(640, 640, 3), freeze_backbone=False, num_classes=80, backbone=None, activation="swish", pretrained="coco", **kwargs):
return YOLOX(**locals(), depth_mul=0.67, width_mul=0.75, model_name=kwargs.pop("model_name", "yolox_m"), **kwargs)
def YOLOXL(input_shape=(640, 640, 3), freeze_backbone=False, num_classes=80, backbone=None, activation="swish", pretrained="coco", **kwargs):
return YOLOX(**locals(), depth_mul=1.0, width_mul=1.0, model_name=kwargs.pop("model_name", "yolox_l"), **kwargs)
def YOLOXX(input_shape=(640, 640, 3), freeze_backbone=False, num_classes=80, backbone=None, activation="swish", pretrained="coco", **kwargs):
return YOLOX(**locals(), depth_mul=1.33, width_mul=1.25, model_name=kwargs.pop("model_name", "yolox_x"), **kwargs)
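# Hedged usage sketch (output layout inferred from yolox_head above):
#   model = YOLOXS(pretrained="coco")
#   preds = model(tf.zeros([1, 640, 640, 3]))
#   # preds shape: [batch, total_anchors, 4 + num_classes + 1] -> box regression, class scores, objectness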
| 1.96875 | 2 |
robot-server/tests/service/json_api/test_response.py | mrod0101/opentrons | 0 | 2129 | <filename>robot-server/tests/service/json_api/test_response.py
from pytest import raises
from pydantic import ValidationError
from robot_server.service.json_api.response import (
ResponseDataModel,
ResponseModel,
MultiResponseModel,
)
from tests.service.helpers import ItemResponseModel
def test_attributes_as_dict() -> None:
MyResponse = ResponseModel[ResponseDataModel, None]
obj_to_validate = {
"data": {"id": "123"},
"links": None,
}
my_response_object = MyResponse(**obj_to_validate)
assert my_response_object.dict() == {
"links": None,
"data": {
"id": "123",
},
}
def test_attributes_as_item_model() -> None:
ItemResponse = ResponseModel[ItemResponseModel, None]
obj_to_validate = {
"links": None,
"data": {"id": "123", "name": "apple", "quantity": 10, "price": 1.20},
}
my_response_obj = ItemResponse(**obj_to_validate)
assert my_response_obj.dict() == {
"links": None,
"data": {
"id": "123",
"name": "apple",
"quantity": 10,
"price": 1.20,
},
}
def test_list_item_model() -> None:
ItemResponse = MultiResponseModel[ItemResponseModel, None]
obj_to_validate = {
"links": None,
"data": [
{"id": "123", "name": "apple", "quantity": 10, "price": 1.20},
{"id": "321", "name": "banana", "quantity": 20, "price": 2.34},
],
}
my_response_obj = ItemResponse(**obj_to_validate)
assert my_response_obj.dict() == {
"links": None,
"data": [
{
"id": "123",
"name": "apple",
"quantity": 10,
"price": 1.20,
},
{
"id": "321",
"name": "banana",
"quantity": 20,
"price": 2.34,
},
],
}
def test_attributes_as_item_model_empty_dict() -> None:
ItemResponse = ResponseModel[ItemResponseModel, None]
obj_to_validate = {
"links": None,
"data": {
"id": "123",
},
}
with raises(ValidationError) as e:
ItemResponse(**obj_to_validate)
assert e.value.errors() == [
{
"loc": ("data", "name"),
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ("data", "quantity"),
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ("data", "price"),
"msg": "field required",
"type": "value_error.missing",
},
]
def test_response_constructed_with_resource_object() -> None:
ItemResponse = ResponseModel[ItemResponseModel, None]
item = ItemResponseModel(id="abc123", name="pear", price=1.2, quantity=10)
data = item.dict()
assert ItemResponse(data=data, links=None).dict() == {
"links": None,
"data": {
"id": "abc123",
"name": "pear",
"price": 1.2,
"quantity": 10,
},
}
def test_response_constructed_with_resource_object_list() -> None:
ItemResponse = MultiResponseModel[ItemResponseModel, None]
items = [
ItemResponseModel(id="1", name="apple", price=1.5, quantity=3),
ItemResponseModel(id="2", name="pear", price=1.2, quantity=10),
ItemResponseModel(id="3", name="orange", price=2.2, quantity=5),
]
response = ItemResponse(data=items, links=None)
assert response.dict() == {
"links": None,
"data": [
{
"id": "1",
"name": "apple",
"price": 1.5,
"quantity": 3,
},
{
"id": "2",
"name": "pear",
"price": 1.2,
"quantity": 10,
},
{
"id": "3",
"name": "orange",
"price": 2.2,
"quantity": 5,
},
],
}
| 2.578125 | 3 |
stickmanZ/__main__.py | MichaelMcFarland98/cse210-project | 1 | 2130 | <filename>stickmanZ/__main__.py
from game.game_view import GameView
from game.menu_view import menu_view
from game import constants
import arcade
SCREEN_WIDTH = constants.SCREEN_WIDTH
SCREEN_HEIGHT = constants.SCREEN_HEIGHT
SCREEN_TITLE = constants.SCREEN_TITLE
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
start_view = menu_view()
window.show_view(start_view)
arcade.run()
| 2.078125 | 2 |
neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.py | congnt95/neutron | 1,080 | 2131 | <gh_stars>1000+
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
from oslo_utils import uuidutils
import sqlalchemy as sa
from neutron.db import rbac_db_models
"""rbac_qos_policy
Revision ID: c6c112992c9
Revises: e3278ee65050
Create Date: 2015-11-25 18:45:03.831359
"""
# revision identifiers, used by Alembic.
revision = 'c6c112992c9'
down_revision = 'e3278ee65050'
depends_on = ('15e43b934f81',)
qos_rbacs = sa.Table(
'qospolicyrbacs', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=255),
nullable=True),
sa.Column('target_tenant', sa.String(length=255),
nullable=False),
sa.Column('action', sa.String(length=255), nullable=False),
sa.Column('object_id', sa.String(length=36), nullable=False))
# A simple model of the qos_policies table with only the fields needed for
# the migration.
qos_policy = sa.Table('qos_policies', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id',
sa.String(length=255)),
sa.Column('shared', sa.Boolean(), nullable=False))
def upgrade():
op.bulk_insert(qos_rbacs, get_values())
op.drop_column('qos_policies', 'shared')
def get_values():
session = sa.orm.Session(bind=op.get_bind())
values = []
for row in session.query(qos_policy).filter(qos_policy.c.shared).all():
values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0],
'tenant_id': row[1], 'target_tenant': '*',
'action': rbac_db_models.ACCESS_SHARED})
session.commit()
return values
| 1.4375 | 1 |
chapter5/ch5_gcp_subscriber.py | ericchou1/network-devops-kafka-up-and-running | 1 | 2132 | from concurrent.futures import TimeoutError
from google.cloud import pubsub_v1
project_id = "pubsub-testing-331300"
subscription_id = "test-sub"
# Number of seconds the subscriber should listen for messages
timeout = 5.0
subscriber = pubsub_v1.SubscriberClient()
# The `subscription_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/subscriptions/{subscription_id}`
subscription_path = subscriber.subscription_path(project_id, subscription_id)
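# With the values above this resolves to (illustrative):
#   "projects/pubsub-testing-331300/subscriptions/test-sub"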
def callback(message: pubsub_v1.subscriber.message.Message) -> None:
print(f"Received {message}.")
message.ack()
streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
print(f"Listening for messages on {subscription_path}..\n")
# Wrap subscriber in a 'with' block to automatically call close() when done.
with subscriber:
try:
# When `timeout` is not set, result() will block indefinitely,
# unless an exception is encountered first.
streaming_pull_future.result(timeout=timeout)
except TimeoutError:
streaming_pull_future.cancel() # Trigger the shutdown.
streaming_pull_future.result() # Block until the shutdown is complete.
| 2.796875 | 3 |
odoo-13.0/addons/google_drive/models/res_config_settings.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 2133 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
google_drive_authorization_code = fields.Char(string='Authorization Code', config_parameter='google_drive_authorization_code')
google_drive_uri = fields.Char(compute='_compute_drive_uri', string='URI', help="The URL to generate the authorization code from Google")
is_google_drive_token_generated = fields.Boolean(string='Refresh Token Generated')
@api.depends('google_drive_authorization_code')
def _compute_drive_uri(self):
google_drive_uri = self.env['google.service']._get_google_token_uri('drive', scope=self.env['google.drive.config'].get_google_scope())
for config in self:
config.google_drive_uri = google_drive_uri
def get_values(self):
res = super(ResConfigSettings, self).get_values()
refresh_token = self.env['ir.config_parameter'].sudo().get_param('google_drive_refresh_token', False)
res.update(is_google_drive_token_generated=bool(refresh_token))
return res
def confirm_setup_token(self):
params = self.env['ir.config_parameter'].sudo()
authorization_code_before = params.get_param('google_drive_authorization_code')
authorization_code = self.google_drive_authorization_code
if authorization_code != authorization_code_before:
refresh_token = (
self.env['google.service'].generate_refresh_token('drive', authorization_code)
if authorization_code else False
)
params.set_param('google_drive_refresh_token', refresh_token)
def action_setup_token(self):
self.ensure_one()
template = self.env.ref('google_drive.google_drive_auth_code_wizard')
return {
'name': _('Set up refresh token'),
'type': 'ir.actions.act_window',
'res_model': 'res.config.settings',
'views': [(template.id, 'form')],
'target': 'new',
}
| 1.992188 | 2 |
dataloaders/loader.py | sanger640/attMPTI | 93 | 2134 | <filename>dataloaders/loader.py
""" Data Loader for Generating Tasks
Author: <NAME>, 2020
"""
import os
import random
import math
import glob
import numpy as np
import h5py as h5
import transforms3d
from itertools import combinations
import torch
from torch.utils.data import Dataset
def sample_K_pointclouds(data_path, num_point, pc_attribs, pc_augm, pc_augm_config,
scan_names, sampled_class, sampled_classes, is_support=False):
'''sample K pointclouds and the corresponding labels for one class (one_way)'''
ptclouds = []
labels = []
for scan_name in scan_names:
ptcloud, label = sample_pointcloud(data_path, num_point, pc_attribs, pc_augm, pc_augm_config,
scan_name, sampled_classes, sampled_class, support=is_support)
ptclouds.append(ptcloud)
labels.append(label)
ptclouds = np.stack(ptclouds, axis=0)
labels = np.stack(labels, axis=0)
return ptclouds, labels
def sample_pointcloud(data_path, num_point, pc_attribs, pc_augm, pc_augm_config, scan_name,
sampled_classes, sampled_class=0, support=False, random_sample=False):
sampled_classes = list(sampled_classes)
data = np.load(os.path.join(data_path, 'data', '%s.npy' %scan_name))
N = data.shape[0] #number of points in this scan
if random_sample:
sampled_point_inds = np.random.choice(np.arange(N), num_point, replace=(N < num_point))
else:
# If this point cloud is for support/query set, make sure that the sampled points contain target class
valid_point_inds = np.nonzero(data[:,6] == sampled_class)[0] # indices of points belonging to the sampled class
if N < num_point:
sampled_valid_point_num = len(valid_point_inds)
else:
valid_ratio = len(valid_point_inds)/float(N)
sampled_valid_point_num = int(valid_ratio*num_point)
sampled_valid_point_inds = np.random.choice(valid_point_inds, sampled_valid_point_num, replace=False)
sampled_other_point_inds = np.random.choice(np.arange(N), num_point-sampled_valid_point_num,
replace=(N<num_point))
sampled_point_inds = np.concatenate([sampled_valid_point_inds, sampled_other_point_inds])
data = data[sampled_point_inds]
xyz = data[:, 0:3]
rgb = data[:, 3:6]
labels = data[:,6].astype(np.int)
xyz_min = np.amin(xyz, axis=0)
xyz -= xyz_min
if pc_augm:
xyz = augment_pointcloud(xyz, pc_augm_config)
if 'XYZ' in pc_attribs:
xyz_min = np.amin(xyz, axis=0)
XYZ = xyz - xyz_min
xyz_max = np.amax(XYZ, axis=0)
XYZ = XYZ/xyz_max
ptcloud = []
if 'xyz' in pc_attribs: ptcloud.append(xyz)
if 'rgb' in pc_attribs: ptcloud.append(rgb/255.)
if 'XYZ' in pc_attribs: ptcloud.append(XYZ)
ptcloud = np.concatenate(ptcloud, axis=1)
if support:
groundtruth = labels==sampled_class
else:
groundtruth = np.zeros_like(labels)
for i, label in enumerate(labels):
if label in sampled_classes:
groundtruth[i] = sampled_classes.index(label)+1
return ptcloud, groundtruth
def augment_pointcloud(P, pc_augm_config):
"""" Augmentation on XYZ and jittering of everything """
M = transforms3d.zooms.zfdir2mat(1)
if pc_augm_config['scale'] > 1:
s = random.uniform(1 / pc_augm_config['scale'], pc_augm_config['scale'])
M = np.dot(transforms3d.zooms.zfdir2mat(s), M)
if pc_augm_config['rot'] == 1:
angle = random.uniform(0, 2 * math.pi)
M = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], angle), M) # z=upright assumption
if pc_augm_config['mirror_prob'] > 0: # mirroring x&y, not z
if random.random() < pc_augm_config['mirror_prob'] / 2:
M = np.dot(transforms3d.zooms.zfdir2mat(-1, [1, 0, 0]), M)
if random.random() < pc_augm_config['mirror_prob'] / 2:
M = np.dot(transforms3d.zooms.zfdir2mat(-1, [0, 1, 0]), M)
P[:, :3] = np.dot(P[:, :3], M.T)
if pc_augm_config['jitter']:
sigma, clip = 0.01, 0.05 # https://github.com/charlesq34/pointnet/blob/master/provider.py#L74
P = P + np.clip(sigma * np.random.randn(*P.shape), -1 * clip, clip).astype(np.float32)
return P
class MyDataset(Dataset):
def __init__(self, data_path, dataset_name, cvfold=0, num_episode=50000, n_way=3, k_shot=5, n_queries=1,
phase=None, mode='train', num_point=4096, pc_attribs='xyz', pc_augm=False, pc_augm_config=None):
super(MyDataset).__init__()
self.data_path = data_path
self.n_way = n_way
self.k_shot = k_shot
self.n_queries = n_queries
self.num_episode = num_episode
self.phase = phase
self.mode = mode
self.num_point = num_point
self.pc_attribs = pc_attribs
self.pc_augm = pc_augm
self.pc_augm_config = pc_augm_config
if dataset_name == 's3dis':
from dataloaders.s3dis import S3DISDataset
self.dataset = S3DISDataset(cvfold, data_path)
elif dataset_name == 'scannet':
from dataloaders.scannet import ScanNetDataset
self.dataset = ScanNetDataset(cvfold, data_path)
else:
raise NotImplementedError('Unknown dataset %s!' % dataset_name)
if mode == 'train':
self.classes = np.array(self.dataset.train_classes)
elif mode == 'test':
self.classes = np.array(self.dataset.test_classes)
else:
raise NotImplementedError('Unkown mode %s! [Options: train/test]' % mode)
print('MODE: {0} | Classes: {1}'.format(mode, self.classes))
self.class2scans = self.dataset.class2scans
def __len__(self):
return self.num_episode
def __getitem__(self, index, n_way_classes=None):
if n_way_classes is not None:
sampled_classes = np.array(n_way_classes)
else:
sampled_classes = np.random.choice(self.classes, self.n_way, replace=False)
support_ptclouds, support_masks, query_ptclouds, query_labels = self.generate_one_episode(sampled_classes)
if self.mode == 'train' and self.phase == 'metatrain':
remain_classes = list(set(self.classes) - set(sampled_classes))
try:
sampled_valid_classes = np.random.choice(np.array(remain_classes), self.n_way, replace=False)
except:
raise NotImplementedError('Error! The number remaining classes is less than %d_way' %self.n_way)
valid_support_ptclouds, valid_support_masks, valid_query_ptclouds, \
valid_query_labels = self.generate_one_episode(sampled_valid_classes)
return support_ptclouds.astype(np.float32), \
support_masks.astype(np.int32), \
query_ptclouds.astype(np.float32), \
query_labels.astype(np.int64), \
valid_support_ptclouds.astype(np.float32), \
valid_support_masks.astype(np.int32), \
valid_query_ptclouds.astype(np.float32), \
valid_query_labels.astype(np.int64)
else:
return support_ptclouds.astype(np.float32), \
support_masks.astype(np.int32), \
query_ptclouds.astype(np.float32), \
query_labels.astype(np.int64), \
sampled_classes.astype(np.int32)
def generate_one_episode(self, sampled_classes):
support_ptclouds = []
support_masks = []
query_ptclouds = []
query_labels = []
black_list = [] # to store the sampled scan names, in order to prevent sampling one scan several times...
for sampled_class in sampled_classes:
all_scannames = self.class2scans[sampled_class].copy()
if len(black_list) != 0:
all_scannames = [x for x in all_scannames if x not in black_list]
selected_scannames = np.random.choice(all_scannames, self.k_shot+self.n_queries, replace=False)
black_list.extend(selected_scannames)
query_scannames = selected_scannames[:self.n_queries]
support_scannames = selected_scannames[self.n_queries:]
query_ptclouds_one_way, query_labels_one_way = sample_K_pointclouds(self.data_path, self.num_point,
self.pc_attribs, self.pc_augm,
self.pc_augm_config,
query_scannames,
sampled_class,
sampled_classes,
is_support=False)
support_ptclouds_one_way, support_masks_one_way = sample_K_pointclouds(self.data_path, self.num_point,
self.pc_attribs, self.pc_augm,
self.pc_augm_config,
support_scannames,
sampled_class,
sampled_classes,
is_support=True)
query_ptclouds.append(query_ptclouds_one_way)
query_labels.append(query_labels_one_way)
support_ptclouds.append(support_ptclouds_one_way)
support_masks.append(support_masks_one_way)
support_ptclouds = np.stack(support_ptclouds, axis=0)
support_masks = np.stack(support_masks, axis=0)
query_ptclouds = np.concatenate(query_ptclouds, axis=0)
query_labels = np.concatenate(query_labels, axis=0)
return support_ptclouds, support_masks, query_ptclouds, query_labels
def batch_train_task_collate(batch):
task_train_support_ptclouds, task_train_support_masks, task_train_query_ptclouds, task_train_query_labels, \
task_valid_support_ptclouds, task_valid_support_masks, task_valid_query_ptclouds, task_valid_query_labels = list(zip(*batch))
task_train_support_ptclouds = np.stack(task_train_support_ptclouds)
task_train_support_masks = np.stack(task_train_support_masks)
task_train_query_ptclouds = np.stack(task_train_query_ptclouds)
task_train_query_labels = np.stack(task_train_query_labels)
task_valid_support_ptclouds = np.stack(task_valid_support_ptclouds)
task_valid_support_masks = np.stack(task_valid_support_masks)
task_valid_query_ptclouds = np.array(task_valid_query_ptclouds)
task_valid_query_labels = np.stack(task_valid_query_labels)
data = [torch.from_numpy(task_train_support_ptclouds).transpose(3,4), torch.from_numpy(task_train_support_masks),
torch.from_numpy(task_train_query_ptclouds).transpose(2,3), torch.from_numpy(task_train_query_labels),
torch.from_numpy(task_valid_support_ptclouds).transpose(3,4), torch.from_numpy(task_valid_support_masks),
torch.from_numpy(task_valid_query_ptclouds).transpose(2,3), torch.from_numpy(task_valid_query_labels)]
return data
################################################ Static Testing Dataset ################################################
class MyTestDataset(Dataset):
def __init__(self, data_path, dataset_name, cvfold=0, num_episode_per_comb=100, n_way=3, k_shot=5, n_queries=1,
num_point=4096, pc_attribs='xyz', mode='valid'):
super(MyTestDataset).__init__()
dataset = MyDataset(data_path, dataset_name, cvfold=cvfold, n_way=n_way, k_shot=k_shot, n_queries=n_queries,
mode='test', num_point=num_point, pc_attribs=pc_attribs, pc_augm=False)
self.classes = dataset.classes
if mode == 'valid':
test_data_path = os.path.join(data_path, 'S_%d_N_%d_K_%d_episodes_%d_pts_%d' % (
cvfold, n_way, k_shot, num_episode_per_comb, num_point))
elif mode == 'test':
test_data_path = os.path.join(data_path, 'S_%d_N_%d_K_%d_test_episodes_%d_pts_%d' % (
cvfold, n_way, k_shot, num_episode_per_comb, num_point))
else:
raise NotImplementedError('Mode (%s) is unknown!' %mode)
if os.path.exists(test_data_path):
self.file_names = glob.glob(os.path.join(test_data_path, '*.h5'))
self.num_episode = len(self.file_names)
else:
print('Test dataset (%s) does not exist...\n Constructing...' %test_data_path)
os.mkdir(test_data_path)
class_comb = list(combinations(self.classes, n_way)) # [(),(),(),...]
self.num_episode = len(class_comb) * num_episode_per_comb
episode_ind = 0
self.file_names = []
for sampled_classes in class_comb:
sampled_classes = list(sampled_classes)
for i in range(num_episode_per_comb):
data = dataset.__getitem__(episode_ind, sampled_classes)
out_filename = os.path.join(test_data_path, '%d.h5' % episode_ind)
write_episode(out_filename, data)
self.file_names.append(out_filename)
episode_ind += 1
def __len__(self):
return self.num_episode
def __getitem__(self, index):
file_name = self.file_names[index]
return read_episode(file_name)
def batch_test_task_collate(batch):
batch_support_ptclouds, batch_support_masks, batch_query_ptclouds, batch_query_labels, batch_sampled_classes = batch[0]
data = [torch.from_numpy(batch_support_ptclouds).transpose(2,3), torch.from_numpy(batch_support_masks),
torch.from_numpy(batch_query_ptclouds).transpose(1,2), torch.from_numpy(batch_query_labels.astype(np.int64))]
return data, batch_sampled_classes
def write_episode(out_filename, data):
support_ptclouds, support_masks, query_ptclouds, query_labels, sampled_classes = data
data_file = h5.File(out_filename, 'w')
data_file.create_dataset('support_ptclouds', data=support_ptclouds, dtype='float32')
data_file.create_dataset('support_masks', data=support_masks, dtype='int32')
data_file.create_dataset('query_ptclouds', data=query_ptclouds, dtype='float32')
data_file.create_dataset('query_labels', data=query_labels, dtype='int64')
data_file.create_dataset('sampled_classes', data=sampled_classes, dtype='int32')
data_file.close()
print('\t {0} saved! | classes: {1}'.format(out_filename, sampled_classes))
def read_episode(file_name):
data_file = h5.File(file_name, 'r')
support_ptclouds = data_file['support_ptclouds'][:]
support_masks = data_file['support_masks'][:]
query_ptclouds = data_file['query_ptclouds'][:]
query_labels = data_file['query_labels'][:]
sampled_classes = data_file['sampled_classes'][:]
return support_ptclouds, support_masks, query_ptclouds, query_labels, sampled_classes
################################################ Pre-train Dataset ################################################
class MyPretrainDataset(Dataset):
def __init__(self, data_path, classes, class2scans, mode='train', num_point=4096, pc_attribs='xyz',
pc_augm=False, pc_augm_config=None):
super(MyPretrainDataset).__init__()
self.data_path = data_path
self.classes = classes
self.num_point = num_point
self.pc_attribs = pc_attribs
self.pc_augm = pc_augm
self.pc_augm_config = pc_augm_config
train_block_names = []
all_block_names = []
for k, v in sorted(class2scans.items()):
all_block_names.extend(v)
n_blocks = len(v)
n_test_blocks = int(n_blocks * 0.1)
n_train_blocks = n_blocks - n_test_blocks
train_block_names.extend(v[:n_train_blocks])
if mode == 'train':
self.block_names = list(set(train_block_names))
elif mode == 'test':
self.block_names = list(set(all_block_names) - set(train_block_names))
else:
raise NotImplementedError('Mode is unknown!')
print('[Pretrain Dataset] Mode: {0} | Num_blocks: {1}'.format(mode, len(self.block_names)))
def __len__(self):
return len(self.block_names)
def __getitem__(self, index):
block_name = self.block_names[index]
ptcloud, label = sample_pointcloud(self.data_path, self.num_point, self.pc_attribs, self.pc_augm,
self.pc_augm_config, block_name, self.classes, random_sample=True)
return torch.from_numpy(ptcloud.transpose().astype(np.float32)), torch.from_numpy(label.astype(np.int64)) | 2.109375 | 2 |
greendoge/types/condition_with_args.py | grayfallstown/greendoge-blockchain | 44 | 2135 | from dataclasses import dataclass
from typing import List
from greendoge.types.condition_opcodes import ConditionOpcode
from greendoge.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ConditionWithArgs(Streamable):
"""
This structure is used to store parsed CLVM conditions
Conditions in CLVM have either format of (opcode, var1) or (opcode, var1, var2)
"""
opcode: ConditionOpcode
vars: List[bytes]
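# Illustrative sketch (not part of the original module): constructing a parsed
# condition by hand. ConditionOpcode.CREATE_COIN is assumed to exist here, as it
# does in the upstream Chia code this project derives from; its two vars follow
# the (puzzle_hash, amount) layout of that opcode.
if __name__ == "__main__":
    example = ConditionWithArgs(
        opcode=ConditionOpcode.CREATE_COIN,
        vars=[bytes(32), (1000).to_bytes(8, "big")],  # 32-byte puzzle hash, 8-byte amount
    )
    print(example.opcode, example.vars)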
| 2.53125 | 3 |
homeassistant/components/hunterdouglas_powerview/entity.py | pp81381/home-assistant | 0 | 2136 | """The nexia integration base entity."""
from aiopvapi.resources.shade import ATTR_TYPE
from homeassistant.const import ATTR_MODEL, ATTR_SW_VERSION
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
DEVICE_FIRMWARE,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE,
FIRMWARE_BUILD,
FIRMWARE_REVISION,
FIRMWARE_SUB_REVISION,
MANUFACTURER,
)
class HDEntity(CoordinatorEntity):
"""Base class for hunter douglas entities."""
def __init__(self, coordinator, device_info, room_name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator)
self._room_name = room_name
self._unique_id = unique_id
self._device_info = device_info
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return the device_info of the device."""
firmware = self._device_info[DEVICE_FIRMWARE]
sw_version = f"{firmware[FIRMWARE_REVISION]}.{firmware[FIRMWARE_SUB_REVISION]}.{firmware[FIRMWARE_BUILD]}"
return DeviceInfo(
identifiers={(DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER])},
connections={
(dr.CONNECTION_NETWORK_MAC, self._device_info[DEVICE_MAC_ADDRESS])
},
name=self._device_info[DEVICE_NAME],
suggested_area=self._room_name,
model=self._device_info[DEVICE_MODEL],
sw_version=sw_version,
manufacturer=MANUFACTURER,
)
class ShadeEntity(HDEntity):
"""Base class for hunter douglas shade entities."""
def __init__(self, coordinator, device_info, room_name, shade, shade_name):
"""Initialize the shade."""
super().__init__(coordinator, device_info, room_name, shade.id)
self._shade_name = shade_name
self._shade = shade
@property
def device_info(self) -> DeviceInfo:
"""Return the device_info of the device."""
device_info = DeviceInfo(
identifiers={(DOMAIN, self._shade.id)},
name=self._shade_name,
suggested_area=self._room_name,
manufacturer=MANUFACTURER,
model=str(self._shade.raw_data[ATTR_TYPE]),
via_device=(DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER]),
)
for shade in self._shade.shade_types:
if shade.shade_type == device_info[ATTR_MODEL]:
device_info[ATTR_MODEL] = shade.description
break
if FIRMWARE not in self._shade.raw_data:
return device_info
firmware = self._shade.raw_data[FIRMWARE]
sw_version = f"{firmware[FIRMWARE_REVISION]}.{firmware[FIRMWARE_SUB_REVISION]}.{firmware[FIRMWARE_BUILD]}"
device_info[ATTR_SW_VERSION] = sw_version
return device_info
| 1.898438 | 2 |
keycast_env/lib/python3.8/site-packages/Xlib/ext/res.py | daxter-army/key-cast | 10 | 2137 | <reponame>daxter-army/key-cast
# Xlib.ext.res -- X-Resource extension module
#
# Copyright (C) 2021 <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street,
# Fifth Floor,
# Boston, MA 02110-1301 USA
"""X-Resource extension allows a client to query the X server about its usage
of various resources.
For detailed description see any of the following documents.
Protocol specification:
https://www.x.org/releases/current/doc/resourceproto/resproto.txt
XCB Protocol specification:
https://cgit.freedesktop.org/xcb/proto/tree/src/res.xml
"""
from Xlib.protocol import rq
RES_MAJOR_VERSION = 1
RES_MINOR_VERSION = 2
extname = "X-Resource"
# v1.0
ResQueryVersion = 0
ResQueryClients = 1
ResQueryClientResources = 2
ResQueryClientPixmapBytes = 3
# v1.2
ResQueryClientIds = 4
ResQueryResourceBytes = 5
class QueryVersion(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8("opcode"),
rq.Opcode(ResQueryVersion),
rq.RequestLength(),
rq.Card8("client_major"),
rq.Card8("client_minor"),
rq.Pad(2))
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16("sequence_number"),
rq.ReplyLength(),
rq.Card16("server_major"),
rq.Card16("server_minor"),
rq.Pad(20))
def query_version(self, client_major=RES_MAJOR_VERSION,
client_minor=RES_MINOR_VERSION):
""" Query the protocol version supported by the X server.
The client sends the highest supported version to the server and the
server sends the highest version it supports, but no higher than the
requested version."""
return QueryVersion(
display=self.display,
opcode=self.display.get_extension_major(extname),
client_major=client_major,
client_minor=client_minor)
Client = rq.Struct(
rq.Card32("resource_base"),
rq.Card32("resource_mask"))
class QueryClients(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8("opcode"),
rq.Opcode(ResQueryClients),
rq.RequestLength())
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16("sequence_number"),
rq.ReplyLength(),
rq.LengthOf("clients", 4),
rq.Pad(20),
rq.List("clients", Client))
def query_clients(self):
"""Request the list of all currently connected clients."""
return QueryClients(
display=self.display,
opcode=self.display.get_extension_major(extname))
Type = rq.Struct(
rq.Card32("resource_type"),
rq.Card32("count"))
class QueryClientResources(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8("opcode"),
rq.Opcode(ResQueryClientResources),
rq.RequestLength(),
rq.Card32("client"))
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16("sequence_number"),
rq.ReplyLength(),
rq.LengthOf("types", 4),
rq.Pad(20),
rq.List("types", Type))
def query_client_resources(self, client):
"""Request the number of resources owned by a client.
The server will return the counts of each type of resource.
"""
return QueryClientResources(
display=self.display,
opcode=self.display.get_extension_major(extname),
client=client)
class QueryClientPixmapBytes(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8("opcode"),
rq.Opcode(ResQueryClientPixmapBytes),
rq.RequestLength(),
rq.Card32("client"))
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16("sequence_number"),
rq.ReplyLength(),
rq.Card32("bytes"),
rq.Card32("bytes_overflow"),
rq.Pad(16))
def query_client_pixmap_bytes(self, client):
"""Query the pixmap usage of some client.
The returned number is a sum of memory usage of each pixmap that can be
attributed to the given client.
"""
return QueryClientPixmapBytes(
display=self.display,
opcode=self.display.get_extension_major(extname),
client=client)
class SizeOf(rq.LengthOf):
"""A SizeOf stores the size in bytes of some other Field whose size
may vary, e.g. List
"""
def __init__(self, name, size, item_size):
rq.LengthOf.__init__(self, name, size)
self.item_size = item_size
def parse_value(self, length, display):
return length // self.item_size
ClientXIDMask = 1 << 0
LocalClientPIDMask = 1 << 1
ClientIdSpec = rq.Struct(
rq.Card32("client"),
rq.Card32("mask"))
ClientIdValue = rq.Struct(
rq.Object("spec", ClientIdSpec),
SizeOf("value", 4, 4),
rq.List("value", rq.Card32Obj))
class QueryClientIds(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8("opcode"),
rq.Opcode(ResQueryClientIds),
rq.RequestLength(),
rq.LengthOf("specs", 4),
rq.List("specs", ClientIdSpec))
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16("sequence_number"),
rq.ReplyLength(),
rq.LengthOf("ids", 4),
rq.Pad(20),
rq.List("ids", ClientIdValue))
def query_client_ids(self, specs):
"""Request to identify a given set of clients with some identification method.
The request sends a list of specifiers that select clients and
identification methods to server. The server then tries to identify the
chosen clients using the identification methods specified for each client.
The server returns IDs for those clients that were successfully identified.
"""
return QueryClientIds(
display=self.display,
opcode=self.display.get_extension_major(extname),
specs=specs)
ResourceIdSpec = rq.Struct(
rq.Card32("resource"),
rq.Card32("type"))
ResourceSizeSpec = rq.Struct(
# inline struct ResourceIdSpec to work around
# a parser bug with nested objects
rq.Card32("resource"),
rq.Card32("type"),
rq.Card32("bytes"),
rq.Card32("ref_count"),
rq.Card32("use_count"))
ResourceSizeValue = rq.Struct(
rq.Object("size", ResourceSizeSpec),
rq.LengthOf("cross_references", 4),
rq.List("cross_references", ResourceSizeSpec))
class QueryResourceBytes(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8("opcode"),
rq.Opcode(ResQueryResourceBytes),
rq.RequestLength(),
rq.Card32("client"),
rq.LengthOf("specs", 4),
rq.List("specs", ResourceIdSpec))
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16("sequence_number"),
rq.ReplyLength(),
rq.LengthOf("sizes", 4),
rq.Pad(20),
rq.List("sizes", ResourceSizeValue))
def query_resource_bytes(self, client, specs):
"""Query the sizes of resources from X server.
The request sends a list of specifiers that selects resources for size
calculation. The server tries to calculate the sizes of chosen resources
and returns an estimate for a resource only if the size could be determined
"""
return QueryResourceBytes(
display=self.display,
opcode=self.display.get_extension_major(extname),
client=client,
specs=specs)
def init(disp, info):
disp.extension_add_method("display", "res_query_version", query_version)
disp.extension_add_method("display", "res_query_clients", query_clients)
disp.extension_add_method("display", "res_query_client_resources",
query_client_resources)
disp.extension_add_method("display", "res_query_client_pixmap_bytes",
query_client_pixmap_bytes)
disp.extension_add_method("display", "res_query_client_ids",
query_client_ids)
disp.extension_add_method("display", "res_query_resource_bytes",
query_resource_bytes)
| 1.539063 | 2 |
rubra/cmdline_args.py | scwatts/rubra | 14 | 2138 | # Process the unix command line of the pipeline.
import argparse
from version import rubra_version
def get_cmdline_args():
return parser.parse_args()
parser = argparse.ArgumentParser(
description='A bioinformatics pipeline system.')
parser.add_argument(
'pipeline',
metavar='PIPELINE_FILE',
type=str,
help='Your Ruffus pipeline stages (a Python module)')
parser.add_argument(
'--config',
metavar='CONFIG_FILE',
type=str,
nargs='+',
required=True,
help='One or more configuration files (Python modules)')
parser.add_argument(
'--verbose',
type=int,
choices=(0, 1, 2),
required=False,
default=1,
help='Output verbosity level: 0 = quiet; 1 = normal; \
2 = chatty (default is 1)')
parser.add_argument(
'--style',
type=str,
choices=('print', 'run', 'flowchart', 'touchfiles'),
required=False,
default='print',
help='Pipeline behaviour: print; run; touchfiles; flowchart (default is print)')
parser.add_argument(
'--force',
metavar='TASKNAME',
type=str,
required=False,
default=[],
nargs='+',
help='tasks which are forced to be out of date regardless of timestamps')
parser.add_argument(
'--end',
metavar='TASKNAME',
type=str,
required=False,
help='end points (tasks) for the pipeline')
parser.add_argument(
'--rebuild',
type=str,
choices=('fromstart', 'fromend'),
required=False,
default='fromstart',
help='rebuild outputs by working back from end tasks or forwards \
from start tasks (default is fromstart)')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + rubra_version)
| 2.546875 | 3 |
main.py | KH241/Geohashing | 0 | 2139 | import webbrowser
import config
from Generator import Generator
def main():
generator = Generator()
latitude, longitude = generator.getCoordinates()
webbrowser.open(config.api_request.format(latitude, longitude))
if __name__ == '__main__':
main()
| 2.65625 | 3 |
knx-test.py | WAvdBeek/CoAPthon3 | 1 | 2140 | <reponame>WAvdBeek/CoAPthon3
#!/usr/bin/env python
import getopt
import socket
import sys
import cbor
#from cbor2 import dumps, loads
import json
import time
import traceback
from coapthon.client.helperclient import HelperClient
from coapthon.utils import parse_uri
from coapthon import defines
client = None
paths = {}
paths_extend = {}
my_base = ""
def usage(): # pragma: no cover
print("Command:\tknxcoapclient.py -o -p [-P]")
print("Options:")
print("\t-o, --operation=\tGET|GETNONE|PUT|POST|DELETE|DISCOVER|OBSERVE")
print("\t-p, --path=\t\t\tPath of the request")
print("\t-P, --payload=\t\tPayload of the request")
print("\t-c, --contenttype=\t\tcontenttype of the request")
print("\t-f, --payload-file=\t\tFile with payload of the request")
def get_url(line):
data = line.split(">")
url = data[0]
return url[1:]
def get_ct(line):
tagvalues = line.split(";")
for tag in tagvalues:
if tag.startswith("ct"):
ct_value_all = tag.split("=")
ct_value = ct_value_all[1].split(",")
return ct_value[0]
return ""
def get_base(url):
# python3 knxcoapclient.py -o GET -p coap://[fe80::6513:3050:71a7:5b98]:63914/a -c 50
my_url = url.replace("coap://","")
mybase = my_url.split("/")
return mybase[0]
def get_base_from_link(payload):
print("get_base_from_link\n")
global paths
global paths_extend
lines = payload.splitlines()
# add the
if len(paths) == 0:
my_base = get_base(get_url(lines[0]))
return my_base
def get_sn(my_base):
print("Get SN :");
sn = execute_get("coap://"+my_base+"/dev/sn", 60)
json_data = cbor.loads(sn.payload)
#print ("SN : ", json_data)
return json_data
def install(my_base):
sn = get_sn(my_base)
print (" SN : ", sn)
iid = "5" # installation id
if "000001" == sn :
# sensor, e.g sending
print ("--------------------")
print ("Installing SN: ", sn)
content = { 2: "reset"}
print("reset :", content);
execute_post("coap://"+my_base+"/.well-known/knx", 60, 60, content)
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = 1
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
content = iid
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
content = { 2: "startLoading"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# group object table
# id (0)= 1
# url (11)= /p/light
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: "p/push", 7:[1], 8: [2] } ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g", 40)
# recipient table
# id (0)= 1
# ia (12)
# url (11)= .knx
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: "/p/push", 7:[1], 12 :"blah.blah" } ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = { 2: "loadComplete"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
if "000002" == sn :
# actuator ==> receipient
# should use /fp/r
print ("--------------------")
print ("installing SN: ", sn)
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = 2
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
content = iid
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
content = { 2: "startLoading"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# group object table
# id (0)= 1
# url (11)= /p/light
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ { 0: 1, 11: "/p/light", 7:[1], 8: [1] } ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g", 40)
# publisher table
# id (0)= 1
# ia (12)
# url (11)= .knx
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: ".knx", 7:[1], 12 :"blah.blah" } ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = { 2: "loadComplete"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# do a post
content = {"sia": 5678, "st": 55, "ga": 1, "value": 100 }
content = { 4: 5678, "st": 55, 7: 1, "value": 100 }
# st ga value (1)
#content = { 5: { 6: 1, 7: 1, 1: True } }
#execute_post("coap://"+my_base+"/.knx", 60, 60, content)
content = {4: 5678, 5: { 6: 1, 7: 1, 1: False } }
#execute_post("coap://"+my_base+"/.knx", 60, 60, content)
#execute_post("coap://[FF02::FD]:5683/.knx", 60, 60, content)
# no json tags as strings
def do_sequence_dev(my_base):
print("===================")
print("Get SN :");
sn = execute_get("coap://"+my_base+"/dev/sn", 60)
sn = get_sn(my_base)
print (" SN : ", sn)
print("===================")
print("Get HWT :");
execute_get("coap://"+my_base+"/dev/hwt", 60)
print("===================")
print("Get HWV :");
execute_get("coap://"+my_base+"/dev/hwv", 60)
print("===================")
print("Get FWV :");
execute_get("coap://"+my_base+"/dev/fwv", 60)
print("===================")
print("Get Model :");
execute_get("coap://"+my_base+"/dev/model", 60)
print("===================")
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
execute_get("coap://"+my_base+"/dev/pm", 60)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
execute_get("coap://"+my_base+"/dev/pm", 60)
print("===================")
content = 44
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
execute_get("coap://"+my_base+"/dev/ia", 60)
print("===================")
content = "my host name"
print("set hostname :", content);
execute_put("coap://"+my_base+"/dev/hostname", 60, 60, content)
execute_get("coap://"+my_base+"/dev/hostname", 60)
print("===================")
content = " iid xxx"
print("set iid :", content);
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
execute_get("coap://"+my_base+"/dev/iid", 60)
# id ==> 0
# href ==> 11
# ga ==> 7
# cflag ==> 8
def do_sequence_fp_g_int(my_base):
# url, content, accept, contents
content = [ {0: 1, 11: "xxxx1", 8: [1,2,3,4,5], 7:[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/1", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
content = [ {0: 2, 11: "xxxxyyy2", 8: [1,4,5], 7:[44,55,33]}, {0: 3, 1: "xxxxyyy3", 8: [1,4,5], 7:[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/2", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
execute_del("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
def do_sequence_fp_g(my_base):
# url, content, accept, contents
content = [ {"id": 1, "href": "xxxx1", "cflag": [1,2,3,4,5], "ga":[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/1", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
content = [ {"id": 2, "href": "xxxxyyy2", "cflag": [1,4,5], "ga":[44,55,33]}, {"id": 3, "href": "xxxxyyy3", "cflag": [1,4,5], "ga":[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/2", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
execute_del("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
def do_sequence_fp_p_int(my_base):
# url, content, accept, contents
content = [ {0: 1, 12: "Ia.IA1", 112: "path1", 7:[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/1", 60)
# 40 == application-link format
execute_get("coap://"+my_base+"/fp/p", 40)
content = [ {0: 2, 12: "xxxxyyyia2", 112: "path2", 7:[44,55,33]},
{0: 3, 12: "xxxxyyyia3", 112: "path3", 7:[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/2", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
execute_del("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
def do_sequence_fp_p(my_base):
# url, content, accept, contents
content = [ {"id": 1, "ia": "Ia.IA1", "path": "path1", "ga":[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/1", 60)
# 40 == application-link format
execute_get("coap://"+my_base+"/fp/p", 40)
content = [ {"id": 2, "ia": "xxxxyyyia2", "path": "path2","ga":[44,55,33]}, {"id": 3, "ia": "xxxxyyyia3", "path": "path3","ga":[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/2", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
execute_del("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
def do_sequence_fp_r_int(my_base):
# url, content, accept, contents
content = [ { 0: 1, 12: "r-Ia.IA1", 112: "r-path1", 7:[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/1", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
content = [ { 0: 2, 12: "r-Ia.IA2", 10: "url2", 112: "r-path2", 7:[44,55,33]},
{0: 3, 12: "r-Ia.IA3", 112: "r-path3", 7:[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/2", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
execute_del("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
def do_sequence_fp_r(my_base):
# url, content, accept, contents
content = [ {"id": 1, "ia": "r-Ia.IA1", "path": "r-path1", "ga":[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/1", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
content = [ {"id": 2, "ia": "r-Ia.IA2", "path": "r-path2", "ga":[44,55,33]}, {"id": 3, "ia": "r-Ia.IA3", "path": "r-path3", "ga":[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/2", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
execute_del("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
# cmd ==> 2
def do_sequence_lsm_int(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {2 : "startLoading"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {2 : "loadComplete"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {2 : "unload"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
def do_sequence_lsm(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {"cmd": "startLoading"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {"cmd": "loadComplete"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {"cmd": "unload"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# ./knx resource
# sia ==> 4
# ga ==> 7
# st 6
def do_sequence_knx_knx_int(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.knx", 60)
content = {"value": { 4 : 5, 7: 7777 , 6 : "rp"}}
execute_post("coap://"+my_base+"/.knx", 60, 60, content)
execute_get("coap://"+my_base+"/.knx", 60)
# ./knx resource
def do_sequence_knx_knx(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.knx", 60)
content = {"value": { "sia" : 5, "ga": 7, "st": "rp"}}
execute_post("coap://"+my_base+"/.knx", 60, 60, content)
execute_get("coap://"+my_base+"/.knx", 60)
def do_sequence_knx_spake(my_base):
# url, content, accept, contents
# sequence:
# - parameter exchange: 15 (rnd)- return value
# - credential exchange: 10 - return value
# - pase verification exchange: 14 - no return value
content = { 15: b"a-15-sdfsdred"}
execute_post("coap://"+my_base+"/.well-known/knx/spake", 60, 60, content)
# pa
content = { 10: b"s10dfsdfsfs" }
execute_post("coap://"+my_base+"/.well-known/knx/spake", 60, 60, content)
# ca
content = { 14: b"a15sdfsdred"}
execute_post("coap://"+my_base+"/.well-known/knx/spake", 60, 60, content)
# expecting return
def do_sequence_knx_idevid(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/idevid", 282)
def do_sequence_knx_ldevid(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/ldevid", 282)
def do_sequence_knx_osn(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/osn", 60)
def do_sequence_knx_crc(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/crc", 60)
def do_sequence_oscore(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/f/oscore", 40)
execute_get("coap://"+my_base+"/p/oscore/replwdo", 60)
content = 105
execute_put("coap://"+my_base+"/p/oscore/replwdo", 60, 60, content)
execute_get("coap://"+my_base+"/p/oscore/replwdo", 60)
execute_get("coap://"+my_base+"/p/oscore/osndelay", 60)
content = 1050
execute_put("coap://"+my_base+"/p/oscore/osndelay", 60, 60, content)
execute_get("coap://"+my_base+"/p/oscore/osndelay", 60)
def do_sequence_core_knx(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx", 60)
content = { 1 : 5, 2: "reset"}
execute_post("coap://"+my_base+"/.well-known/knx", 60, 60, content)
def do_sequence_a_sen(my_base):
# url, content, accept, contents
content = {2: "reset"}
execute_post("coap://"+my_base+"/a/sen", 60, 60, content)
def do_sequence_auth(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/auth", 40)
def do_sequence_auth_at(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/auth/at", 40)
#
content = {0: b"id", 1 : 20, 2:b"ms",3:"hkdf", 4:"alg", 5:b"salt", 6:b"contextId"}
execute_post("coap://"+my_base+"/auth/at", 60, 60, content)
content = {0: b"id2", 1 : 20, 2:b"ms",3:"hkdf", 4:"alg", 5:b"salt", 6:b"contextId2"}
execute_post("coap://"+my_base+"/auth/at", 60, 60, content)
execute_get("coap://"+my_base+"/auth/at", 40)
execute_get("coap://"+my_base+"/auth/at/id", 60)
execute_del("coap://"+my_base+"/auth/at/id", 60)
def do_sequence_f(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/f", 40)
# note this one is a bit dirty hard coded...
execute_get("coap://"+my_base+"/f/417", 40)
execute_get("coap://"+my_base+"/.well-known/core", 40)
def do_sequence(my_base):
#sn = get_sn(my_base)
install(my_base)
return
do_sequence_dev(my_base)
#return
do_sequence_fp_g_int(my_base)
#do_sequence_fp_g(my_base)
do_sequence_fp_p_int(my_base)
#do_sequence_fp_p(my_base)
do_sequence_fp_r_int(my_base)
#do_sequence_fp_r(my_base)
do_sequence_lsm_int(my_base)
#do_sequence_lsm(my_base)
do_sequence_lsm_int(my_base)
# .knx
do_sequence_knx_knx_int(my_base)
#do_sequence_knx_knx(my_base)
do_sequence_knx_spake(my_base)
do_sequence_knx_idevid(my_base)
do_sequence_knx_ldevid(my_base)
do_sequence_knx_crc(my_base)
do_sequence_knx_osn(my_base)
do_sequence_oscore(my_base)
do_sequence_core_knx(my_base)
do_sequence_a_sen(my_base)
do_sequence_auth(my_base)
do_sequence_auth_at(my_base)
do_sequence_f(my_base)
def client_callback_discovery(response, checkdata=None):
print(" --- Discovery Callback ---")
global my_base
if response is not None:
print ("response code:",response.code)
print ("response type:",response.content_type)
if response.code > 100:
print("+++returned error+++")
return
if response.content_type == defines.Content_types["application/link-format"]:
print (response.payload.decode())
my_base = get_base_from_link(response.payload.decode())
do_sequence(my_base)
def code2string(code):
if code == 68:
return "(Changed)"
if code == 69:
return "(Content)"
if code == 132:
return "(Not Found)"
if code == 133:
return "(METHOD_NOT_ALLOWED)"
if code == 160:
return "(INTERNAL_SERVER_ERROR)"
return ""
def client_callback(response, checkdata=None):
print(" --- Callback ---")
if response is not None:
print ("response code:",response.code, code2string(response.code))
print ("response type:",response.content_type)
if response.code > 100:
print("+++returned error+++")
return
#print(response.pretty_print())
if response.content_type == defines.Content_types["text/plain"]:
if response.payload is not None:
print (type(response.payload), len(response.payload))
print ("=========")
print (response.payload)
print ("=========")
else:
print ("payload: none")
elif response.content_type == defines.Content_types["application/cbor"]:
print (type(response.payload), len(response.payload))
print ("=========")
print (response.payload)
print ("=========")
#json_data = loads(response.payload)
#print(json_data)
#print ("=========")
json_string = ""
try:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
except:
print("error in cbor..")
print (json_string)
print ("===+++===")
if checkdata is not None:
check_data = cbor.loads(checkdata)
check_string = json.dumps(check_data, indent=2, sort_keys=True)
print(" check: ")
print (check_string)
if check_string == json_string:
print(" =+++===> OK ")
else:
print(" =+++===> NOT OK ")
print (json_string)
elif response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
print ("application/vnd.ocf+cbor")
try:
print (type(response.payload), len(response.payload))
print ("=========")
print (response.payload)
print ("=========")
json_data = cbor.loads(response.payload)
print (json_data)
print ("---------")
except:
traceback.print_exc()
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
elif response.content_type == defines.Content_types["application/link-format"]:
print (response.payload.decode())
else:
if response.payload is not None:
print ("type, len", type(response.payload), len(response.payload))
print (response.payload)
#else:
# print (" not handled: ", response)
else:
print (" Response : None")
#check = True
#while check:
# chosen = eval(input("Stop observing? [y/N]: "))
# if chosen != "" and not (chosen == "n" or chosen == "N" or chosen == "y" or chosen == "Y"):
# print("Unrecognized choose.")
# continue
def client_callback_observe(response): # pragma: no cover
global client
print("Callback_observe")
check = True
while check:
        chosen = input("Stop observing? [y/N]: ")
if chosen != "" and not (chosen == "n" or chosen == "N" or chosen == "y" or chosen == "Y"):
print("Unrecognized choose.")
continue
elif chosen == "y" or chosen == "Y":
while True:
                rst = input("Send RST message? [Y/n]: ")
if rst != "" and not (rst == "n" or rst == "N" or rst == "y" or rst == "Y"):
print("Unrecognized choose.")
continue
elif rst == "" or rst == "y" or rst == "Y":
client.cancel_observing(response, True)
else:
client.cancel_observing(response, False)
check = False
break
else:
break
def execute_get(mypath, ct_value):
print ("---------------------------")
print ("execute_get: ", ct_value, mypath)
print (type(mypath))
if (mypath is None or len(mypath) < 5):
return
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return;
ct = {}
ct['accept'] = ct_value
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
response = nclient.get(path, None, None, **ct)
client_callback(response)
nclient.stop()
return response
def execute_del(mypath, ct_value):
print ("---------------------------")
print ("execute_del: ", ct_value, mypath)
do_exit = False
ct = {}
ct['accept'] = ct_value
ct['content_type'] = ct_value
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return;
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
nclientcheck = HelperClient(server=(host, port))
payload = 0
response = nclient.delete(path, None, None, **ct)
client_callback(response)
#nclient.stop()
#sys.exit(2)
print ("=======")
def execute_put(mypath, ct_value, accept, content):
print ("---------------------------")
print ("execute_put: ", ct_value, mypath)
do_exit = False
ct = {}
ct['accept'] = accept
ct['content_type'] = ct_value
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
nclientcheck = HelperClient(server=(host, port))
payload = 0
if accept == 60:
payload = cbor.dumps(content)
else:
payload = content
print ("payload: ", payload)
response = nclient.put(path, payload, None, None , None, **ct)
client_callback(response)
nclient.stop()
def execute_post(mypath, ct_value, accept, content):
print ("---------------------------")
print ("execute_post: ", ct_value, mypath)
print (content)
print (" ---------------------")
do_exit = False
ct = {}
ct['accept'] = accept
ct['content_type'] = ct_value
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
#nclientcheck = HelperClient(server=(host, port))
payload = 0
if accept == 60:
#print(" content :", content)
payload = cbor.dumps(content)
else:
payload = content
response = nclient.post(path, payload, None, None , None, **ct)
client_callback(response)
nclient.stop()
def main(): # pragma: no cover
global client
op = None
path = None
payload = None
content_type = None
#ct = {'content_type': defines.Content_types["application/link-format"]}
ct = {}
ct['accept'] = 40
try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:p:P:f:c:", ["help", "operation=", "path=", "payload=",
                                                                 "payload-file=", "content-type="])
except getopt.GetoptError as err:
# print help information and exit:
print((str(err))) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o in ("-o", "--operation"):
op = a
elif o in ("-p", "--path"):
path = a
elif o in ("-P", "--payload"):
payload = a
elif o in ("-c", "--content-type"):
ct['accept'] = a
print ("content type request : ", ct)
elif o in ("-f", "--payload-file"):
with open(a, 'r') as f:
payload = f.read()
elif o in ("-h", "--help"):
usage()
sys.exit()
else:
usage()
sys.exit(2)
if op is None:
print("Operation must be specified")
usage()
sys.exit(2)
if path is None:
print("Path must be specified")
usage()
sys.exit(2)
if not path.startswith("coap://"):
print("Path must be conform to coap://host[:port]/path")
usage()
sys.exit(2)
host, port, path = parse_uri(path)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
client = HelperClient(server=(host, port))
if op == "GET":
if path is None:
print("Path cannot be empty for a GET request")
usage()
sys.exit(2)
response = client.get(path, None, None, **ct)
print((response.pretty_print()))
if response.content_type == defines.Content_types["application/json"]:
json_data = json.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/link-format"]:
#json_data = cbor.loads(response.payload)
#json_string = json.dumps(json_data, indent=2, sort_keys=True)
#print ("JSON ::")
print (response.payload.decode())
print ("\n\n")
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
client.stop()
elif op == "GETNONE":
if path is None:
print("Path cannot be empty for a GET-None request")
usage()
sys.exit(2)
response = client.get_non(path, None, None, **ct)
print((response.pretty_print()))
if response.content_type == defines.Content_types["application/json"]:
json_data = json.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
client.stop()
elif op == "OBSERVE":
if path is None:
print("Path cannot be empty for a GET request")
usage()
sys.exit(2)
client.observe(path, client_callback_observe)
elif op == "DELETE":
if path is None:
print("Path cannot be empty for a DELETE request")
usage()
sys.exit(2)
response = client.delete(path)
print((response.pretty_print()))
client.stop()
elif op == "POST":
if path is None:
print("Path cannot be empty for a POST request")
usage()
sys.exit(2)
if payload is None:
print("Payload cannot be empty for a POST request")
usage()
sys.exit(2)
print ( "payload for POST (ascii):", payload )
print (ct['accept'] )
if ct['accept'] == str(defines.Content_types["application/cbor"]):
json_data = json.loads(payload)
cbor_data = cbor.dumps(json_data)
payload = bytes(cbor_data)
if ct['accept'] == str(defines.Content_types["application/vnd.ocf+cbor"]):
json_data = json.loads(payload)
            cbor_data = cbor.dumps(json_data)  # encode the JSON payload as CBOR (mirrors the application/cbor branch)
            payload = bytes(cbor_data)
response = client.post(path, payload, None, None, **ct)
print((response.pretty_print()))
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
client.stop()
elif op == "PUT":
if path is None:
print("Path cannot be empty for a PUT request")
usage()
sys.exit(2)
if payload is None:
print("Payload cannot be empty for a PUT request")
usage()
sys.exit(2)
response = client.put(path, payload)
print((response.pretty_print()))
client.stop()
elif op == "DISCOVER":
#response = client.discover( path, client_callback, None, **ct)
response = client.discover( path, None, None, **ct)
if response is not None:
print(response.pretty_print())
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
if response.content_type == defines.Content_types["application/link-format"]:
#json_data = cbor.loads(response.payload)
#json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (response.payload.decode())
# do_get(response.payload.decode(), client)
client_callback_discovery(response)
counter = 2
try:
while counter > 0:
time.sleep(1)
counter = counter - 1
#client.stop()
except KeyboardInterrupt:
print("Client Shutdown")
#client.stop()
#execute_list()
client.stop()
else:
print("Operation not recognized")
usage()
sys.exit(2)
if __name__ == '__main__': # pragma: no cover
main()
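# Illustrative usage sketch of the helpers above (the device address and resource paths
# are assumptions; point them at a reachable CoAP/OCF server before trying them):
#
#   execute_get("coap://192.168.0.10:5683/oic/res", 40)     # discovery, link-format
#   execute_get("coap://192.168.0.10:5683/a/light", 60)     # read, CBOR (content-format 60)
#   execute_post("coap://192.168.0.10:5683/a/light", 60, 60, {"value": True})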
| 2.25 | 2 |
SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-PyQt4.phonon.py | alexsigaras/SWIM | 47 | 2141 | hiddenimports = ['sip', 'PyQt4.QtGui', 'PyQt4._qt']
from PyInstaller.hooks.hookutils import qt4_plugins_binaries
def hook(mod):
mod.binaries.extend(qt4_plugins_binaries('phonon_backend'))
return mod
| 1.226563 | 1 |
PyTradier/data.py | zlopez101/PyTradier | 1 | 2142 | from PyTradier.base import BasePyTradier
from typing import Union
from datetime import datetime
class MarketData(BasePyTradier):
"""All Methods currently only support string API calls, no datetime, bools, etc
"""
def quotes(self, symbols: Union[str, list], greeks: bool = False) -> dict:
"""Get a list of symbols using a keyword lookup on the symbols description. Results are in descending order by average volume of the security. This can be used for simple search functions
:param symbols: Comma-delimited list of symbols (equity or option)
:type symbols: Union[str, list]
:param greeks: Add greeks and volatility information (option only), defaults to False
:type greeks: bool, optional
:return: quotes for requested symbols
:rtype: dict
"""
symbols = self._symbol_prep(symbols)
return self._get(
"/v1/markets/quotes",
params=self.create_params(locals()),
dict_args=("quotes", "quotes"),
)
def option_chain(
self,
symbol: str,
expiration: Union[str, datetime],
greeks: Union[str, bool] = "false",
) -> dict:
"""Get all quotes in an option chain. Greek and IV data is included courtesy of ORATS. Please check out their APIs for more in-depth options data.
:param symbol: Underlying symbol of the chain
:type symbol: str
:param expiration: Expiration for the chain
:type expiration: Union[str, datetime]
:param greeks: Add greeks and volatility information, defaults to "false"
:type greeks: Union[str, bool], optional
:return: Get all quotes in an option chain
:rtype: dict
"""
return self._get(
"/v1/markets/options/chains",
params=self.create_params(locals()),
dict_args=("options", "option"),
)
def option_strike(self, symbol: str, expiration: Union[str, datetime]) -> list:
"""Get an options strike prices for a specified expiration date.
:param symbol: Underlying symbol of the chain
:type symbol: str
:param expiration: Expiration for the chain
:type expiration: Union[str, datetime]
        :return: list of strike prices for the given expiration
:rtype: list
"""
return self._get(
"/v1/markets/options/strikes", params=self.create_params(locals())
)
def option_lookup(self, underlying: str) -> dict:
"""Get all options symbols for the given underlying. This will include additional option roots (ex. SPXW, RUTW) if applicable.
:param underlying: Underlying symbol of the chain
:type underlying: str
:return: dict {"rootSymbol": underlying, "options": [list of option symbols]}
:rtype: dict
"""
return self._get(
"/v1/markets/options/lookup", params=self.create_params(locals())
)
def option_expirations(
self,
symbol: str,
includeAllRoots: Union[str, bool] = "",
strikes: Union[str, bool] = "",
) -> list:
"""Get expiration dates for a particular underlying.
Note that some underlying securities use a different symbol for their weekly options (RUT/RUTW, SPX/SPXW). To make sure you see all expirations, make sure to send the includeAllRoots parameter. This will also ensure any unique options due to corporate actions (AAPL1) are returned.
:param symbol: Underlying symbol of the chain
:type symbol: str
:param includeAllRoots: Send expirations related to all option roots, defaults to ''
:type includeAllRoots: Union[str, bool], optional
:param strikes: Add strike prices to each expiration, defaults to ''
:type strikes: Union[str, bool], optional
:return: list of expiration dates as str %Y-%m-%d
:rtype: list
"""
response = self._get(
"/v1/markets/options/expirations", params=self.create_params(locals())
)
return response
def historic_quotes(
self, symbol: str, interval: str = "daily", start: str = None, end: str = None
) -> list:
"""Get historical pricing for a security. This data will usually cover the entire lifetime of the company if sending reasonable start/end times. You can fetch historical pricing for options by passing the OCC option symbol (ex. AAPL220617C00270000) as the symbol.
:param symbol: Symbol to query
:type symbol: str
:param interval: Interval of time per timesale. One of: daily, weekly, monthly, defaults to "daily"
:type interval: str, optional
:param start: Start date represented as YYYY-MM-DD, defaults to None
:type start: str, optional
:param end: End date represented as YYYY-MM-DD, defaults to None
:type end: str, optional
        :return: list of historical price records (one per interval) for the symbol
:rtype: list
"""
return self._get(
"/v1/markets/history",
params=self.create_params(locals()),
dict_args=("history", "day"),
)
def time_and_sales(
self, symbol: str, start: str, end: str, interval: str = "1min"
) -> list:
"""Time and Sales (timesales) is typically used for charting purposes. It captures pricing across a time slice at predefined intervals.
Tick data is also available through this endpoint. This results in a very large data set for high-volume symbols, so the time slice needs to be much smaller to keep downloads time reasonable.`
:param symbol: A single security symbol.
:type symbol: str
:param start: Start date/time for timesales range represented as YYYY-MM-DD HH:MM
:type start: str
:param end: Start date/time for timesales range represented as YYYY-MM-DD HH:MM
:type end: str
:param interval: Interval of time per timesale. One of: tick, 1min, 5min, 15min, defaults to "1min"
:type interval: str, optional
:return: list of dictionaries containing keys of ['time', 'timestamp', 'price', 'open', 'high', 'close', low', 'volume', 'vwap']
:rtype: list
"""
return self._get(
"/v1/markets/timesales",
params=self.create_params(locals()),
dict_args=("series", "data"),
)
if __name__ == "__main__":
from utils import printer
data = MarketData()
symbol = "AAPL"
response = data.option_lookup(symbol)
# response = data.option_strike(symbol, dates[0])
printer(response)
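    # A few more illustrative MarketData calls (commented out; the symbol, dates and the
    # need for valid Tradier API credentials are assumptions of this sketch):
    # history = data.historic_quotes("AAPL", interval="daily", start="2021-01-01", end="2021-03-01")
    # sales = data.time_and_sales("AAPL", start="2021-03-01 09:30", end="2021-03-01 16:00", interval="5min")
    # printer(history)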
| 2.890625 | 3 |
joulescope_ui/meter_widget.py | Axel-Jacobsen/pyjoulescope_ui | 1 | 2143 | <filename>joulescope_ui/meter_widget.py
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2 import QtCore, QtWidgets
from . import joulescope_rc
from .meter_value_widget import MeterValueWidget
import logging
log = logging.getLogger(__name__)
FIELDS = [
('current', 'A', 'Amps'),
('voltage', 'V', 'Volts'),
('power', 'W', 'Watts'),
('energy', 'J', 'Joules'),
]
class MeterWidget(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
QtWidgets.QWidget.__init__(self, *args, **kwargs)
self.verticalLayout = QtWidgets.QVBoxLayout(self)
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout.setSpacing(0)
self.controlWidget = QtWidgets.QWidget(self)
self.controlLayout = QtWidgets.QHBoxLayout(self.controlWidget)
self.verticalLayout.addWidget(self.controlWidget)
self.accumulateButton = QtWidgets.QPushButton(self.controlWidget)
self.accumulateButton.setCheckable(True)
self.accumulateButton.setObjectName("accumulateButton")
self.controlLayout.addWidget(self.accumulateButton)
self.accumulateButton.toggled.connect(self.on_accumulate_toggled)
self.controlSpacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.controlLayout.addItem(self.controlSpacer)
self.values = {}
for name, units_short, units_long in FIELDS:
w = MeterValueWidget(self)
w.setStyleSheet("QWidget { background-color : black; color : green; }")
w.configure(name.capitalize(), units_short, units_long)
self.values[name] = w
w.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.addWidget(w)
self.values['energy'].configure_energy()
self.sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.sizePolicy.setHorizontalStretch(0)
self.sizePolicy.setVerticalStretch(0)
self.setSizePolicy(self.sizePolicy)
self.retranslateUi()
@QtCore.Slot(bool)
def on_accumulate_toggled(self, checked):
self.values['current'].accumulate_enable = checked
self.values['voltage'].accumulate_enable = checked
self.values['power'].accumulate_enable = checked
def update(self, statistics):
"""Update the multimeter display
:param statistics: The statistics data structure
"""
for name, field in statistics['signals'].items():
d = field['statistics']
self.values[name].update_value(mean=d['μ'], variance=d['σ2'], v_min=d['min'], v_max=d['max'])
energy = statistics['accumulators']['energy']['value']
charge = statistics['accumulators']['charge']['value']
self.values['energy'].update_energy(energy, charge)
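    # Sketch of the `statistics` structure consumed by update(), inferred from the key
    # accesses above (numeric values are illustrative only):
    #
    #   {
    #       "signals": {
    #           "current": {"statistics": {"μ": 0.012, "σ2": 1e-06, "min": 0.011, "max": 0.013}},
    #           "voltage": {...},
    #           "power": {...},
    #       },
    #       "accumulators": {
    #           "energy": {"value": 0.5},    # Joules
    #           "charge": {"value": 0.02},   # Coulombs
    #       },
    #   }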
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.accumulateButton.setText(_translate("meter_widget", "Accumulate"))
| 2.046875 | 2 |
rpyc/core/service.py | bbonf/rpyc | 0 | 2144 | """
Services are the heart of RPyC: each side of the connection exposes a *service*,
which define the capabilities available to the other side.
Note that the services by both parties need not be symmetric, e.g., one side may
exposed *service A*, while the other may expose *service B*. As long as the two
can interoperate, you're good to go.
"""
from functools import partial
from rpyc.lib import hybridmethod
from rpyc.lib.compat import execute, is_py3k
from rpyc.core.protocol import Connection
class Service(object):
"""The service base-class. Derive from this class to implement custom RPyC
services:
* The name of the class implementing the ``Foo`` service should match the
pattern ``FooService`` (suffixed by the word 'Service') ::
class FooService(Service):
pass
FooService.get_service_name() # 'FOO'
FooService.get_service_aliases() # ['FOO']
* To supply a different name or aliases, use the ``ALIASES`` class attribute ::
class Foobar(Service):
ALIASES = ["foo", "bar", "lalaland"]
Foobar.get_service_name() # 'FOO'
Foobar.get_service_aliases() # ['FOO', 'BAR', 'LALALAND']
* Override :func:`on_connect` to perform custom initialization
* Override :func:`on_disconnect` to perform custom finalization
* To add exposed methods or attributes, simply define them normally,
but prefix their name by ``exposed_``, e.g. ::
class FooService(Service):
def exposed_add(self, x, y):
return x + y
* All other names (not prefixed by ``exposed_``) are local (not accessible
to the other party)
.. note::
You can override ``_rpyc_getattr``, ``_rpyc_setattr`` and ``_rpyc_delattr``
to change attribute lookup -- but beware of possible **security implications!**
"""
__slots__ = ()
ALIASES = ()
_protocol = Connection
def on_connect(self, conn):
"""called when the connection is established"""
pass
def on_disconnect(self, conn):
"""called when the connection had already terminated for cleanup
(must not perform any IO on the connection)"""
pass
# Using default defined in 'protocol.Connection._access_attr' for:
# def _rpyc_getattr(self, name):
def _rpyc_delattr(self, name):
raise AttributeError("access denied")
def _rpyc_setattr(self, name, value):
raise AttributeError("access denied")
@classmethod
def get_service_aliases(cls):
"""returns a list of the aliases of this service"""
if cls.ALIASES:
return tuple(str(n).upper() for n in cls.ALIASES)
name = cls.__name__.upper()
if name.endswith("SERVICE"):
name = name[:-7]
return (name,)
@classmethod
def get_service_name(cls):
"""returns the canonical name of the service (which is its first
alias)"""
return cls.get_service_aliases()[0]
exposed_get_service_aliases = get_service_aliases
exposed_get_service_name = get_service_name
@hybridmethod
def _connect(self, channel, config={}):
"""Setup a connection via the given channel."""
if isinstance(self, type): # autovivify if accessed as class method
self = self()
# Note that we are here passing in `self` as root object for backward
# compatibility and convenience. You could pass in a different root if
# you wanted:
conn = self._protocol(self, channel, config)
self.on_connect(conn)
return conn
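# Minimal sketch of a custom service and a client talking to it (the server/client wiring
# below is an assumption for illustration; the port number is arbitrary):
#
#   class EchoService(Service):
#       def exposed_echo(self, text):
#           return text
#
#   # server: from rpyc.utils.server import ThreadedServer; ThreadedServer(EchoService, port=18861).start()
#   # client: conn = rpyc.connect("localhost", 18861); print(conn.root.echo("hi"))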
class VoidService(Service):
"""void service - an do-nothing service"""
__slots__ = ()
class ModuleNamespace(object):
"""used by the :class:`SlaveService` to implement the magical
'module namespace'"""
__slots__ = ["__getmodule", "__cache", "__weakref__"]
def __init__(self, getmodule):
self.__getmodule = getmodule
self.__cache = {}
def __contains__(self, name):
try:
self[name]
except ImportError:
return False
else:
return True
def __getitem__(self, name):
if type(name) is tuple:
name = ".".join(name)
if name not in self.__cache:
self.__cache[name] = self.__getmodule(name)
return self.__cache[name]
def __getattr__(self, name):
return self[name]
class Slave(object):
__slots__ = ["_conn", "namespace"]
def __init__(self):
self._conn = None
self.namespace = {}
def execute(self, text):
"""execute arbitrary code (using ``exec``)"""
execute(text, self.namespace)
def eval(self, text):
"""evaluate arbitrary code (using ``eval``)"""
return eval(text, self.namespace)
def getmodule(self, name):
"""imports an arbitrary module"""
return __import__(name, None, None, "*")
def getconn(self):
"""returns the local connection instance to the other side"""
return self._conn
class SlaveService(Slave, Service):
"""The SlaveService allows the other side to perform arbitrary imports and
execution arbitrary code on the server. This is provided for compatibility
with the classic RPyC (2.6) modus operandi.
This service is very useful in local, secure networks, but it exposes
a **major security risk** otherwise."""
__slots__ = ()
def on_connect(self, conn):
self._conn = conn
self._conn._config.update(dict(
allow_all_attrs = True,
allow_pickle = True,
allow_getattr = True,
allow_setattr = True,
allow_delattr = True,
allow_exposed_attrs = False,
import_custom_exceptions = True,
instantiate_custom_exceptions = True,
instantiate_oldstyle_exceptions = True,
))
super(SlaveService, self).on_connect(conn)
class FakeSlaveService(VoidService):
"""VoidService that can be used for connecting to peers that operate a
:class:`MasterService`, :class:`ClassicService`, or the old
``SlaveService`` (pre v3.5) without exposing any functionality to them."""
__slots__ = ()
exposed_namespace = None
exposed_execute = None
exposed_eval = None
exposed_getmodule = None
exposed_getconn = None
class MasterService(Service):
"""Peer for a new-style (>=v3.5) :class:`SlaveService`. Use this service
if you want to connect to a ``SlaveService`` without exposing any
functionality to them."""
__slots__ = ()
def on_connect(self, conn):
super(MasterService, self).on_connect(conn)
self._install(conn, conn.root)
@staticmethod
def _install(conn, slave):
modules = ModuleNamespace(slave.getmodule)
builtin = modules.builtins if is_py3k else modules.__builtin__
conn.modules = modules
conn.eval = slave.eval
conn.execute = slave.execute
conn.namespace = slave.namespace
conn.builtin = builtin
conn.builtins = builtin
from rpyc.utils.classic import teleport_function
conn.teleport = partial(teleport_function, conn)
class ClassicService(MasterService, SlaveService):
"""Full duplex master/slave service, i.e. both parties have full control
over the other. Must be used by both parties."""
__slots__ = ()
class ClassicClient(MasterService, FakeSlaveService):
"""MasterService that can be used for connecting to peers that operate a
:class:`MasterService`, :class:`ClassicService` without exposing any
functionality to them."""
__slots__ = ()
| 2.75 | 3 |
tests/task/manager_test.py | altenia/taskmator | 2 | 2145 | <filename>tests/task/manager_test.py
import unittest
from testbase import TaskmatorTestBase
from taskmator.task import core, util
from taskmator import context
class ManagerTest(TaskmatorTestBase):
def testManager(self):
print ("Pending")
def main():
unittest.main()
if __name__ == '__main__':
unittest.main()
| 2.0625 | 2 |
tests/components/zwave_js/test_discovery.py | tbarbette/core | 1 | 2146 | <filename>tests/components/zwave_js/test_discovery.py
"""Test discovery of entities for device-specific schemas for the Z-Wave JS integration."""
async def test_iblinds_v2(hass, client, iblinds_v2, integration):
"""Test that an iBlinds v2.0 multilevel switch value is discovered as a cover."""
node = iblinds_v2
assert node.device_class.specific.label == "Unused"
state = hass.states.get("light.window_blind_controller")
assert not state
state = hass.states.get("cover.window_blind_controller")
assert state
async def test_ge_12730(hass, client, ge_12730, integration):
"""Test GE 12730 Fan Controller v2.0 multilevel switch is discovered as a fan."""
node = ge_12730
assert node.device_class.specific.label == "Multilevel Power Switch"
state = hass.states.get("light.in_wall_smart_fan_control")
assert not state
state = hass.states.get("fan.in_wall_smart_fan_control")
assert state
async def test_inovelli_lzw36(hass, client, inovelli_lzw36, integration):
"""Test LZW36 Fan Controller multilevel switch endpoint 2 is discovered as a fan."""
node = inovelli_lzw36
assert node.device_class.specific.label == "Unused"
state = hass.states.get("light.family_room_combo")
assert state.state == "off"
state = hass.states.get("fan.family_room_combo_2")
assert state
| 1.9375 | 2 |
boto3_type_annotations/boto3_type_annotations/guardduty/client.py | cowboygneox/boto3_type_annotations | 119 | 2147 | <filename>boto3_type_annotations/boto3_type_annotations/guardduty/client.py
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def accept_invitation(self, DetectorId: str, InvitationId: str, MasterId: str) -> Dict:
pass
def archive_findings(self, DetectorId: str, FindingIds: List) -> Dict:
pass
def can_paginate(self, operation_name: str = None):
pass
def create_detector(self, Enable: bool, ClientToken: str = None, FindingPublishingFrequency: str = None) -> Dict:
pass
def create_filter(self, DetectorId: str, FindingCriteria: Dict, Name: str, Action: str = None, ClientToken: str = None, Description: str = None, Rank: int = None) -> Dict:
pass
def create_ip_set(self, Activate: bool, DetectorId: str, Format: str, Location: str, Name: str, ClientToken: str = None) -> Dict:
pass
def create_members(self, AccountDetails: List, DetectorId: str) -> Dict:
pass
def create_sample_findings(self, DetectorId: str, FindingTypes: List = None) -> Dict:
pass
def create_threat_intel_set(self, Activate: bool, DetectorId: str, Format: str, Location: str, Name: str, ClientToken: str = None) -> Dict:
pass
def decline_invitations(self, AccountIds: List) -> Dict:
pass
def delete_detector(self, DetectorId: str) -> Dict:
pass
def delete_filter(self, DetectorId: str, FilterName: str) -> Dict:
pass
def delete_invitations(self, AccountIds: List) -> Dict:
pass
def delete_ip_set(self, DetectorId: str, IpSetId: str) -> Dict:
pass
def delete_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def delete_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str) -> Dict:
pass
def disassociate_from_master_account(self, DetectorId: str) -> Dict:
pass
def disassociate_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_detector(self, DetectorId: str) -> Dict:
pass
def get_filter(self, DetectorId: str, FilterName: str) -> Dict:
pass
def get_findings(self, DetectorId: str, FindingIds: List, SortCriteria: Dict = None) -> Dict:
pass
def get_findings_statistics(self, DetectorId: str, FindingStatisticTypes: List, FindingCriteria: Dict = None) -> Dict:
pass
def get_invitations_count(self) -> Dict:
pass
def get_ip_set(self, DetectorId: str, IpSetId: str) -> Dict:
pass
def get_master_account(self, DetectorId: str) -> Dict:
pass
def get_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def invite_members(self, AccountIds: List, DetectorId: str, DisableEmailNotification: bool = None, Message: str = None) -> Dict:
pass
def list_detectors(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_filters(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_findings(self, DetectorId: str, FindingCriteria: Dict = None, MaxResults: int = None, NextToken: str = None, SortCriteria: Dict = None) -> Dict:
pass
def list_invitations(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_ip_sets(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_members(self, DetectorId: str, MaxResults: int = None, NextToken: str = None, OnlyAssociated: str = None) -> Dict:
pass
def list_threat_intel_sets(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def start_monitoring_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def stop_monitoring_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def unarchive_findings(self, DetectorId: str, FindingIds: List) -> Dict:
pass
def update_detector(self, DetectorId: str, Enable: bool = None, FindingPublishingFrequency: str = None) -> Dict:
pass
def update_filter(self, DetectorId: str, FilterName: str, Action: str = None, Description: str = None, FindingCriteria: Dict = None, Rank: int = None) -> Dict:
pass
def update_findings_feedback(self, DetectorId: str, Feedback: str, FindingIds: List, Comments: str = None) -> Dict:
pass
def update_ip_set(self, DetectorId: str, IpSetId: str, Activate: bool = None, Location: str = None, Name: str = None) -> Dict:
pass
def update_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str, Activate: bool = None, Location: str = None, Name: str = None) -> Dict:
pass
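    # Sketch of how the runtime boto3 client these annotations mirror would be used
    # (the detector id is a placeholder):
    #
    #   import boto3
    #   guardduty = boto3.client('guardduty')
    #   detectors = guardduty.list_detectors()
    #   findings = guardduty.list_findings(DetectorId='<detector-id>')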
| 2.140625 | 2 |
test/workload/tpch_loop_workload_test.py | ChenYi015/Raven | 1 | 2148 | <gh_stars>1-10
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from queue import Queue
from threading import Thread
from benchmark.workload.tpch import TpchLoopWorkload
def print_queries(queue: Queue):
while True:
query = queue.get()
print(query)
if __name__ == '__main__':
workload = TpchLoopWorkload()
print(workload)
queue = Queue()
generate_thread = Thread(
target=workload.generate_one_loop_queries,
args=(queue,),
name='QueryGenerator'
)
generate_thread.start()
print_thread = Thread(
target=print_queries,
args=(queue,),
name='QueryPrinter'
)
print_thread.start()
| 2.421875 | 2 |
Final-Project/server/art/serializers.py | wendy006/Web-Dev-Course | 0 | 2149 | from rest_framework import serializers
from .models import *
class CollectionSerializer(serializers.ModelSerializer):
class Meta:
model = Collection
fields = ('collectionID', 'name', 'display_name', 'description', 'img_url')
class ArtSerializer(serializers.ModelSerializer):
img_url = serializers.ReadOnlyField()
thumb_url = serializers.ReadOnlyField()
class Meta:
model = Art
fields = ('artID', 'title', 'filename', 'rarity', 'collection', 'img_url', 'thumb_url')
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password', 'coins', 'art')
extra_kwargs = {
            'password': {'write_only': True}  # accepted on input, never serialized back
}
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
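# Illustrative use of UserSerializer with a write-only password (field values are made up;
# which fields are required depends on the User model definition):
#
#   serializer = UserSerializer(data={"username": "alice", "email": "alice@example.com",
#                                     "password": "s3cret"})
#   if serializer.is_valid():
#       user = serializer.save()   # create() hashes the password via set_password()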
class OwnSerializer(serializers.ModelSerializer):
duplicates = serializers.ReadOnlyField()
class Meta:
model = Own
fields = ('ownID', 'user', 'art', 'duplicates')
class SaleSerializer(serializers.ModelSerializer):
class Meta:
model = Sale
fields = ('saleID', 'seller', 'buyer', 'ownership', 'art', 'price', 'available', 'sold', 'postDate', 'purchaseDate') | 2.40625 | 2 |
google/cloud/google_cloud_cpp_common_unit_tests.bzl | joezqren/google-cloud-cpp | 0 | 2150 | <filename>google/cloud/google_cloud_cpp_common_unit_tests.bzl
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated unit tests list - DO NOT EDIT."""
google_cloud_cpp_common_unit_tests = [
"common_options_test.cc",
"future_generic_test.cc",
"future_generic_then_test.cc",
"future_void_test.cc",
"future_void_then_test.cc",
"iam_bindings_test.cc",
"internal/algorithm_test.cc",
"internal/api_client_header_test.cc",
"internal/backoff_policy_test.cc",
"internal/base64_transforms_test.cc",
"internal/big_endian_test.cc",
"internal/compiler_info_test.cc",
"internal/credentials_impl_test.cc",
"internal/env_test.cc",
"internal/filesystem_test.cc",
"internal/format_time_point_test.cc",
"internal/future_impl_test.cc",
"internal/invoke_result_test.cc",
"internal/log_impl_test.cc",
"internal/pagination_range_test.cc",
"internal/parse_rfc3339_test.cc",
"internal/random_test.cc",
"internal/retry_policy_test.cc",
"internal/status_payload_keys_test.cc",
"internal/strerror_test.cc",
"internal/throw_delegate_test.cc",
"internal/tuple_test.cc",
"internal/type_list_test.cc",
"internal/user_agent_prefix_test.cc",
"internal/utility_test.cc",
"kms_key_name_test.cc",
"log_test.cc",
"options_test.cc",
"polling_policy_test.cc",
"project_test.cc",
"status_or_test.cc",
"status_test.cc",
"stream_range_test.cc",
"terminate_handler_test.cc",
"tracing_options_test.cc",
]
| 1.210938 | 1 |
api/tests/ver1/test_base.py | codacy-badger/politico-api | 0 | 2151 | <filename>api/tests/ver1/test_base.py
import unittest
from api import create_app
class TestBase(unittest.TestCase):
"""Default super class for api ver 1 tests"""
# setup testing
def setUp(self):
self.app = create_app('testing')
self.client = self.app.test_client()
self.item_list = []
# deconstructs test elements
def tearDown(self):
self.app = None
self.item_list.clear()
| 2.53125 | 3 |
socialdistribution/app/templatetags/filters.py | CMPUT404-Project-Group/CMPUT404-Group-Project | 0 | 2152 | from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import SafeString
import markdown
import urllib
register = template.Library()
@register.filter
def strip_space(value):
return value.replace(' ', '')
@register.filter
@stringfilter
def commonmark(value):
return markdown.Markdown().convert(value)
@register.filter(name="getID")
def get_ID(value):
if not type(value) is str:
return value
return value.split('/')[-1]
@register.filter(name="getNav")
def get_nav(value):
return value.split('/')[-2]
@register.filter(name="encode_url")
def encode_url(value):
return urllib.parse.quote(value)
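# Illustrative template-side usage of these filters (template variables are assumptions):
#
#   {% load filters %}
#   {{ post.content|commonmark|safe }}
#   {{ author.url|getID }}
#   {{ share_url|encode_url }}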
@register.filter
def get_post_id(url):
"""
gets the post id from the comment page url
"""
return urllib.parse.urlparse(url.get_full_path()).path.rsplit('/', 1)[0] | 2.28125 | 2 |
alipay/aop/api/domain/MetroOdItem.py | antopen/alipay-sdk-python-all | 213 | 2153 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CloudbusUserInfo import CloudbusUserInfo
class MetroOdItem(object):
def __init__(self):
self._dest_geo = None
self._od = None
self._time = None
self._user_info = None
self._week_od = None
self._work_od = None
@property
def dest_geo(self):
return self._dest_geo
@dest_geo.setter
def dest_geo(self, value):
self._dest_geo = value
@property
def od(self):
return self._od
@od.setter
def od(self, value):
self._od = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
self._time = value
@property
def user_info(self):
return self._user_info
@user_info.setter
def user_info(self, value):
if isinstance(value, CloudbusUserInfo):
self._user_info = value
else:
self._user_info = CloudbusUserInfo.from_alipay_dict(value)
@property
def week_od(self):
return self._week_od
@week_od.setter
def week_od(self, value):
self._week_od = value
@property
def work_od(self):
return self._work_od
@work_od.setter
def work_od(self, value):
self._work_od = value
def to_alipay_dict(self):
params = dict()
if self.dest_geo:
if hasattr(self.dest_geo, 'to_alipay_dict'):
params['dest_geo'] = self.dest_geo.to_alipay_dict()
else:
params['dest_geo'] = self.dest_geo
if self.od:
if hasattr(self.od, 'to_alipay_dict'):
params['od'] = self.od.to_alipay_dict()
else:
params['od'] = self.od
if self.time:
if hasattr(self.time, 'to_alipay_dict'):
params['time'] = self.time.to_alipay_dict()
else:
params['time'] = self.time
if self.user_info:
if hasattr(self.user_info, 'to_alipay_dict'):
params['user_info'] = self.user_info.to_alipay_dict()
else:
params['user_info'] = self.user_info
if self.week_od:
if hasattr(self.week_od, 'to_alipay_dict'):
params['week_od'] = self.week_od.to_alipay_dict()
else:
params['week_od'] = self.week_od
if self.work_od:
if hasattr(self.work_od, 'to_alipay_dict'):
params['work_od'] = self.work_od.to_alipay_dict()
else:
params['work_od'] = self.work_od
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MetroOdItem()
if 'dest_geo' in d:
o.dest_geo = d['dest_geo']
if 'od' in d:
o.od = d['od']
if 'time' in d:
o.time = d['time']
if 'user_info' in d:
o.user_info = d['user_info']
if 'week_od' in d:
o.week_od = d['week_od']
if 'work_od' in d:
o.work_od = d['work_od']
return o
| 2.046875 | 2 |
djangocms_redirect/migrations/0003_auto_20190810_1009.py | vsalat/djangocms-redirect | 0 | 2154 | <reponame>vsalat/djangocms-redirect
# Generated by Django 2.2.4 on 2019-08-10 08:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_redirect', '0002_auto_20170321_1807'),
]
operations = [
migrations.AddField(
model_name='redirect',
name='catchall_redirect',
field=models.BooleanField(default=False, help_text='If selected all the pages starting with the given string will be redirected to the given redirect path', verbose_name='Catchall redirect'),
),
migrations.AddField(
model_name='redirect',
name='subpath_match',
field=models.BooleanField(default=False, help_text='If selected all the pages starting with the given string will be redirected by replacing the matching subpath with the provided redirect path.', verbose_name='Subpath match'),
),
]
| 1.867188 | 2 |
octopart/scrape_octopart.py | nicholaschiang/dl-datasheets | 0 | 2155 | #! /usr/bin/env python
import sys
import json
import urllib
import urllib2
import time
import argparse
import re
# Category ID for Discrete Semiconductors > Transistors > BJTs
TRANSISTOR_ID = "b814751e89ff63d3"  # quoted; the bare literal was a syntax error
def find_total_hits(search_query):
"""
Function: find_total_hits
--------------------
Returns the number of hits that correspond to the search query.
"""
url = "http://octopart.com/api/v3/categories/"
# NOTE: Use your API key here (https://octopart.com/api/register)
url += "?apikey=<KEY>"
args = [
('q', search_query),
('start', 0),
('limit', 1), #change to increase number of datasheets
('include[]','datasheets')
]
url += '&' + urllib.urlencode(args)
data = urllib.urlopen(url).read() # perform a SearchRequest
search_response = json.loads(data) # Grab the SearchResponse
# return number of hits
return search_response['hits']
def download_datasheets(search_query):
"""
Function: download_datasheets
--------------------
Uses the OctoPart API to download all datasheets associated with a given
set of search keywords.
"""
MAX_RESULTS = 100
counter = 0
total_hits = find_total_hits(search_query)
# print number of hits
print "[info] Search Response Hits: %s" % (total_hits)
# Calculate how many multiples of 100s of hits there are
num_hundreds = total_hits / MAX_RESULTS
print "[info] Performing %s iterations of %s results." % (num_hundreds, MAX_RESULTS)
for i in range(num_hundreds+1):
url = "http://octopart.com/api/v3/parts/search"
# NOTE: Use your API key here (https://octopart.com/api/register)
url += "?apikey=09b32c6c"
args = [
('q', search_query),
('start', (i * MAX_RESULTS)),
('limit', MAX_RESULTS), # change to edit number of datasheets
('include[]','datasheets')
# ('include[]','specs'),
# ('include[]','descriptions')
]
url += '&' + urllib.urlencode(args)
data = urllib.urlopen(url).read() # perform a SearchRequest
search_response = json.loads(data) # Grab the SearchResponse
# Iterate through the SearchResults in the SearchResponse
if not search_response.get('results'):
print "[error] no results returned in outer loop: " + str(i)
continue
for result in search_response['results']:
part = result['item'] # Grab the Part in the SearchResult
print ("[info] %s_%s..." % (part['brand']['name'].replace(" ", ""), part['mpn'])),
sys.stdout.flush()
# Iterate through list of datasheets for the given part
for datasheet in part['datasheets']:
# Grab the Datasheet URL
pdflink = datasheet['url']
if pdflink is not None:
# Download the PDF
try:
response = urllib2.urlopen(pdflink)
except urllib2.HTTPError, err:
if err.code == 404:
print "[error] Page not found!...",
elif err.code == 403:
print "[error] Access Denied!...",
else:
print "[error] HTTP Error code ", err.code,
continue; # advance to next datasheet rather than crashing
try:
filename = re.search('([^/]*)\.[^.]*$', datasheet['url']).group(1)
except AttributeError:
continue; # skip to next datasheet rather than crashing
file = open("../datasheets/%s.pdf" % filename, 'w')
file.write(response.read())
file.close()
counter += 1 # Increment the counter of files downloaded
# NOTE: Not sure if this is necessary. Just a precaution.
time.sleep(0.4) # Limit ourselves to 3 HTTP Requests/second
print("DONE")
print("[info] %s Parts Completed." % MAX_RESULTS)
print("[info] COMPLETED: %s datasheets for the query were downloaded." % counter)
def parse_args():
"""
Function: parse_args
--------------------
Parse the arguments for the Octopart Datasheet Scraper
"""
# Define what commandline arguments can be accepted
parser = argparse.ArgumentParser()
parser.add_argument('query',metavar="\"SEARCH_KEYWORDS\"",
help="keywords to query in quotes (required)")
parser.add_argument('--version', action='version', version='%(prog)s 0.1.0')
args = parser.parse_args()
return args.query
# Main Function
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
search_query = parse_args() # Parse commandline arguments
start_time = time.time()
print "[info] Download datasheets for %s" % search_query
download_datasheets(search_query)
finish_time = time.time()
print '[info] Took', finish_time - start_time, 'sec total.'
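    # Example invocation (the query string is illustrative; a valid Octopart API key must
    # be substituted in the URLs above):
    #
    #   python scrape_octopart.py "NPN transistor 2N3904"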
| 3.1875 | 3 |
extras/python/fogbench/__main__.py | foglamp/FogLAMP | 65 | 2156 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" fogbench -- a Python script used to test FogLAMP.
The objective is to simulate payloads for input, REST and other requests against one or
more FogLAMP instances. This version of fogbench is meant to test the CoAP and HTTP plugins
interface of FogLAMP southbound services.
fogbench
[IN] -h --help Print this help
-i --interval The interval in seconds between each iteration (default: 0)
[IN] -k --keep Do not delete (keep) the running sample (default: no)
[IN] -o --output Set the output file for statistics
[IN] -p --payload Type of payload and protocol (default: coap)
[IN] -t --template Set the template to use
[IN] -v --version Display the version and exit
[IN] -H --host The FogLAMP host (default: localhost)
-I --iterations The number of iterations of the test (default: 1)
[IN] -O --occurrences The number of occurrences of the template (default: 1)
[IN] -P --port The FogLAMP port. Default depends on payload and protocol
[IN] -S --statistic The type of statistics to collect
Example:
$ cd $FOGLAMP_ROOT/bin
$ ./fogbench
Help:
$ ./fogbench -h
* Create reading objects from given template, as per the json file name specified with -t
* Save those objects to the file, as per the file name specified with -o
* Read those objects
* Send those to CoAP or HTTP south plugin server, on specific host and port
.. todo::
* Try generators
"""
import sys
import os
import random
import json
from datetime import datetime, timezone
import argparse
import collections
import asyncio
import aiohttp
from .exceptions import *
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_FOGBENCH_VERSION = u"0.1.1"
_start_time = []
_end_time = []
_tot_msgs_transferred = []
_tot_byte_transferred = []
_num_iterated = 0
"""Statistics to be collected"""
# _logger = logger.setup(__name__)
def local_timestamp():
"""
:return: str - current time stamp with microseconds and machine timezone info
:example '2018-05-08 14:06:40.517313+05:30'
"""
return str(datetime.now(timezone.utc).astimezone())
def read_templates():
templates = []
return templates
def parse_template_and_prepare_json(_template_file,
_write_to_file=None, _occurrences=1):
# template_file = os.path.join(os.path.dirname(__file__), "templates/" + _template_file)
with open(_template_file) as data_file:
data = json.load(data_file)
supported_format_types = ["number", "enum"]
for _ in range(_occurrences):
readings_ = _prepare_sensor_reading(data, supported_format_types)
for r in readings_:
_write_readings_to_file(_write_to_file, r)
def _write_readings_to_file(to_file, r):
with open(to_file, 'a') as the_file:
json.dump(r, the_file)
the_file.write(os.linesep)
def _prepare_sensor_reading(data, supported_format_types):
readings = []
for d in data:
x_sensor_values = dict()
_sensor_value_object_formats = d["sensor_values"]
for fmt in _sensor_value_object_formats:
if fmt["type"] not in supported_format_types:
raise InvalidSensorValueObjectTemplateFormat(u"Invalid format, "
u"Can not parse type {}".format(fmt["type"]))
if fmt["type"] == "number":
# check float precision if any
precision = fmt.get("precision", None)
min_val = fmt.get("min", None)
max_val = fmt.get("max", None)
if min_val is None or max_val is None:
raise InvalidSensorValueObjectTemplateFormat(u"Invalid format, "
u"Min and Max values must be defined for type number.")
# print(precision)
# print(min_val)
# print(max_val)
reading = round(random.uniform(min_val, max_val), precision)
elif fmt["type"] == "enum":
reading = random.choice(fmt["list"])
# print(fmt["name"], reading)
x_sensor_values[fmt["name"]] = reading
# print(d["name"])
sensor_value_object = dict()
sensor_value_object["asset"] = d['name']
sensor_value_object["readings"] = x_sensor_values
sensor_value_object["timestamp"] = "{!s}".format(local_timestamp())
# print(json.dumps(sensor_value_object))
ord_dict = collections.OrderedDict(sorted(sensor_value_object.items()))
readings.append(ord_dict)
return readings
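# Illustrative template accepted by parse_template_and_prepare_json(), derived from the
# fields read in _prepare_sensor_reading() (asset names and ranges are made up):
#
#   [
#     {
#       "name": "fogbench/temperature",
#       "sensor_values": [
#         {"name": "ambient", "type": "number", "min": 0, "max": 40, "precision": 2},
#         {"name": "state", "type": "enum", "list": ["on", "off"]}
#       ]
#     }
#   ]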
def read_out_file(_file=None, _keep=False, _iterations=1, _interval=0, send_to='coap'):
global _start_time
global _end_time
global _tot_msgs_transferred
global _tot_byte_transferred
global _num_iterated
# from pprint import pprint
import time
# _file = os.path.join(os.path.dirname(__file__), "out/{}".format(outfile))
with open(_file) as f:
readings_list = [json.loads(line) for line in f]
loop = asyncio.get_event_loop()
while _iterations > 0:
# Pre-calculate the messages and size
msg_transferred_itr = 0 # Messages transferred in every iteration
byte_transferred_itr = 0 # Bytes transferred in every iteration
for r in readings_list:
msg_transferred_itr += 1
byte_transferred_itr += sys.getsizeof(r)
if send_to == 'coap':
_start_time.append(datetime.now())
for r in readings_list:
is_sent = loop.run_until_complete(send_to_coap(r))
if not is_sent:
break
elif send_to == 'http':
_start_time.append(datetime.now())
loop.run_until_complete(send_to_http(readings_list))
_end_time.append(datetime.now()) # End time of every iteration
_tot_msgs_transferred.append(msg_transferred_itr)
_tot_byte_transferred.append(byte_transferred_itr)
_iterations -= 1
_num_iterated += 1
if _iterations != 0:
# print(u"Iteration {} completed, waiting for {} seconds".format(_iterations, _interval))
time.sleep(_interval)
if not _keep:
os.remove(_file)
async def send_to_coap(payload):
"""
POST request to:
localhost
port 5683 (official IANA assigned CoAP port),
URI "/other/sensor-values".
"""
from aiocoap import Context, Message
from aiocoap.numbers.codes import Code
from cbor2 import dumps
context = await Context.create_client_context()
request = Message(payload=dumps(payload), code=Code.POST)
request.opt.uri_host = arg_host
request.opt.uri_port = arg_port
request.opt.uri_path = ("other", "sensor-values")
response = await context.request(request).response
str_res = str(response.code)
status_code = str_res[:4] # or str_res.split()[0]
if status_code == "4.00" or status_code == "5.00":
print("Error: ", str_res)
return False
return True
async def send_to_http(payload):
"""
POST request to:
host localhost
port 6683 (default HTTP south plugin port),
uri sensor-reading
"""
headers = {'content-type': 'application/json'}
url = 'http://{}:{}/sensor-reading'.format(arg_host, arg_port)
async with aiohttp.ClientSession() as session:
async with session.post(url, data=json.dumps(payload), headers=headers) as resp:
await resp.text()
status_code = resp.status
if status_code in range(400, 500):
print("Bad request error | code:{}, reason: {}".format(status_code, resp.reason))
return False
if status_code in range(500, 600):
print("Server error | code:{}, reason: {}".format(status_code, resp.reason))
return False
return True
def get_statistics(_stats_type=None, _out_file=None):
stat = ''
global _start_time
global _end_time
global _tot_msgs_transferred
global _tot_byte_transferred
global _num_iterated
if _stats_type == 'total':
stat += u"Total Statistics:\n"
stat += (u"\nStart Time: {}".format(datetime.strftime(_start_time[0], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nEnd Time: {}\n".format(datetime.strftime(_end_time[-1], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nTotal Messages Transferred: {}".format(sum(_tot_msgs_transferred)))
stat += (u"\nTotal Bytes Transferred: {}\n".format(sum(_tot_byte_transferred)))
stat += (u"\nTotal Iterations: {}".format(_num_iterated))
stat += (u"\nTotal Messages per Iteration: {}".format(sum(_tot_msgs_transferred)/_num_iterated))
stat += (u"\nTotal Bytes per Iteration: {}\n".format(sum(_tot_byte_transferred)/_num_iterated))
_msg_rate = []
_byte_rate = []
for itr in range(_num_iterated):
time_taken = _end_time[itr] - _start_time[itr]
_msg_rate.append(_tot_msgs_transferred[itr]/(time_taken.seconds+time_taken.microseconds/1E6))
_byte_rate.append(_tot_byte_transferred[itr] / (time_taken.seconds+time_taken.microseconds/1E6))
stat += (u"\nMin messages/second: {}".format(min(_msg_rate)))
stat += (u"\nMax messages/second: {}".format(max(_msg_rate)))
stat += (u"\nAvg messages/second: {}\n".format(sum(_msg_rate)/_num_iterated))
stat += (u"\nMin Bytes/second: {}".format(min(_byte_rate)))
stat += (u"\nMax Bytes/second: {}".format(max(_byte_rate)))
stat += (u"\nAvg Bytes/second: {}".format(sum(_byte_rate)/_num_iterated))
if _out_file:
with open(_out_file, 'w') as f:
f.write(stat)
else:
print(stat)
# should we also show total time diff? end_time - start_time
def check_server(payload_type='coap'):
template_str = ">>> Make sure south {} plugin service is running \n & listening on specified host and port \n"
if payload_type == 'coap':
print(template_str.format("CoAP"))
elif payload_type == 'http':
print(template_str.format("HTTP"))
parser = argparse.ArgumentParser(prog='fogbench')
parser.description = '%(prog)s -- a Python script used to test FogLAMP (simulate payloads)'
parser.epilog = 'The initial version of %(prog)s is meant to test the south plugin interface of ' \
'FogLAMP using CoAP or HTTP'
parser.add_argument('-v', '--version', action='version', version='%(prog)s {0!s}'.format(_FOGBENCH_VERSION))
parser.add_argument('-k', '--keep', default=False, choices=['y', 'yes', 'n', 'no'],
help='Do not delete the running sample (default: no)')
parser.add_argument('-t', '--template', required=True, help='Set the template file, json extension')
parser.add_argument('-o', '--output', default=None, help='Set the statistics output file')
parser.add_argument('-p', '--payload', default='coap', choices=['coap', 'http'], help='Type of payload '
'and protocol (default: coap)')
parser.add_argument('-I', '--iterations', help='The number of iterations of the test (default: 1)')
parser.add_argument('-O', '--occurrences', help='The number of occurrences of the template (default: 1)')
parser.add_argument('-H', '--host', help='Server host address (default: localhost)')
parser.add_argument('-P', '--port', help='The FogLAMP port. (default: 5683)')
parser.add_argument('-i', '--interval', default=0, help='The interval in seconds for each iteration (default: 0)')
parser.add_argument('-S', '--statistics', default='total', choices=['total'], help='The type of statistics to collect '
'(default: total)')
namespace = parser.parse_args(sys.argv[1:])
infile = '{0}'.format(namespace.template if namespace.template else '')
statistics_file = os.path.join(os.path.dirname(__file__), "out/{}".format(namespace.output)) if namespace.output else None
keep_the_file = True if namespace.keep in ['y', 'yes'] else False
# iterations and occurrences
arg_iterations = int(namespace.iterations) if namespace.iterations else 1
arg_occurrences = int(namespace.occurrences) if namespace.occurrences else 1
# interval between each iteration
arg_interval = int(namespace.interval) if namespace.interval else 0
arg_stats_type = '{0}'.format(namespace.statistics) if namespace.statistics else 'total'
if namespace.payload:
arg_payload_protocol = namespace.payload
arg_host = '{0}'.format(namespace.host) if namespace.host else 'localhost'
default_port = 6683 if arg_payload_protocol == 'http' else 5683
arg_port = int(namespace.port) if namespace.port else default_port
check_server(arg_payload_protocol)
sample_file = os.path.join("/tmp", "foglamp_running_sample.{}".format(os.getpid()))
parse_template_and_prepare_json(_template_file=infile, _write_to_file=sample_file, _occurrences=arg_occurrences)
read_out_file(_file=sample_file, _keep=keep_the_file, _iterations=arg_iterations, _interval=arg_interval,
send_to=arg_payload_protocol)
get_statistics(_stats_type=arg_stats_type, _out_file=statistics_file)
# TODO: Change below per local_timestamp() values
""" Expected output from given template
{
"timestamp" : "2017-08-04T06:59:57.503Z",
"asset" : "TI sensorTag/luxometer",
"sensor_values" : { "lux" : 49 }
}
{
"timestamp" : "2017-08-04T06:59:57.863Z",
"asset" : "TI sensorTag/pressure",
"sensor_values" : { "pressure" : 1021.2 }
}
{
"timestamp" : "2017-08-04T06:59:58.863Z",
"asset" : "TI sensorTag/humidity",
"sensor_values" : { "humidity" : 71.2, "temperature" : 18.6 }
}
{
"timestamp" : "2017-08-04T06:59:59.863Z",
"asset" : "TI sensorTag/temperature",
"sensor_values" : { "object" : 18.2, "ambient" : 21.6 }
}
{
"timestamp" : "2017-08-04T07:00:00.863Z",
"asset" : "TI sensorTag/accelerometer",
"sensor_values" : { "x" : 1.2, "y" : 0.0, "z" : -0.6 }
}
{
"timestamp" : "2017-08-04T07:00:01.863Z",
"asset" : "TI sensorTag/gyroscope",
"sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 }
}
{
"timestamp" : "2017-08-04T07:00:02.863Z",
"asset" : "TI sensorTag/magnetometer",
"sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 }
}
{
"timestamp" : "2017-08-04T07:00:03.863Z",
"asset" : "mouse",
"sensor_values" : { "button" : "down" }
}
{
"timestamp" : "2017-08-04T07:00:04.863Z",
"asset" : "wall clock",
"sensor_values" : { "tick" : "tock" }
}
"""
| 2.28125 | 2 |
qiskit/ignis/mitigation/measurement/filters.py | paulineollitrault/qiskit-ignis | 182 | 2157 | <reponame>paulineollitrault/qiskit-ignis<gh_stars>100-1000
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=cell-var-from-loop,invalid-name
"""
Measurement correction filters.
"""
from typing import List, Union
from copy import deepcopy
from scipy.optimize import minimize
import scipy.linalg as la
import numpy as np
import qiskit
from qiskit import QiskitError
from qiskit.tools import parallel_map
from qiskit.ignis.verification.tomography import count_keys
class MeasurementFilter():
"""
Measurement error mitigation filter.
Produced from a measurement calibration fitter and can be applied
to data.
"""
def __init__(self,
cal_matrix: np.matrix,
state_labels: list):
"""
Initialize a measurement error mitigation filter using the cal_matrix
from a measurement calibration fitter.
Args:
cal_matrix: the calibration matrix for applying the correction
state_labels: the states for the ordering of the cal matrix
"""
self._cal_matrix = cal_matrix
self._state_labels = state_labels
@property
def cal_matrix(self):
"""Return cal_matrix."""
return self._cal_matrix
@property
def state_labels(self):
"""return the state label ordering of the cal matrix"""
return self._state_labels
@state_labels.setter
def state_labels(self, new_state_labels):
"""set the state label ordering of the cal matrix"""
self._state_labels = new_state_labels
@cal_matrix.setter
def cal_matrix(self, new_cal_matrix):
"""Set cal_matrix."""
self._cal_matrix = new_cal_matrix
def apply(self,
raw_data,
method='least_squares'):
"""Apply the calibration matrix to results.
Args:
raw_data (dict or list): The data to be corrected. Can be in a number of forms:
Form 1: a counts dictionary from results.get_counts
Form 2: a list of counts of `length==len(state_labels)`
Form 3: a list of counts of `length==M*len(state_labels)` where M is an
integer (e.g. for use with the tomography data)
Form 4: a qiskit Result
method (str): fitting method. If `None`, then least_squares is used.
``pseudo_inverse``: direct inversion of the A matrix
``least_squares``: constrained to have physical probabilities
Returns:
dict or list: The corrected data in the same form as `raw_data`
Raises:
QiskitError: if `raw_data` is not an integer multiple
of the number of calibrated states.
"""
# check forms of raw_data
if isinstance(raw_data, dict):
# counts dictionary
for data_label in raw_data.keys():
if data_label not in self._state_labels:
raise QiskitError("Unexpected state label '" + data_label +
"', verify the fitter's state labels "
"correspond to the input data")
data_format = 0
# convert to form2
raw_data2 = [np.zeros(len(self._state_labels), dtype=float)]
for stateidx, state in enumerate(self._state_labels):
raw_data2[0][stateidx] = raw_data.get(state, 0)
elif isinstance(raw_data, list):
size_ratio = len(raw_data)/len(self._state_labels)
if len(raw_data) == len(self._state_labels):
data_format = 1
raw_data2 = [raw_data]
elif int(size_ratio) == size_ratio:
data_format = 2
size_ratio = int(size_ratio)
# make the list into chunks the size of state_labels for easier
# processing
raw_data2 = np.zeros([size_ratio, len(self._state_labels)])
for i in range(size_ratio):
raw_data2[i][:] = raw_data[
i * len(self._state_labels):(i + 1)*len(
self._state_labels)]
else:
raise QiskitError("Data list is not an integer multiple "
"of the number of calibrated states")
elif isinstance(raw_data, qiskit.result.result.Result):
# extract out all the counts, re-call the function with the
# counts and push back into the new result
new_result = deepcopy(raw_data)
new_counts_list = parallel_map(
self._apply_correction,
[resultidx for resultidx, _ in enumerate(raw_data.results)],
task_args=(raw_data, method))
for resultidx, new_counts in new_counts_list:
new_result.results[resultidx].data.counts = new_counts
return new_result
else:
raise QiskitError("Unrecognized type for raw_data.")
if method == 'pseudo_inverse':
pinv_cal_mat = la.pinv(self._cal_matrix)
# Apply the correction
for data_idx, _ in enumerate(raw_data2):
if method == 'pseudo_inverse':
raw_data2[data_idx] = np.dot(
pinv_cal_mat, raw_data2[data_idx])
elif method == 'least_squares':
nshots = sum(raw_data2[data_idx])
def fun(x):
return sum(
(raw_data2[data_idx] - np.dot(self._cal_matrix, x))**2)
x0 = np.random.rand(len(self._state_labels))
x0 = x0 / sum(x0)
cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
bnds = tuple((0, nshots) for x in x0)
res = minimize(fun, x0, method='SLSQP',
constraints=cons, bounds=bnds, tol=1e-6)
raw_data2[data_idx] = res.x
else:
raise QiskitError("Unrecognized method.")
if data_format == 2:
# flatten back out the list
raw_data2 = raw_data2.flatten()
elif data_format == 0:
# convert back into a counts dictionary
new_count_dict = {}
for stateidx, state in enumerate(self._state_labels):
if raw_data2[0][stateidx] != 0:
new_count_dict[state] = raw_data2[0][stateidx]
raw_data2 = new_count_dict
else:
# TODO: should probably change to:
# raw_data2 = raw_data2[0].tolist()
raw_data2 = raw_data2[0]
return raw_data2
def _apply_correction(self, resultidx, raw_data, method):
"""Wrapper to call apply with a counts dictionary."""
new_counts = self.apply(
raw_data.get_counts(resultidx), method=method)
return resultidx, new_counts
class TensoredFilter():
"""
Tensored measurement error mitigation filter.
Produced from a tensored measurement calibration fitter and can be applied
to data.
"""
def __init__(self,
cal_matrices: np.matrix,
substate_labels_list: list,
mit_pattern: list):
"""
Initialize a tensored measurement error mitigation filter using
the cal_matrices from a tensored measurement calibration fitter.
        A simple usage of this class is explained [here]
        (https://qiskit.org/documentation/tutorials/noise/3_measurement_error_mitigation.html).
Args:
cal_matrices: the calibration matrices for applying the correction.
substate_labels_list: for each calibration matrix
a list of the states (as strings, states in the subspace)
mit_pattern: for each calibration matrix
a list of the logical qubit indices (as int, states in the subspace)
"""
self._cal_matrices = cal_matrices
self._qubit_list_sizes = []
self._indices_list = []
self._substate_labels_list = []
self.substate_labels_list = substate_labels_list
self._mit_pattern = mit_pattern
@property
def cal_matrices(self):
"""Return cal_matrices."""
return self._cal_matrices
@cal_matrices.setter
def cal_matrices(self, new_cal_matrices):
"""Set cal_matrices."""
self._cal_matrices = deepcopy(new_cal_matrices)
@property
def substate_labels_list(self):
"""Return _substate_labels_list"""
return self._substate_labels_list
@substate_labels_list.setter
def substate_labels_list(self, new_substate_labels_list):
"""Return _substate_labels_list"""
self._substate_labels_list = new_substate_labels_list
# get the number of qubits in each subspace
self._qubit_list_sizes = []
for _, substate_label_list in enumerate(self._substate_labels_list):
self._qubit_list_sizes.append(
int(np.log2(len(substate_label_list))))
# get the indices in the calibration matrix
self._indices_list = []
for _, sub_labels in enumerate(self._substate_labels_list):
self._indices_list.append(
{lab: ind for ind, lab in enumerate(sub_labels)})
@property
def qubit_list_sizes(self):
"""Return _qubit_list_sizes."""
return self._qubit_list_sizes
@property
def nqubits(self):
"""Return the number of qubits. See also MeasurementFilter.apply() """
return sum(self._qubit_list_sizes)
def apply(self,
raw_data: Union[qiskit.result.result.Result, dict],
method: str = 'least_squares',
meas_layout: List[int] = None):
"""
Apply the calibration matrices to results.
Args:
raw_data (dict or Result): The data to be corrected. Can be in one of two forms:
* A counts dictionary from results.get_counts
* A Qiskit Result
method (str): fitting method. The following methods are supported:
* 'pseudo_inverse': direct inversion of the cal matrices.
Mitigated counts can contain negative values
and the sum of counts would not equal to the shots.
Mitigation is conducted qubit wise:
For each qubit, mitigate the whole counts using the calibration matrices
which affect the corresponding qubit.
For example, assume we are mitigating the 3rd bit of the 4-bit counts
                    using the 2 x 2 calibration matrix `A_3`.
When mitigating the count of '0110' in this step,
the following formula is applied:
`count['0110'] = A_3^{-1}[1, 0]*count['0100'] + A_3^{-1}[1, 1]*count['0110']`.
The total time complexity of this method is `O(m2^{n + t})`,
where `n` is the size of calibrated qubits,
`m` is the number of sets in `mit_pattern`,
and `t` is the size of largest set of mit_pattern.
If the `mit_pattern` is shaped like `[[0], [1], [2], ..., [n-1]]`,
which corresponds to the tensor product noise model without cross-talk,
then the time complexity would be `O(n2^n)`.
If the `mit_pattern` is shaped like `[[0, 1, 2, ..., n-1]]`,
which exactly corresponds to the complete error mitigation,
then the time complexity would be `O(2^(n+n)) = O(4^n)`.
* 'least_squares': constrained to have physical probabilities.
Instead of directly applying inverse calibration matrices,
this method solve a constrained optimization problem to find
the closest probability vector to the result from 'pseudo_inverse' method.
Sequential least square quadratic programming (SLSQP) is used
in the internal process.
Every updating step in SLSQP takes `O(m2^{n+t})` time.
Since this method is using the SLSQP optimization over
                    the vector with length `2^n`, the mitigation for 8-bit counts
with the `mit_pattern = [[0], [1], [2], ..., [n-1]]` would
take 10 seconds or more.
* If `None`, 'least_squares' is used.
meas_layout (list of int): the mapping from classical registers to qubits
* If you measure qubit `2` to clbit `0`, `0` to `1`, and `1` to `2`,
the list becomes `[2, 0, 1]`
* If `None`, flatten(mit_pattern) is used.
Returns:
dict or Result: The corrected data in the same form as raw_data
Raises:
            QiskitError: if raw_data is not in one of the defined forms.
"""
all_states = count_keys(self.nqubits)
num_of_states = 2**self.nqubits
if meas_layout is None:
meas_layout = []
for qubits in self._mit_pattern:
meas_layout += qubits
# check forms of raw_data
if isinstance(raw_data, dict):
# counts dictionary
# convert to list
raw_data2 = [np.zeros(num_of_states, dtype=float)]
for state, count in raw_data.items():
stateidx = int(state, 2)
raw_data2[0][stateidx] = count
elif isinstance(raw_data, qiskit.result.result.Result):
# extract out all the counts, re-call the function with the
# counts and push back into the new result
new_result = deepcopy(raw_data)
new_counts_list = parallel_map(
self._apply_correction,
[resultidx for resultidx, _ in enumerate(raw_data.results)],
task_args=(raw_data, method, meas_layout))
for resultidx, new_counts in new_counts_list:
new_result.results[resultidx].data.counts = new_counts
return new_result
else:
raise QiskitError("Unrecognized type for raw_data.")
if method == 'pseudo_inverse':
pinv_cal_matrices = []
for cal_mat in self._cal_matrices:
pinv_cal_matrices.append(la.pinv(cal_mat))
meas_layout = meas_layout[::-1] # reverse endian
qubits_to_clbits = [-1 for _ in range(max(meas_layout) + 1)]
for i, qubit in enumerate(meas_layout):
qubits_to_clbits[qubit] = i
# Apply the correction
for data_idx, _ in enumerate(raw_data2):
if method == 'pseudo_inverse':
for pinv_cal_mat, pos_qubits, indices in zip(pinv_cal_matrices,
self._mit_pattern,
self._indices_list):
inv_mat_dot_x = np.zeros([num_of_states], dtype=float)
pos_clbits = [qubits_to_clbits[qubit] for qubit in pos_qubits]
for state_idx, state in enumerate(all_states):
first_index = self.compute_index_of_cal_mat(state, pos_clbits, indices)
for i in range(len(pinv_cal_mat)): # i is index of pinv_cal_mat
source_state = self.flip_state(state, i, pos_clbits)
second_index = self.compute_index_of_cal_mat(source_state,
pos_clbits,
indices)
inv_mat_dot_x[state_idx] += pinv_cal_mat[first_index, second_index]\
* raw_data2[data_idx][int(source_state, 2)]
raw_data2[data_idx] = inv_mat_dot_x
elif method == 'least_squares':
def fun(x):
mat_dot_x = deepcopy(x)
for cal_mat, pos_qubits, indices in zip(self._cal_matrices,
self._mit_pattern,
self._indices_list):
res_mat_dot_x = np.zeros([num_of_states], dtype=float)
pos_clbits = [qubits_to_clbits[qubit] for qubit in pos_qubits]
for state_idx, state in enumerate(all_states):
second_index = self.compute_index_of_cal_mat(state, pos_clbits, indices)
for i in range(len(cal_mat)):
target_state = self.flip_state(state, i, pos_clbits)
first_index =\
self.compute_index_of_cal_mat(target_state, pos_clbits, indices)
res_mat_dot_x[int(target_state, 2)]\
+= cal_mat[first_index, second_index] * mat_dot_x[state_idx]
mat_dot_x = res_mat_dot_x
return sum((raw_data2[data_idx] - mat_dot_x) ** 2)
x0 = np.random.rand(num_of_states)
x0 = x0 / sum(x0)
nshots = sum(raw_data2[data_idx])
cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
bnds = tuple((0, nshots) for x in x0)
res = minimize(fun, x0, method='SLSQP',
constraints=cons, bounds=bnds, tol=1e-6)
raw_data2[data_idx] = res.x
else:
raise QiskitError("Unrecognized method.")
# convert back into a counts dictionary
new_count_dict = {}
for state_idx, state in enumerate(all_states):
if raw_data2[0][state_idx] != 0:
new_count_dict[state] = raw_data2[0][state_idx]
return new_count_dict
def flip_state(self, state: str, mat_index: int, flip_poses: List[int]) -> str:
"""Flip the state according to the chosen qubit positions"""
flip_poses = [pos for i, pos in enumerate(flip_poses) if (mat_index >> i) & 1]
flip_poses = sorted(flip_poses)
new_state = ""
pos = 0
for flip_pos in flip_poses:
new_state += state[pos:flip_pos]
new_state += str(int(state[flip_pos], 2) ^ 1) # flip the state
pos = flip_pos + 1
new_state += state[pos:]
return new_state
def compute_index_of_cal_mat(self, state: str, pos_qubits: List[int], indices: dict) -> int:
"""Return the index of (pseudo inverse) calibration matrix for the input quantum state"""
sub_state = ""
for pos in pos_qubits:
sub_state += state[pos]
return indices[sub_state]
def _apply_correction(self,
resultidx: int,
raw_data: qiskit.result.result.Result,
method: str,
meas_layout: List[int]):
"""Wrapper to call apply with a counts dictionary."""
new_counts = self.apply(
raw_data.get_counts(resultidx), method=method, meas_layout=meas_layout)
return resultidx, new_counts
| 2.09375 | 2 |
pymatgen/analysis/graphs.py | Roy027/pymatgen | 0 | 2158 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for graph representations of crystals.
"""
import copy
import logging
import os.path
import subprocess
import warnings
from collections import defaultdict, namedtuple
from itertools import combinations
from operator import itemgetter
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from monty.json import MSONable
from monty.os.path import which
from networkx.drawing.nx_agraph import write_dot
from networkx.readwrite import json_graph
from scipy.spatial import KDTree
from scipy.stats import describe
from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
try:
import igraph
IGRAPH_AVAILABLE = True
except ImportError:
IGRAPH_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = "<NAME>, <NAME>, <NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "August 2017"
ConnectedSite = namedtuple("ConnectedSite", "site, jimage, index, weight, dist")
def _compare(g1, g2, i1, i2):
"""
Helper function called by isomorphic to ensure comparison of node identities.
"""
return g1.vs[i1]["species"] == g2.vs[i2]["species"]
def _igraph_from_nxgraph(graph):
"""
Helper function that converts a networkx graph object into an igraph graph object.
"""
nodes = graph.nodes(data=True)
new_igraph = igraph.Graph()
for node in nodes:
new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"])
new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()])
return new_igraph
def _isomorphic(frag1, frag2):
"""
Internal function to check if two graph objects are isomorphic, using igraph if
    it is available and networkx if it is not.
"""
f1_nodes = frag1.nodes(data=True)
f2_nodes = frag2.nodes(data=True)
if len(f1_nodes) != len(f2_nodes):
return False
    f1_edges = frag1.edges()
    f2_edges = frag2.edges()
    if len(f1_edges) != len(f2_edges):
        return False
f1_comp_dict = {}
f2_comp_dict = {}
for node in f1_nodes:
if node[1]["specie"] not in f1_comp_dict:
f1_comp_dict[node[1]["specie"]] = 1
else:
f1_comp_dict[node[1]["specie"]] += 1
for node in f2_nodes:
if node[1]["specie"] not in f2_comp_dict:
f2_comp_dict[node[1]["specie"]] = 1
else:
f2_comp_dict[node[1]["specie"]] += 1
if f1_comp_dict != f2_comp_dict:
return False
if IGRAPH_AVAILABLE:
ifrag1 = _igraph_from_nxgraph(frag1)
ifrag2 = _igraph_from_nxgraph(frag2)
return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare)
nm = iso.categorical_node_match("specie", "ERROR")
return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm)
class StructureGraph(MSONable):
"""
This is a class for annotating a Structure with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, structure, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
        This class contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given crystallographic
structure easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
        For periodic graphs, the class stores information on the graph
        edges about which lattice image each edge belongs to.
:param structure: a Structure object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(structure, StructureGraph):
# just make a copy from input
graph_data = structure.as_dict()["graphs"]
self.structure = structure
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
@classmethod
def with_empty_graph(cls, structure, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for StructureGraph, returns a StructureGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Structure).
:param structure (Structure):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (StructureGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(structure)))
graph_data = json_graph.adjacency_data(graph)
return cls(structure, graph_data=graph_data)
@staticmethod
def with_edges(structure, edges):
"""
        Constructor for StructureGraph, using pre-existing or pre-defined edges
        with optional edge parameters.
        :param structure: Structure object
:param edges: dict representing the bonds of the functional
group (format: {(from_index, to_index, from_image, to_image): props},
where props is a dictionary of properties, including weight.
Props should be None if no additional properties are to be
specified.
:return: sg, a StructureGraph
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
from_image = edge[2]
to_image = edge[3]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index," " from_image, to_image) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = sg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
sg.add_edge(
from_index,
to_index,
from_jimage=from_image,
to_jimage=to_image,
weight=weight,
edge_properties=props,
)
sg.set_node_attributes()
return sg
@staticmethod
def with_local_env_strategy(structure, strategy, weights=False):
"""
Constructor for StructureGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param structure: Structure object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:param weights: if True, use weights from local_env class
(consult relevant class for their meaning)
:return:
"""
if not strategy.structures_allowed:
raise ValueError(
"Chosen strategy is not designed for use with structures! " "Please choose another strategy."
)
sg = StructureGraph.with_empty_graph(structure, name="bonds")
for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):
for neighbor in neighbors:
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another form site v to site u: this is
# harmless, so warn_duplicates=False
sg.add_edge(
from_index=n,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"] if weights else None,
warn_duplicates=False,
)
return sg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
from_jimage=(0, 0, 0),
to_jimage=None,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param from_jimage (tuple of ints): lattice vector of periodic
image, e.g. (1, 0, 0) for periodic image in +x direction
:param to_jimage (tuple of ints): lattice vector of image
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
to_jimage, from_jimage = from_jimage, to_jimage
# constrain all from_jimages to be (0, 0, 0),
# initial version of this class worked even if
# from_jimage != (0, 0, 0), but making this
# assumption simplifies logic later
if not np.array_equal(from_jimage, (0, 0, 0)):
shift = from_jimage
from_jimage = np.subtract(from_jimage, shift)
to_jimage = np.subtract(to_jimage, shift)
# automatic detection of to_jimage if user doesn't specify
# will try and detect all equivalent images and add multiple
# edges if appropriate
if to_jimage is None:
# assume we want the closest site
warnings.warn("Please specify to_jimage to be unambiguous, " "trying to automatically detect.")
dist, to_jimage = self.structure[from_index].distance_and_image(self.structure[to_index])
if dist == 0:
# this will happen when from_index == to_index,
# typically in primitive single-atom lattices
images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
dists = []
for image in images:
dists.append(
self.structure[from_index].distance_and_image(self.structure[from_index], jimage=image)[0]
)
dist = min(dists)
equiv_sites = self.structure.get_neighbors_in_shell(
self.structure[from_index].coords, dist, dist * 0.01, include_index=True
)
for nnsite in equiv_sites:
to_jimage = np.subtract(nnsite.frac_coords, self.structure[from_index].frac_coords)
to_jimage = np.round(to_jimage).astype(int)
self.add_edge(
from_index=from_index,
from_jimage=(0, 0, 0),
to_jimage=to_jimage,
to_index=nnsite.index,
)
return
# sanitize types
from_jimage, to_jimage = (
tuple(map(int, from_jimage)),
tuple(map(int, to_jimage)),
)
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between a given (site, jimage) pair and another
# (site, jimage) pair
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data:
for key, d in existing_edge_data.items():
if d["to_jimage"] == to_jimage:
if warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from "
"site {} to site {} in {}.".format(from_index, to_index, to_jimage)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, **edge_properties)
def insert_node(
self,
i,
species,
coords,
coords_are_cartesian=False,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
        A wrapper around Structure.insert(), which also incorporates the new
        site into the StructureGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param coords_are_cartesian: Whether coordinates are cartesian.
Defaults to False.
        :param validate_proximity: For Structure.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
        :param site_properties: Site properties for Structure
        :param edges: List of dicts representing edges to be added to the
            StructureGraph. These edges must include the index of the new site i,
            and all indices used for these edges should reflect the
            StructureGraph AFTER the insertion, NOT before. Each dict should at
            least have a "to_index", "from_index" and "to_jimage" key, and can
            also have a "weight" and a "properties" key.
:return:
"""
self.structure.insert(
i,
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.structure) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
from_jimage=(0, 0, 0),
to_jimage=edge["to_jimage"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Gives each node a "specie" and a "coords" attribute, updated with the
current species and coordinates.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.structure[node].specie.symbol
coords[node] = self.structure[node].coords
properties[node] = self.structure[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(
self,
from_index,
to_index,
to_jimage=None,
new_weight=None,
new_edge_properties=None,
):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
        A wrapper for Structure.remove_sites().
        :param indices: list of indices in the current Structure (and graph) to
be removed.
:return:
"""
self.structure.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Structure.substitute to replace an atom in self.structure
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: Care must be taken to ensure that the functional group that is
        substituted will not place atoms too close to each other, or violate the
dimensions of the Lattice.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.structure) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.structure.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "to_jimage" in edge_props.keys():
to_jimage = edge_props["to_jimage"]
del edge_props["to_jimage"]
else:
                    # By default, assume that all edges should remain
# inside the initial image
to_jimage = (0, 0, 0)
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
to_jimage=to_jimage,
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
for site in mapping.values():
neighbors = strat.get_nn_info(self.structure, site)
for neighbor in neighbors:
self.add_edge(
from_index=site,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"],
warn_duplicates=False,
)
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
connected_site_images = set()
out_edges = [(u, v, d, "out") for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, "in") for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d["to_jimage"]
if dir == "in":
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
site_d = self.structure[v].as_dict()
site_d["abc"] = np.add(site_d["abc"], to_jimage).tolist()
site = PeriodicSite.from_dict(site_d)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
weight = d.get("weight", None)
if (v, to_jimage) not in connected_site_images:
connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
connected_site_images.add((v, to_jimage))
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
            # magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d["to_jimage"]
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.nodes[u]["fillcolor"]
color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, degree in g.degree() if degree != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
@property
def types_and_weights_of_connections(self):
"""
Extract a dictionary summarizing the types and weights
of edges in the graph.
:return: A dictionary with keys specifying the
species involved in a connection in alphabetical order
(e.g. string 'Fe-O') and values which are a list of
weights for those connections (e.g. bond lengths).
"""
def get_label(u, v):
u_label = self.structure[u].species_string
v_label = self.structure[v].species_string
return "-".join(sorted((u_label, v_label)))
types = defaultdict(list)
for u, v, d in self.graph.edges(data=True):
label = get_label(u, v)
types[label].append(d["weight"])
return dict(types)
@property
def weight_statistics(self):
"""
Extract a statistical summary of edge weights present in
the graph.
        :return: A dict with an 'all_weights' list and 'min',
        'max', 'mean' and 'variance' statistics
"""
all_weights = [d.get("weight", None) for u, v, d in self.graph.edges(data=True)]
stats = describe(all_weights, nan_policy="omit")
return {
"all_weights": all_weights,
"min": stats.minmax[0],
"max": stats.minmax[1],
"mean": stats.mean,
"variance": stats.variance,
}
def types_of_coordination_environments(self, anonymous=False):
"""
Extract information on the different co-ordination environments
present in the graph.
:param anonymous: if anonymous, will replace specie names
with A, B, C, etc.
:return: a list of co-ordination environments,
e.g. ['Mo-S(6)', 'S-Mo(3)']
"""
motifs = set()
for idx, site in enumerate(self.structure):
centre_sp = site.species_string
connected_sites = self.get_connected_sites(idx)
connected_species = [connected_site.site.species_string for connected_site in connected_sites]
labels = []
for sp in set(connected_species):
count = connected_species.count(sp)
labels.append((count, sp))
labels = sorted(labels, reverse=True)
if anonymous:
mapping = {centre_sp: "A"}
available_letters = [chr(66 + i) for i in range(25)]
for label in labels:
sp = label[1]
if sp not in mapping:
mapping[sp] = available_letters.pop(0)
centre_sp = "A"
labels = [(label[0], mapping[label[1]]) for label in labels]
labels = ["{}({})".format(label[1], label[0]) for label in labels]
motif = "{}-{}".format(centre_sp, ",".join(labels))
motifs.add(motif)
return sorted(list(motifs))
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
        with the graph stored using `adjacency_data` from NetworkX's
        `json_graph` module.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Structure` except
        restoring graphs using `adjacency_graph`
        from NetworkX to restore graph information.
"""
s = Structure.from_dict(d["structure"])
return cls(s, d["graphs"])
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
# computationally expensive than just keeping track of the
# which new lattice images present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError("Not tested with 3x3 scaling matrices yet.")
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d["to_jimage"] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in Å for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d["to_jimage"] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
                # new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
# will remove two edges for everyone one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image % 1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
new_d["to_jimage"] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add)))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            new_g.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g),
}
sg = StructureGraph.from_dict(d)
return sg
def __rmul__(self, other):
return self.__mul__(other)
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return StructureGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.structure != other.structure and strict:
            raise ValueError("Meaningless to compare StructureGraphs if " "corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {
(str(self.structure[u].specie), str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.structure[u].specie), str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
if getattr(self, "_supercell_sg", None) is None:
self._supercell_sg = supercell_sg = self * (3, 3, 3)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)]
# discount subgraphs that lie across *supercell* boundaries
        # these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any(d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True))
if not intersects_boundary:
molecule_subgraphs.append(nx.MultiDiGraph(subgraph))
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1["specie"] == n2["specie"]
def edge_match(e1, e2):
if use_weights:
return e1["weight"] == e2["weight"]
return True
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [
nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs
]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
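    # Example sketch: pulling the unique molecules out of a molecular crystal.
    # The input file name is hypothetical and JmolNN is used purely as an
    # illustrative bonding strategy.
    #
    #     from pymatgen.core import Structure
    #     from pymatgen.analysis.local_env import JmolNN
    #
    #     structure = Structure.from_file("molecular_crystal.cif")
    #     sg = StructureGraph.with_local_env_strategy(structure, JmolNN())
    #     molecules = sg.get_subgraphs_as_molecules()
    #     print(len(molecules), "unique molecule(s)")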
class MolGraphSplitError(Exception):
"""
Raised when a molecule graph is failed to split into two disconnected
subgraphs
"""
pass
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given molecule easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(molecule, MoleculeGraph):
# just make a copy from input
graph_data = molecule.as_dict()["graphs"]
self.molecule = molecule
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
self.set_node_attributes()
@classmethod
def with_empty_graph(cls, molecule, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data)
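    # Example sketch: starting from an empty graph and adding bonds manually.
    # `mol` is an assumed, caller-supplied Molecule.
    #
    #     mg = MoleculeGraph.with_empty_graph(
    #         mol, edge_weight_name="bond_length", edge_weight_units="Å"
    #     )
    #     mg.add_edge(0, 1, weight=0.96)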
@staticmethod
def with_edges(molecule, edges):
"""
Constructor for MoleculeGraph, using pre-existing or pre-defined edges
with optional edge parameters.
:param molecule: Molecule object
:param edges: dict representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. Props should be None if no
additional properties are to be specified.
:return: mg, a MoleculeGraph
"""
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index)" "tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = mg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
mg.add_edge(from_index, to_index, weight=weight, edge_properties=props)
mg.set_node_attributes()
return mg
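    # Example sketch: building a graph for a water-like molecule from an explicit
    # edge dictionary (the coordinates below are illustrative, not reference values).
    #
    #     from pymatgen.core.structure import Molecule
    #
    #     water = Molecule(
    #         ["O", "H", "H"],
    #         [[0.00, 0.00, 0.00], [0.76, 0.59, 0.00], [-0.76, 0.59, 0.00]],
    #     )
    #     mg = MoleculeGraph.with_edges(water, {(0, 1): None, (0, 2): None})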
@staticmethod
def with_local_env_strategy(molecule, strategy):
"""
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:return: mg, a MoleculeGraph
"""
if not strategy.molecules_allowed:
raise ValueError(
"Chosen strategy is not designed for use with molecules! " "Please choose another strategy."
)
extend_structure = strategy.extend_structure_molecules
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
# NearNeighbor classes only (generally) work with structures
# molecules have to be boxed first
coords = molecule.cart_coords
if extend_structure:
a = max(coords[:, 0]) - min(coords[:, 0]) + 100
b = max(coords[:, 1]) - min(coords[:, 1]) + 100
c = max(coords[:, 2]) - min(coords[:, 2]) + 100
structure = molecule.get_boxed_structure(a, b, c, no_cross=True, reorder=False)
else:
structure = None
for n in range(len(molecule)):
if structure is None:
neighbors = strategy.get_nn_info(molecule, n)
else:
neighbors = strategy.get_nn_info(structure, n)
for neighbor in neighbors:
# all bonds in molecules should not cross
# (artificial) periodic boundaries
if not np.array_equal(neighbor["image"], [0, 0, 0]):
continue
if n > neighbor["site_index"]:
from_index = neighbor["site_index"]
to_index = n
else:
from_index = n
to_index = neighbor["site_index"]
mg.add_edge(
from_index=from_index,
to_index=to_index,
weight=neighbor["weight"],
warn_duplicates=False,
)
duplicates = []
for edge in mg.graph.edges:
if edge[2] != 0:
duplicates.append(edge)
for duplicate in duplicates:
mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])
mg.set_node_attributes()
return mg
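    # Example sketch: letting a local_env strategy detect the bonds instead of
    # listing them by hand. OpenBabelNN is just one possible choice and needs the
    # openbabel bindings installed; `water` is an assumed Molecule (e.g. the one
    # sketched above).
    #
    #     from pymatgen.analysis.local_env import OpenBabelNN
    #
    #     mg = MoleculeGraph.with_local_env_strategy(water, OpenBabelNN())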
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, **edge_properties)
def insert_node(
self,
i,
species,
coords,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.molecule.insert(
i,
species,
coords,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.molecule) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Replicates molecule site properties (specie, coords, etc.) in the
MoleculeGraph.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.molecule[node].specie.symbol
coords[node] = self.molecule[node].coords
properties[node] = self.molecule[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
            raise ValueError(
                "Edge between {} and {} cannot be altered; no edge exists between those sites.".format(
                    from_index, to_index
                )
            )
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
                raise ValueError(
                    "Edge cannot be broken between {} and {}; no edge exists between those sites.".format(
                        from_index, to_index
                    )
                )
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.molecule.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def get_disconnected_fragments(self):
"""
Determine if the MoleculeGraph is connected. If it is not, separate the
MoleculeGraph into different MoleculeGraphs, where each resulting
MoleculeGraph is a disconnected subgraph of the original.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:return: list of MoleculeGraphs
"""
if nx.is_weakly_connected(self.graph):
return [copy.deepcopy(self)]
original = copy.deepcopy(self)
sub_mols = list()
# Had to use nx.weakly_connected_components because of deprecation
# of nx.weakly_connected_component_subgraphs
subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)]
for subg in subgraphs:
nodes = sorted(list(subg.nodes))
# Molecule indices are essentially list-based, so node indices
# must be remapped, incrementing from 0
mapping = {}
for i, n in enumerate(nodes):
mapping[n] = i
# just give charge to whatever subgraph has node with index 0
# TODO: actually figure out how to distribute charge
if 0 in nodes:
charge = self.molecule.charge
else:
charge = 0
# relabel nodes in graph to match mapping
new_graph = nx.relabel_nodes(subg, mapping)
species = nx.get_node_attributes(new_graph, "specie")
coords = nx.get_node_attributes(new_graph, "coords")
raw_props = nx.get_node_attributes(new_graph, "properties")
properties = {}
for prop_set in raw_props.values():
for prop in prop_set.keys():
if prop in properties:
properties[prop].append(prop_set[prop])
else:
properties[prop] = [prop_set[prop]]
# Site properties must be present for all atoms in the molecule
# in order to be used for Molecule instantiation
for k, v in properties.items():
if len(v) != len(species):
del properties[k]
new_mol = Molecule(species, coords, charge=charge, site_properties=properties)
graph_data = json_graph.adjacency_data(new_graph)
# create new MoleculeGraph
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
return sub_mols
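    # Example sketch: recovering the pieces of an already-disconnected graph.
    # `mg` is an assumed MoleculeGraph.
    #
    #     for frag in mg.get_disconnected_fragments():
    #         print(frag.molecule.composition.reduced_formula)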
def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None):
"""
Split MoleculeGraph into two or more MoleculeGraphs by
breaking a set of bonds. This function uses
MoleculeGraph.break_edge repeatedly to create
disjoint graphs (two or more separate molecules).
This function does not only alter the graph
information, but also changes the underlying
Molecules.
If the bonds parameter does not include sufficient
bonds to separate two molecule fragments, then this
function will fail.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:param bonds: list of tuples (from_index, to_index)
representing bonds to be broken to split the MoleculeGraph.
:param alterations: a dict {(from_index, to_index): alt},
where alt is a dictionary including weight and/or edge
properties to be changed following the split.
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: list of MoleculeGraphs
"""
self.set_node_attributes()
original = copy.deepcopy(self)
for bond in bonds:
original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
if nx.is_weakly_connected(original.graph):
            raise MolGraphSplitError("Cannot split molecule; MoleculeGraph is still connected.")
# alter any bonds before partition, to avoid remapping
if alterations is not None:
for (u, v) in alterations.keys():
if "weight" in alterations[(u, v)]:
weight = alterations[(u, v)]["weight"]
del alterations[(u, v)]["weight"]
edge_properties = alterations[(u, v)] if len(alterations[(u, v)]) != 0 else None
original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties)
else:
original.alter_edge(u, v, new_edge_properties=alterations[(u, v)])
return original.get_disconnected_fragments()
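    # Example sketch: splitting an assumed MoleculeGraph `mg` across its (0, 1)
    # bond, and handling the case where that bond does not disconnect the graph.
    #
    #     try:
    #         pieces = mg.split_molecule_subgraphs([(0, 1)])
    #     except MolGraphSplitError:
    #         pieces = []  # e.g. the chosen bond was part of a ring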
def build_unique_fragments(self):
"""
Find all possible fragment combinations of the MoleculeGraphs (in other
words, all connected induced subgraphs)
:return:
"""
self.set_node_attributes()
graph = self.graph.to_undirected()
# find all possible fragments, aka connected induced subgraphs
frag_dict = {}
for ii in range(1, len(self.molecule)):
for combination in combinations(graph.nodes, ii):
mycomp = []
for idx in combination:
mycomp.append(str(self.molecule[idx].specie))
mycomp = "".join(sorted(mycomp))
subgraph = nx.subgraph(graph, combination)
if nx.is_connected(subgraph):
mykey = mycomp + str(len(subgraph.edges()))
if mykey not in frag_dict:
frag_dict[mykey] = [copy.deepcopy(subgraph)]
else:
frag_dict[mykey].append(copy.deepcopy(subgraph))
# narrow to all unique fragments using graph isomorphism
unique_frag_dict = {}
for key in frag_dict:
unique_frags = []
for frag in frag_dict[key]:
found = False
for f in unique_frags:
if _isomorphic(frag, f):
found = True
break
if not found:
unique_frags.append(frag)
unique_frag_dict[key] = copy.deepcopy(unique_frags)
# convert back to molecule graphs
unique_mol_graph_dict = {}
for key in unique_frag_dict:
unique_mol_graph_list = []
for fragment in unique_frag_dict[key]:
mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
remapped = nx.relabel_nodes(fragment, mapping)
species = nx.get_node_attributes(remapped, "specie")
coords = nx.get_node_attributes(remapped, "coords")
edges = {}
for from_index, to_index, key in remapped.edges:
edge_props = fragment.get_edge_data(from_index, to_index, key=key)
edges[(from_index, to_index)] = edge_props
unique_mol_graph_list.append(
self.with_edges(
Molecule(species=species, coords=coords, charge=self.molecule.charge),
edges,
)
)
frag_key = (
str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula)
+ " E"
+ str(len(unique_mol_graph_list[0].graph.edges()))
)
unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list)
return unique_mol_graph_dict
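    # Example sketch: enumerating the unique connected fragments of an assumed
    # MoleculeGraph `mg` and counting how many fall under each formula/edge key.
    #
    #     for key, graphs in mg.build_unique_fragments().items():
    #         print(key, len(graphs))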
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute to replace an atom in self.molecule
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: using a MoleculeGraph will generally produce a different graph
compared with using a Molecule or str (when not using graph_dict).
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.molecule) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
# Work is simplified if a graph is already in place
if isinstance(func_grp, MoleculeGraph):
self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order)
mapping = map_indices(func_grp.molecule)
for (u, v) in list(func_grp.graph.edges()):
edge_props = func_grp.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
else:
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.molecule.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
graph = self.with_local_env_strategy(func_grp, strat)
for (u, v) in list(graph.graph.edges()):
edge_props = graph.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
if 0 not in list(graph.graph.nodes()):
# If graph indices have different indexing
u, v = (u - 1), (v - 1)
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
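    # Example sketch: replacing the atom at (assumed) site index 1 with a methyl
    # group, assuming a "methyl" entry is available in FunctionalGroups. Note that
    # `strategy` is passed as a class and is only instantiated internally when no
    # graph_dict is given.
    #
    #     from pymatgen.analysis.local_env import OpenBabelNN
    #
    #     mg.substitute_group(1, "methyl", OpenBabelNN)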
def replace_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
# If the atom at index is terminal
if len(neighbors) == 1:
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
raise RuntimeError(
"Currently functional group replacement" "cannot occur at an atom within a ring" "structure."
)
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
value will be an empty list.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i - 1], e))
cycles_edges.append(edges)
return cycles_edges
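    # Example sketch: checking whether site 0 of an assumed MoleculeGraph `mg`
    # is part of any ring.
    #
    #     rings = mg.find_rings(including=[0])
    #     in_ring = len(rings) > 0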
def get_connected_sites(self, n):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Molecule
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = list(self.graph.out_edges(n, data=True))
in_edges = list(self.graph.in_edges(n, data=True))
for u, v, d in out_edges + in_edges:
weight = d.get("weight", None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
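    # Example sketch: listing the neighbors of site 0 of an assumed
    # MoleculeGraph `mg`, closest first.
    #
    #     for cs in mg.get_connected_sites(0):
    #         print(cs.index, cs.site.specie, round(cs.dist, 3))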
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminescence
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
if "to_image" in d:
to_image = d["to_jimage"]
else:
to_image = (0, 0, 0)
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
            color_u = g.nodes[u]["fillcolor"]
            color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
def as_dict(self):
"""
As in :Class: `pymatgen.core.Molecule` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Molecule` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
m = Molecule.from_dict(d["molecule"])
return cls(m, d["graphs"])
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Molecule / number of nodes in graph
"""
return len(self.molecule)
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return MoleculeGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two MoleculeGraphs are equal if they have equal Molecules,
and have the same edges between Sites. Edge weights can be
different and MoleculeGraphs can still be considered equal.
:param other: MoleculeGraph
:return (bool):
"""
# sort for consistent node indices
        # Site coordinates are used as a convenient (hashable) key;
        # Molecule sites have no frac_coords, so cartesian coords are used
try:
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
except ValueError:
return False
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.molecule == other_sorted.molecule)
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
:param other: MoleculeGraph object to be compared.
:return: bool
"""
if len(self.molecule) != len(other.molecule):
return False
if self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula:
return False
if len(self.graph.edges()) != len(other.graph.edges()):
return False
return _isomorphic(self.graph, other.graph)
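    # Example sketch: isomorphism ignores site ordering, so the same molecule
    # with shuffled atom indices still compares as True. `mg` and `other_mg`
    # are assumed MoleculeGraphs of that molecule.
    #
    #     same_topology = mg.isomorphic_to(other_mg)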
def diff(self, other, strict=True):
"""
Compares two MoleculeGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one MoleculeGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two MoleculeGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the MoleculeGraph this method is called
from, not the 'other' MoleculeGraph: there
is no guarantee the node indices will be the
same if the underlying Molecules are ordered
differently.
:param other: MoleculeGraph
:param strict: if False, will compare bonds
from different Molecules, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.molecule != other.molecule and strict:
return ValueError("Meaningless to compare MoleculeGraphs if " "corresponding Molecules are different.")
if strict:
# sort for consistent node indices
            # Site coordinates are used as a convenient (hashable) key;
            # Molecule sites have no frac_coords, so cartesian coords are used
            mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
            other_sorted = other.__copy__()
            other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {
(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True)
}
else:
edges = {
(str(self.molecule[u].specie), str(self.molecule[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
                (str(other.molecule[u].specie), str(other.molecule[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
| 2.1875 | 2 |
maple/backend/singularity/__init__.py | akashdhruv/maple | 0 | 2159 | from . import image
from . import container
from . import system
| 1.101563 | 1 |
articles/views.py | Ahmed-skb/blogyfy | 0 | 2160 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Article
from django.contrib.auth.decorators import login_required
from . import forms
def Articles(request):
articles = Article.objects.all().order_by('date')
return render(request, 'articles/article_list.html', {'articles': articles})
def article_detail(request, slug):
# return HttpResponse(slug)
article = Article.objects.get(slug=slug)
return render(request, 'articles/article_details.html', {'article': article})
@login_required(login_url="/accounts/login")
def article_create(request):
if request.method == 'POST':
form = forms.CreateArticle(request.POST, request.FILES)
if form.is_valid():
#save article to DB
instance = form.save(commit=False)
instance.author = request.user
            instance.save()
            return redirect('articles:list')
else:
form = forms.CreateArticle()
return render(request, 'articles/article_create.html', {'form':form})
| 2.265625 | 2 |
sifter/grammar/grammar.py | russell/sifter | 0 | 2161 | <gh_stars>0
# Parser based on RFC 5228, especially the grammar as defined in section 8. All
# references are to sections in RFC 5228 unless stated otherwise.
import ply.yacc
import sifter.grammar
from sifter.grammar.lexer import tokens
import sifter.handler
import logging
__all__ = ('parser',)
def parser(**kwargs):
return ply.yacc.yacc(**kwargs)
def p_commands_list(p):
"""commands : commands command"""
p[0] = p[1]
# section 3.2: REQUIRE command must come before any other commands
if p[2].RULE_IDENTIFIER == 'REQUIRE':
if any(command.RULE_IDENTIFIER != 'REQUIRE'
for command in p[0].commands):
log = logging.getLogger("sifter")
log.error(("REQUIRE command on line %d must come before any "
"other non-REQUIRE commands" % p.lineno(2)))
raise SyntaxError
# section 3.1: ELSIF and ELSE must follow IF or another ELSIF
elif p[2].RULE_IDENTIFIER in ('ELSIF', 'ELSE'):
if p[0].commands[-1].RULE_IDENTIFIER not in ('IF', 'ELSIF'):
log = logging.getLogger("sifter")
log.error(("ELSIF/ELSE command on line %d must follow an IF/ELSIF "
"command" % p.lineno(2)))
raise SyntaxError
p[0].commands.append(p[2])
def p_commands_empty(p):
"""commands : """
p[0] = sifter.grammar.CommandList()
def p_command(p):
"""command : IDENTIFIER arguments ';'
| IDENTIFIER arguments block"""
#print("COMMAND:", p[1], p[2], p[3])
tests = p[2].get('tests')
block = None
if p[3] != ';': block = p[3]
handler = sifter.handler.get('command', p[1])
if handler is None:
log = logging.getLogger("sifter")
log.error(("No handler registered for command '%s' on line %d" %
(p[1], p.lineno(1))))
raise SyntaxError
p[0] = handler(arguments=p[2]['args'], tests=tests, block=block)
def p_command_error(p):
"""command : IDENTIFIER error ';'
| IDENTIFIER error block"""
log = logging.getLogger("sifter")
log.error(("Syntax error in command definition after %s on line %d" %
(p[1], p.lineno(1))))
raise SyntaxError
def p_block(p):
"""block : '{' commands '}' """
# section 3.2: REQUIRE command must come before any other commands,
# which means it can't be in the block of another command
if any(command.RULE_IDENTIFIER == 'REQUIRE'
for command in p[2].commands):
log = logging.getLogger("sifter")
log.error(("REQUIRE command not allowed inside of a block (line %d)" %
(p.lineno(2))))
raise SyntaxError
p[0] = p[2]
def p_block_error(p):
"""block : '{' error '}'"""
log = logging.getLogger("sifter")
log.error(("Syntax error in command block that starts on line %d" %
(p.lineno(1),)))
raise SyntaxError
def p_arguments(p):
"""arguments : argumentlist
| argumentlist test
| argumentlist '(' testlist ')'"""
p[0] = { 'args' : p[1], }
if len(p) > 2:
if p[2] == '(':
p[0]['tests'] = p[3]
else:
p[0]['tests'] = [ p[2] ]
def p_testlist_error(p):
"""arguments : argumentlist '(' error ')'"""
log = logging.getLogger("sifter")
log.error(("Syntax error in test list that starts on line %d" % p.lineno(2)))
raise SyntaxError
def p_argumentlist_list(p):
"""argumentlist : argumentlist argument"""
p[0] = p[1]
p[0].append(p[2])
def p_argumentlist_empty(p):
"""argumentlist : """
p[0] = []
def p_test(p):
"""test : IDENTIFIER arguments"""
#print("TEST:", p[1], p[2])
tests = p[2].get('tests')
handler = sifter.handler.get('test', p[1])
if handler is None:
log = logging.getLogger("sifter")
log.error(("No handler registered for test '%s' on line %d" %
(p[1], p.lineno(1))))
raise SyntaxError
p[0] = handler(arguments=p[2]['args'], tests=tests)
def p_testlist_list(p):
"""testlist : test ',' testlist"""
p[0] = p[3]
p[0].insert(0, p[1])
def p_testlist_single(p):
"""testlist : test"""
p[0] = [ p[1] ]
def p_argument_stringlist(p):
"""argument : '[' stringlist ']'"""
p[0] = p[2]
def p_argument_string(p):
"""argument : string"""
# for simplicity, we treat all single strings as a string list
p[0] = [ p[1] ]
def p_argument_number(p):
"""argument : NUMBER"""
p[0] = p[1]
def p_argument_tag(p):
"""argument : TAG"""
p[0] = sifter.grammar.Tag(p[1])
def p_stringlist_error(p):
"""argument : '[' error ']'"""
log = logging.getLogger("sifter")
log.error(("Syntax error in string list that starts on line %d" %
p.lineno(1)))
raise SyntaxError
def p_stringlist_list(p):
"""stringlist : string ',' stringlist"""
p[0] = p[3]
p[0].insert(0, p[1])
def p_stringlist_single(p):
"""stringlist : string"""
p[0] = [ p[1] ]
def p_string(p):
"""string : QUOTED_STRING"""
p[0] = sifter.grammar.String(p[1])
| 2.59375 | 3 |
multidoc_mnb.py | dropofwill/author-attr-experiments | 2 | 2162 | <gh_stars>1-10
from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import ShuffleSplit
from sklearn.cross_validation import Bootstrap
from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV
from scipy.stats import sem
from pprint import pprint
import numpy as np
import pylab as pl
import string
import matplotlib.pyplot as plt
# Calculates the mean of the scores with the standard deviation
def mean_sem(scores):
return ("Mean score: {0:.3f} (+/-{1:.3f})").format(np.mean(scores), sem(scores))
def test_docs(dir):
# Load documents
docs = datasets.load_files(container_path="../../sklearn_data/"+dir)
X, y = docs.data, docs.target
baseline = 1/float(len(list(np.unique(y))))
# Select Features via Bag of Words approach without stop words
#X = CountVectorizer(charset_error='ignore', stop_words='english', strip_accents='unicode', ).fit_transform(X)
X = TfidfVectorizer(charset_error='ignore', stop_words='english', analyzer='char', ngram_range=(2,4), strip_accents='unicode', sublinear_tf=True, max_df=0.5).fit_transform(X)
n_samples, n_features = X.shape
# sklearn's grid search
parameters = { 'alpha': np.logspace(-100,0,10)}
bv = Bootstrap(n_samples, n_iter=10, test_size=0.3, random_state=42)
mnb_gv = GridSearchCV(MultinomialNB(), parameters, cv=bv,)
#scores = cross_val_score(mnb_gv, X, y, cv=bv)
mnb_gv.fit(X, y)
mnb_gv_best_params = mnb_gv.best_params_.values()[0]
print mnb_gv.best_score_
print mnb_gv_best_params
# CV with Bootstrap
mnb = MultinomialNB(alpha=mnb_gv_best_params)
boot_scores = cross_val_score(mnb, X, y, cv=bv)
print mean_sem(boot_scores)
improvement = (mnb_gv.best_score_ - baseline) / baseline
rand_baseline.append(baseline)
test_results.append([mnb_gv.best_score_])
com_results.append(improvement)
sem_results.append(sem(boot_scores))
def graph(base_list, results_list, com_list, arange):
N=arange
base=np.array(base_list)
res=np.array(results_list)
com = np.array(com_list)
ind = np.arange(N) # the x locations for the groups
width = 0.3 # the width of the bars: can also be len(x) sequence
    #fig, ax = plt.subplots()
p1 = plt.bar(ind, base, width, color='r')
p2 = plt.bar(ind+0.3, res, width, color='y')
p3 = plt.bar(ind+0.6, com, width, color='b')
plt.rcParams['figure.figsize'] = 10, 7.5
plt.rcParams['axes.grid'] = True
plt.gray()
plt.ylabel('Accuracy')
plt.title('AAAC Problem Accuracy')
plt.yticks(np.arange(0,3,30))
plt.xticks(np.arange(0,13,13))
#plt.set_xticks(('A','B','C','D','E','F','G','H','I','J','K','L','M'))
plt.legend( (p1[0], p2[0], p3[0]), ('Baseline', 'Algorithm', 'Improvement'))
plt.show()
rand_baseline = list()
test_results = list()
sem_results = list()
com_results = list()
#test_docs("problemA")
for i in string.uppercase[:13]:
test_docs("problem"+i)
#graph(rand_baseline,test_results,com_results,13)
import os
import time as tm
sub_dir = "Results/"
location = "multiDoc" + tm.strftime("%Y%m%d-%H%M%S") + ".txt"
with open(os.path.join(sub_dir, location), 'w') as myFile:
myFile.write(str(rand_baseline))
myFile.write("\n")
myFile.write(str(test_results))
myFile.write("\n")
myFile.write(str(sem_results))
myFile.write("\n")
myFile.write(str(com_results))
# CV with ShuffleSpit
'''
cv = ShuffleSplit(n_samples, n_iter=100, test_size=0.2, random_state=0)
test_scores = cross_val_score(mnb, X, y, cv=cv)
print np.mean(test_scores)
'''
# Single run through
'''
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print X_train.shape
print y_train.shape
print X_test.shape
print y_test.shape
mnb = MultinomialNB().fit(X_train, y_train)
print mnb.score(X_test, y_test)
''' | 2.625 | 3 |
tabular/__init__.py | yamins81/tabular | 6 | 2163 | import io
import fast
import spreadsheet
import tab
import utils
import web
from io import *
from fast import *
from spreadsheet import *
from tab import *
from utils import *
from web import *
__all__ = []
__all__.extend(io.__all__)
__all__.extend(fast.__all__)
__all__.extend(spreadsheet.__all__)
__all__.extend(tab.__all__)
__all__.extend(utils.__all__)
__all__.extend(web.__all__) | 1.320313 | 1 |
smipyping/_targetstable.py | KSchopmeyer/smipyping | 0 | 2164 | # (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the base of targets (i.e. systems to be tested)
TargetID = Column(Integer(11), primary_key=True)
IPAddress = Column(String(15), nullable=False)
CompanyID = Column(Integer(11), ForeignKey("Companies.CompanyID"))
Namespace = Column(String(30), nullable=False)
SMIVersion = Column(String(15), nullable=False)
Product = Column(String(30), nullable=False)
Principal = Column(String(30), nullable=False)
Credential = Column(String(30), nullable=False)
CimomVersion = Column(String(30), nullable=False)
InteropNamespace = Column(String(30), nullable=False)
Notify = Column(Enum('Enabled', 'Disabled'), default='Disabled')
NotifyUsers = Column(String(12), nullable=False)
ScanEnabled = Column(Enum('Enabled', 'Disabled'), default='Enabled')
Protocol = Column(String(10), default='http')
Port = Column(String(10), nullable=False)
"""
# TODO change ip_address to hostname where host name is name : port
from __future__ import print_function, absolute_import
import os
import csv
import re
from collections import OrderedDict
from textwrap import wrap
import six
from mysql.connector import Error as mysqlerror
from ._dbtablebase import DBTableBase
from ._mysqldbmixin import MySQLDBMixin
from ._common import get_url_str
from ._logging import AUDIT_LOGGER_NAME, get_logger
from ._companiestable import CompaniesTable
__all__ = ['TargetsTable']
class TargetsTable(DBTableBase):
"""
Class representing the targets db table.
This base contains information on the targets, host systems, etc. in the
environment.
The factory method should be used to construct a new TargetsTable object
since that creates the correct object for the defined database type.
"""
table_name = 'Targets'
key_field = 'TargetID'
# Fields that are required to create new records
required_fields = [
'IPAddress', 'CompanyID', 'Namespace',
'SMIVersion', 'Product', 'Principal', 'Credential',
'CimomVersion', 'InteropNamespace', 'Notify', 'NotifyUsers',
'ScanEnabled', 'Protocol', 'Port']
# All fields in each record.
fields = [key_field] + required_fields
join_fields = ['CompanyName']
all_fields = fields + join_fields
hints = {
'IPAddress': "Host name or ip address",
'CompanyID': "DB id of company",
'Namespace': "User namespace",
'SMIVersion': "SMI version",
'Product': "Product name",
'Principal': "User Name to access target",
'Credential': "User password to access target",
'CimomVersion': "Version of CIMOM",
'InteropNamespace': "Interop Namespace name",
'Notify': "'Enabled' if users to be notified of issues, else "
"'Disabled'",
'NotifyUsers': "List of UserIDs to notify",
'ScanEnabled': "Enabled if this target to be scanned",
'Protocol': '"http" or "https"',
'Port': "Integer defining WBEM server port."}
# # Defines each record for the data base and outputs.
# # The Name is the database name for the property
# # The value tuple is display name and max width for the record
table_format_dict = OrderedDict([
('TargetID', ('ID', 2, int)),
('CompanyName', ('CompanyName', 12, str)),
('Namespace', ('Namespace', 12, str)),
('SMIVersion', ('SMIVersion', 12, str)),
('Product', ('Product', 15, str)),
('Principal', ('Principal', 12, str)),
('Credential', ('Credential', 12, str)),
('CimomVersion', ('CimomVersion', 15, str)),
('IPAddress', ('IPAddress', 12, str)),
('InteropNamespace', ('Interop', 8, str)),
('Notify', ('Notify', 12, str)),
('NotifyUsers', ('NotifyUsers', 12, str)),
('Protocol', ('Prot', 5, str)),
('Port', ('Port', 4, int)),
('ScanEnabled', ('Enabled', 6, str)),
]) # noqa: E123
def __init__(self, db_dict, db_type, verbose, output_format):
"""Initialize the abstract Targets instance.
This controls all other
target bases. This defines the common definition of all targets bases
including field names, and common methods.
Parameters:
db_dict (:term: `dictionary')
Dictionary containing all of the parameters to open the database
defined by the db_dict attribute.
db_type (:term: `string`)
String defining one of the allowed database types for the
target database.
verbose (:class:`py:bool`)
Boolean. If true detailed info is displayed on the processing
of the TargetData class
output_format (:term:`string`)
String defining one of the legal report output formats. If not
provided, the default is a simple report format.
"""
super(TargetsTable, self).__init__(db_dict, db_type, verbose)
self.output_format = output_format
# def __str__(self):
# # # TODO this and __repr__ do not really match.
# # """String info on targetdata. TODO. Put more info here"""
# # return ('type=%s db=%s, len=%s' % (self.db_type, self.get_dbdict(),
# # # len(self.data_dict)))
# def __repr__(self):
# # """Rep of target data"""
# # return ('Targetdata db_type %s, rep count=%s' %
# # # (self.db_type, len(self.data_dict)))
def test_fieldnames(self, fields):
"""Test a list of field names. This test generates an exception,
KeyError if a field in fields is not in the table
"""
for field in fields:
self.table_format_dict[field] # pylint: disable=pointless-statement
def get_dbdict(self):
"""Get string for the db_dict"""
return '%s' % self.db_dict
@classmethod
def factory(cls, db_dict, db_type, verbose, output_format='simple'):
"""Factory method to select subclass based on database type (db_type).
Currently the types sql and csv are supported.
Returns instance object of the defined provider type.
"""
inst = None
if verbose:
print('targetdata factory datafile %s dbtype %s verbose %s'
% (db_dict, db_type, verbose))
if db_type == ('csv'):
inst = CsvTargetsTable(db_dict, db_type, verbose,
output_format=output_format)
elif db_type == ('mysql'):
inst = MySQLTargetsTable(db_dict, db_type, verbose,
output_format=output_format)
else:
            raise ValueError('Invalid targets factory db_type %s' % db_type)
if verbose:
print('Resulting targets factory inst %r' % inst)
return inst
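    # A minimal usage sketch (illustrative only; the db_dict keys shown are
    # assumed for the csv backend and are not taken from this module):
    #
    #   db_dict = {'targetsfilename': 'targets.csv', 'directory': '.'}
    #   targets_tbl = TargetsTable.factory(db_dict, 'csv', verbose=False)
    #   print(targets_tbl.get_field_list())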
def get_field_list(self):
"""Return a list of the base table field names in the order defined."""
return list(self.table_format_dict)
def get_format_dict(self, name):
"""Return tuple of display name and length for name."""
return self.table_format_dict[name]
def get_enabled_targetids(self):
"""Get list of target ids that are marked enabled."""
return [x for x in self.data_dict if not self.disabled_target_id(x)]
def get_disabled_targetids(self):
"""Get list of target ids that are marked disabled"""
return [x for x in self.data_dict
if self.disabled_target_id(x)]
# TODO we have multiple of these. See get dict_for_host,get_hostid_list
def get_targets_host(self, host_data):
"""
If an record for `host_data` exists return that record,
otherwise return None.
There may be multiple ipaddress, port entries for a
single ipaddress, port in the database
Parameters:
host_id(tuple of hostname or ipaddress and port)
Returns list of targetdata keys
"""
# TODO clean up for PY 3
return_list = []
for key, value in self.data_dict.items():
port = value["Port"]
# TODO port from database is a string. Should be int internal.
if value["IPAddress"] == host_data[0] and int(port) == host_data[1]:
return_list.append(key)
return return_list
def get_target(self, targetid):
"""
Get the target data for the parameter target_id.
        This is an alternative to using [id] directly. It does an additional check
for correct type for target_id
Returns:
target as dictionary
Exceptions:
KeyError if target not in targets dictionary
"""
if not isinstance(targetid, six.integer_types):
targetid = int(targetid)
return self.data_dict[targetid]
def filter_targets(self, ip_filter=None, company_name_filter=None):
"""
Filter for match of ip_filter and companyname filter if they exist
and return list of any targets that match.
The filters are regex strings.
"""
rtn = OrderedDict()
for key, value in self.data_dict.items():
if ip_filter and re.match(ip_filter, value['IPAddress']):
rtn[key] = value
if company_name_filter and \
                    re.match(company_name_filter, value['CompanyName']):
rtn[key] = value
return rtn
def build_url(self, targetid):
"""Get the string representing the url for targetid. Gets the
Protocol, IPaddress and port and uses the common get_url_str to
create a string. Port info is included only if it is not the
WBEM CIM-XML standard definitions.
"""
target = self[targetid]
return get_url_str(target['Protocol'], target['IPAddress'],
target['Port'])
def get_hostid_list(self, ip_filter=None, company_name_filter=None):
"""
Get all WBEM Server ipaddresses in the targets base.
Returns list of IP addresses:port entries.
TODO: Does not include port right now.
"""
output_list = []
# TODO clean up for python 3
for _id, value in self.data_dict.items():
if self.verbose:
print('get_hostid_list value %s' % (value,))
output_list.append(value['IPAddress'])
return output_list
def tbl_hdr(self, record_list):
"""Return a list of all the column headers from the record_list."""
hdr = []
for name in record_list:
value = self.get_format_dict(name)
hdr.append(value[0])
return hdr
def get_notifyusers(self, targetid):
"""
Get list of entries in the notify users field and split into python
list and return the list of integers representing the userids.
This list stored in db as string of integers separated by commas.
Returns None if there is no data in NotifyUsers.
"""
notify_users = self[targetid]['NotifyUsers']
if notify_users:
notify_users_list = notify_users.split(',')
notify_users_list = [int(userid) for userid in notify_users_list]
return notify_users_list
return None
def format_record(self, record_id, fields, fold=False):
"""Return the fields defined in field_list for the record_id in
display format.
String fields will be folded if their width is greater than the
specification in the format_dictionary and fold=True
"""
# TODO can we make this a std cvt function.
target = self.get_target(record_id)
line = []
for field_name in fields:
field_value = target[field_name]
fmt_value = self.get_format_dict(field_name)
max_width = fmt_value[1]
field_type = fmt_value[2]
            if field_type is str and field_value:
if max_width < len(field_value):
line.append('\n'.join(wrap(field_value, max_width)))
else:
line.append('%s' % field_value)
else:
line.append('%s' % field_value)
return line
def disabled_target(self, target_record): # pylint: disable=no-self-use
"""
If target_record disabled, return true, else return false.
"""
val = target_record['ScanEnabled'].lower()
if val == 'enabled':
return False
if val == 'disabled':
return True
        raise ValueError('ScanEnabled field must contain "Enabled" or '
                         '"Disabled" string. %s is invalid.' % val)
def disabled_target_id(self, targetid):
"""
        Return True if the target record for this target_id is marked
        disabled. Otherwise return False.
        Parameters:
          target_id(:term:`integer`)
            Valid target id for the targets table.
Returns: (:class:`py:bool`)
True if this target id disabled
Exceptions:
KeyError if target_id not in database
"""
return(self.disabled_target(self.data_dict[targetid]))
def get_output_width(self, col_list):
"""
Get the width of a table from the column names in the list
"""
total_width = 0
for name in col_list:
value = self.get_format_dict(name)
total_width += value[1]
return total_width
def get_unique_creds(self):
"""
Get the set of Credentials and Principal that represents the
unique combination of both. The result could be used to test with
        all Principals/Credentials known in the db.
        Returns a list of (Principal, Credential) tuples representing the
        unique combinations found.
"""
creds = {k: '%s%s' % (v['Principal'], v['Credential'])
for k, v in self.data_dict.items()}
ucreds = dict([[v, k] for k, v in creds.items()])
unique_keys = dict([[v, k] for k, v in ucreds.items()])
unique_creds = [(self.data_dict[k]['Principal'],
self.data_dict[k]['Credential']) for k in unique_keys]
return unique_creds
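# Hedged example of filtering a loaded table: both filters are regex strings
# applied with re.match, so the pattern below is an illustrative assumption.
#
#   matches = targets_tbl.filter_targets(ip_filter=r'10\.1\.')
#   for targetid, record in matches.items():
#       print(targetid, record['IPAddress'], record['CompanyName'])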
class SQLTargetsTable(TargetsTable):
"""
Subclass of Targets data for all SQL databases. Subclasses of this class
support specialized sql databases.
"""
def __init__(self, db_dict, dbtype, verbose, output_format):
"""Pass through to SQL"""
if verbose:
print('SQL Database type %s verbose=%s' % (db_dict, verbose))
super(SQLTargetsTable, self).__init__(db_dict, dbtype, verbose,
output_format)
self.connection = None
class MySQLTargetsTable(SQLTargetsTable, MySQLDBMixin):
"""
    This subclass of TargetsTable processes targets information from an SQL
database.
Generate the targetstable from the sql database targets table and
the companies table, by mapping the data to the dictionary defined
for targets
"""
# TODO filename is config file name, not actual file name.
def __init__(self, db_dict, dbtype, verbose, output_format):
"""Read the input file into a dictionary."""
super(MySQLTargetsTable, self).__init__(db_dict, dbtype, verbose,
output_format)
self.connectdb(db_dict, verbose)
self._load_table()
self._load_joins()
def _load_joins(self):
"""
Load the tables that would normally be joins. In this case it is the
companies table. Move the companyName into the targets table
TODO we should not be doing this in this manner but with a
join.
"""
# Get companies table and insert into targets table:
# TODO in smipyping name is db_dict. Elsewhere it is db_info
companies_tbl = CompaniesTable.factory(self.db_dict,
self.db_type,
self.verbose)
try:
# set the companyname into the targets table
for target_key in self.data_dict:
target = self.data_dict[target_key]
if target['CompanyID'] in companies_tbl:
company = companies_tbl[target['CompanyID']]
target['CompanyName'] = company['CompanyName']
else:
target['CompanyName'] = "TableError CompanyID %s" % \
target['CompanyID']
except Exception as ex:
raise ValueError('Error: putting Company Name in table %r error %s'
% (self.db_dict, ex))
def update_fields(self, targetid, changes):
"""
Update the database record defined by targetid with the dictionary
of items defined by changes where each item is an entry in the
target record. Update does NOT test if the new value is the same
as the original value.
"""
cursor = self.connection.cursor()
# dynamically build the update sql based on the changes dictionary
set_names = "SET "
values = []
comma = False
for key, value in changes.items():
if comma:
set_names = set_names + ", "
else:
comma = True
set_names = set_names + "{0} = %s".format(key)
values.append(value)
values.append(targetid)
sql = "Update Targets " + set_names
# append targetid component
sql = sql + " WHERE TargetID=%s"
# Record the original data for the audit log.
original_data = {}
target_record = self.get_target(targetid)
for change in changes:
original_data[change] = target_record[change]
try:
cursor.execute(sql, tuple(values))
self.connection.commit()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetsTable TargetID: %s, update fields: %s, '
'original fields: %s',
targetid, changes, original_data)
except Exception as ex:
self.connection.rollback()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetsTable TargetID: %s failed SQL update. '
'SQL: %s Changes: %s Exception: %s',
targetid, sql, changes, ex)
raise ex
finally:
self._load_table()
self._load_joins()
cursor.close()
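    # Illustrative call (the target id and new values are made up): update a
    # single target's protocol and port, which also refreshes the cached table.
    #
    #   targets_tbl.update_fields(42, {'Protocol': 'https', 'Port': 5989})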
def activate(self, targetid, activate_flag):
"""
Activate or deactivate the table entry defined by the
targetid parameter to the value defined by the activate_flag
Parameters:
targetid (:term:`py:integer`):
The database key property for this table
activate_flag (:class:`py:bool`):
Next state that will be set into the database for this target.
            Since the db field is an enum it actually sets the 'Enabled' or
            'Disabled' strings into the field.
"""
cursor = self.connection.cursor()
enabled_kw = 'Enabled' if activate_flag else 'Disabled'
sql = 'UPDATE Targets SET ScanEnabled = %s WHERE TargetID = %s'
try:
cursor.execute(sql, (enabled_kw, targetid)) # noqa F841
self.connection.commit()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetTable TargetId %s,set scanEnabled to %s',
targetid, enabled_kw)
except mysqlerror as ex:
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetTable userid %s failed SQL change '
'ScanEnabled. SQL=%s '
'Change to %s exception %s: %s',
targetid, sql, enabled_kw, ex.__class__.__name__,
ex)
self.connection.rollback()
raise ex
finally:
self._load_table()
self._load_joins()
def delete(self, targetid):
"""
Delete the target in the targets table defined by the targetid
"""
cursor = self.connection.cursor()
sql = "DELETE FROM Targets WHERE TargetID=%s"
try:
# pylint: disable=unused-variable
mydata = cursor.execute(sql, (targetid,)) # noqa F841
self.connection.commit()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetTable TargetId %s Deleted', targetid)
except mysqlerror as ex:
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetTable targetid %s failed SQL DELETE. '
'SQL=%s exception %s: %s',
targetid, sql, ex.__class__.__name__, ex)
self.connection.rollback()
raise ex
finally:
self._load_table()
self._load_joins()
self.connection.close()
def insert(self, fields):
"""
Write a new record to the database containing the fields defined in
the input.
Parameters:
          fields (:term:`dictionary`)
Dictionary of fields to be inserted into the table. There is
one entry in the dictionary for each field to be inserted.
Exceptions:
"""
cursor = self.connection.cursor()
placeholders = ', '.join(['%s'] * len(fields))
columns = ', '.join(fields.keys())
sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (self.table_name,
columns,
placeholders)
try:
cursor.execute(sql, fields.values())
self.connection.commit()
new_targetid = cursor.lastrowid
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetsTable TargetId %s added. %s',
new_targetid, fields)
except mysqlerror as ex:
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetTable INSERT failed SQL update. SQL=%s. '
'data=%s. Exception %s: %s', sql, fields,
ex.__class__.__name__, ex)
self.connection.rollback()
raise ex
finally:
self._load_table()
self._load_joins()
self.connection.close()
class CsvTargetsTable(TargetsTable):
"""Comma Separated Values form of the Target base."""
def __init__(self, db_dict, dbtype, verbose, output_format):
"""Read the input file into a dictionary."""
super(CsvTargetsTable, self).__init__(db_dict, dbtype, verbose,
output_format)
fn = db_dict['targetsfilename']
self.filename = fn
# If the filename is not a full directory, the data file must be
# either in the local directory or the same directory as the
# config file defined by the db_dict entry directory
if os.path.isabs(fn):
if not os.path.isfile(fn):
                raise ValueError('CSV file %s does not exist ' % fn)
else:
self.filename = fn
else:
if os.path.isfile(fn):
self.filename = fn
else:
full_fn = os.path.join(db_dict['directory'], fn)
if not os.path.isfile(full_fn):
                    raise ValueError(
                        'CSV file %s does not exist '
                        'in local directory or config directory %s' %
                        (fn, db_dict['directory']))
else:
self.filename = full_fn
with open(self.filename) as input_file:
reader = csv.DictReader(input_file)
# create dictionary (id = key) with dictionary for
# each set of entries
result = {}
for row in reader:
key = int(row['TargetID'])
if key in result:
# duplicate row handling
print('ERROR. Duplicate Id in table: %s\nrow=%s' %
(key, row))
raise ValueError('Input Error. duplicate Id')
else:
result[key] = row
self.data_dict = result
def write_updated_record(self, record_id):
"""Backup the existing file and write the new one.
           with csv it writes the whole file back
"""
backfile = '%s.bak' % self.filename
# TODO does this cover directories/clean up for possible exceptions.
if os.path.isfile(backfile):
os.remove(backfile)
os.rename(self.filename, backfile)
self.write_file(self.filename)
def write_file(self, file_name):
"""Write the current Target base to the named file."""
with open(file_name, 'wb') as f:
writer = csv.DictWriter(f, fieldnames=self.get_field_list())
writer.writeheader()
for key, value in sorted(self.data_dict.items()):
writer.writerow(value)
| 1.742188 | 2 |
dev/Code/Framework/AzFramework/CodeGen/AzEBusInline.py | jeikabu/lumberyard | 1,738 | 2165 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import os
from az_code_gen.base import *
from AzReflectionCpp import format_cpp_annotations
class AZEBusInline_Driver(TemplateDriver):
def apply_transformations(self, json_object):
format_cpp_annotations(json_object)
def render_templates(self, input_file, **template_kwargs):
input_file_name, input_file_ext = os.path.splitext(input_file)
self.render_template_to_file(
"AzEBusInline.tpl", template_kwargs, '{}.generated.inline'.format(input_file_name))
# Factory function - called from launcher
def create_drivers(env):
return [AZEBusInline_Driver(env)]
| 1.90625 | 2 |
QUANTAXIS/QASU/crawl_eastmoney.py | QUANTAXISER/QUANTAXIS | 1 | 2166 | import os
from QUANTAXIS.QASetting import QALocalize
#from QUANTAXIS_CRAWLY.run_selenium_alone import (read_east_money_page_zjlx_to_sqllite, open_chrome_driver, close_chrome_dirver)
from QUANTAXIS_CRAWLY.run_selenium_alone import *
import urllib
import pandas as pd
import time
from QUANTAXIS.QAUtil import (DATABASE)
def QA_request_eastmoney_zjlx(param_stock_code_list):
    # Switched to fetching the fund-flow (zjlx) page via a plain HTTP request.
    strUrl = "http://data.eastmoney.com/zjlx/{}.html".format(param_stock_code_list[0])
    # Throttle the request
    time.sleep(1.223)
    response = urllib.request.urlopen(strUrl)
    content = response.read()
    # 🛠todo switch to re regular expressions for the matching below
strings = content.decode("utf-8", "ignore")
string_lines = strings.split("\r\n")
#for aline in string_lines:
# aline = aline.strip()
# if '_stockCode' in aline:
# _stockCode = aline[len('var _stockCode = '):]
# _stockCode = _stockCode.strip("\"\"\,")
# if '_stockMarke' in aline:
# _stockMarke = aline[len('_stockMarke = '):]
# _stockMarke = _stockMarke.strip("\"\"\,")
# # 60XXXX ,
#_stockMarke = 1
# 00XXXX ,
# _stockMarke = 2
# 30XXXX ,
# _stockMarke = 2
# if '_stockName' in aline:
# _stockName = aline[len('_stockName = '):]
# _stockName = _stockName.strip("\"\"\,")
# if '_market' in aline:
# _market = aline[len('_market = '):]
# _market = _market.strip("\"\"\,")
# break
#_market= 'hsa'
# print(_stockCode)
# print(_stockMarke)
# print(_stockName)
# print(_market)
values = []
for aline in string_lines:
aline = aline.strip()
if 'EM_CapitalFlowInterface' in aline:
# print(aline)
# print('------------------')
aline = aline.strip()
if aline.startswith('var strUrl = '):
if 'var strUrl = ' in aline:
aline = aline[len('var strUrl = '):]
values = aline.split('+')
# print(values)
break
# print('------------------')
print(values)
for iStockCode in range(len(param_stock_code_list)):
requestStr = ""
strCode = param_stock_code_list[iStockCode]
if strCode[0:2] == '60':
_stockMarke = '1'
elif strCode[0:2] == '00' or strCode[0:2] == '30':
_stockMarke = '2'
else:
print(strCode + " 暂不支持, 60, 00, 30 开头的股票代码")
return
for iItem in values:
if '_stockCode' in iItem:
requestStr = requestStr + param_stock_code_list[iStockCode]
elif '_stockMarke' in iItem:
requestStr = requestStr + _stockMarke
else:
if 'http://ff.eastmoney.com/' in iItem:
requestStr = 'http://ff.eastmoney.com/'
else:
iItem = iItem.strip(' "')
iItem = iItem.rstrip(' "')
requestStr = requestStr + iItem
# print(requestStr)
        # Throttle the request
time.sleep(1.456)
response = urllib.request.urlopen(requestStr)
content2 = response.read()
# print(content2)
strings = content2.decode("utf-8", "ignore")
# print(strings)
list_data_zjlx = []
if 'var aff_data=({data:[["' in strings:
leftChars = strings[len('var aff_data=({data:[["'):]
# print(leftChars)
dataArrays = leftChars.split(',')
# print(dataArrays)
for aItemIndex in range(0, len(dataArrays), 13):
                '''
                Each record contains:
                Date
                Closing price
                Price change (%)
                Main-force net inflow        net amount / net share (%)
                Extra-large order net inflow net amount / net share (%)
                Large order net inflow       net amount / net share (%)
                Medium order net inflow      net amount / net share (%)
                Small order net inflow       net amount / net share (%)
                '''
dict_row = {}
dict_row['stock_code'] = param_stock_code_list[iStockCode]
                # Date
                # print(aItemIndex)
                data01 = dataArrays[aItemIndex]
                data01 = data01.strip('"')
                # print('date', data01)
                dict_row['date'] = data01
                # Main-force net inflow: net amount
                data02 = dataArrays[aItemIndex + 1]
                data02 = data02.strip('"')
                # print('main-force net inflow amount', data02)
                dict_row['zljll_je_wy'] = data02
                # Main-force net inflow: net share (%)
                data03 = dataArrays[aItemIndex + 2]
                data03 = data03.strip('"')
                # print('main-force net inflow share', data03)
                # date01 = aItemData.strip('[\'\'')
                dict_row['zljll_jzb_bfb'] = data03
                # Extra-large order net inflow: net amount
                data04 = dataArrays[aItemIndex + 3]
                data04 = data04.strip('"')
                # print('extra-large order net inflow amount', data04)
                dict_row['cddjll_je_wy'] = data04
                # Extra-large order net inflow: net share (%)
                data05 = dataArrays[aItemIndex + 4]
                data05 = data05.strip('"')
                # print('extra-large order net inflow share', data05)
                dict_row['cddjll_je_jzb'] = data05
                # Large order net inflow: net amount
                data06 = dataArrays[aItemIndex + 5]
                data06 = data06.strip('"')
                # print('large order net inflow amount', data06)
                dict_row['ddjll_je_wy'] = data06
                # Large order net inflow: net share (%)
                data07 = dataArrays[aItemIndex + 6]
                data07 = data07.strip('"')
                # print('large order net inflow share', data07)
                dict_row['ddjll_je_jzb'] = data07
                # Medium order net inflow: net amount
                data08 = dataArrays[aItemIndex + 7]
                data08 = data08.strip('"')
                # print('medium order net inflow amount', data08)
                dict_row['zdjll_je_wy'] = data08
                # Medium order net inflow: net share (%)
                data09 = dataArrays[aItemIndex + 8]
                data09 = data09.strip('"')
                # print('medium order net inflow share', data09)
                dict_row['zdjll_je_jzb'] = data09
                # Small order net inflow: net amount
                data10 = dataArrays[aItemIndex + 9]
                data10 = data10.strip('"')
                # print('small order net inflow amount', data10)
                dict_row['xdjll_je_wy'] = data10
                # Small order net inflow: net share (%)
                data11 = dataArrays[aItemIndex + 10]
                data11 = data11.strip('"')
                # print('small order net inflow share', data11)
                dict_row['xdjll_je_jzb'] = data11
                # Closing price
                data12 = dataArrays[aItemIndex + 11]
                data12 = data12.strip('"')
                # print('closing price', data12)
                dict_row['close_price'] = data12
                # Price change (%)
                data13 = dataArrays[aItemIndex + 12]
                data13 = data13.strip('"')
                data13 = data13.strip('"]]})')
                # print('price change', data13)
                dict_row['change_price'] = data13
                # One record read successfully
                # print("successfully read one record")
                # print(dict_row)
list_data_zjlx.append(dict_row)
# print(list_data_zjlx)
df = pd.DataFrame(list_data_zjlx)
# print(df)
client = DATABASE
coll_stock_zjlx = client.eastmoney_stock_zjlx
# coll_stock_zjlx.insert_many(QA_util_to_json_from_pandas(df))
for i in range(len(list_data_zjlx)):
aRec = list_data_zjlx[i]
        # 🛠todo after the trading day ends, fetch that day's fund flow; the
        # intraday fund-flow value is only a snapshot in time
        ret = coll_stock_zjlx.find_one(aRec)
        if ret is None:
            coll_stock_zjlx.insert_one(aRec)
            print("🤑 inserted new record ", aRec)
        else:
            print("😵 record already exists ", ret)
'''
Used as a test case: fetch via the browser and check whether the data is
consistent with the request-based approach above.
'''
def QA_read_eastmoney_zjlx_web_page_to_sqllite(stockCodeList=None):
    # todo 🛠 check that stockCode exists and is valid
    # todo 🛠 read the driver files from a fixed location in the QALocalize directory
    print("📨 current working directory: ", os.getcwd())
path_check = os.getcwd()+"/QUANTAXIS_WEBDRIVER"
    if not os.path.exists(path_check):
        print("😵 please confirm the current path contains the selenium_driver directory 😰 ")
        return
    else:
        print(os.getcwd()+"/QUANTAXIS_WEBDRIVER", " directory exists 😁")
print("")
# path_for_save_data = QALocalize.download_path + "/eastmoney_stock_zjlx"
# isExists = os.path.exists(path_for_save_data)
# if isExists == False:
# os.mkdir(path_for_save_data)
# isExists = os.path.exists(path_for_save_data)
# if isExists == True:
# print(path_for_save_data,"目录不存在! 成功建立目录 😢")
# else:
# print(path_for_save_data,"目录不存在! 失败建立目录 🤮, 可能没有权限 🈲")
# return
# else:
# print(path_for_save_data,"目录存在!准备读取数据 😋")
browser = open_chrome_driver()
for indexCode in range(len(stockCodeList)):
#full_path_name = path_for_save_data + "/" + stockCodeList[indexCode] + "_zjlx.sqlite.db"
read_east_money_page_zjlx_to_sqllite(stockCodeList[indexCode], browser)
pass
close_chrome_dirver(browser)
    # Create the directory
    # Start the thread that reads the web pages and writes to the database
    # Wait for completion | 2.65625 | 3 |
wsgi.py | javicacheiro/salt-git-synchronizer-proxy | 0 | 2167 | #!/usr/bin/env python
import logging
import sys
from app import app as application
def setup_flask_logging():
# Log to stdout
handler = logging.StreamHandler(sys.stdout)
# Log to a file
#handler = logging.FileHandler('./application.log')
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(
'%(asctime)s [%(funcName)s] %(levelname)s: %(message)s '
))
application.logger.addHandler(handler)
# Set default log level for the general logger
# each handler can then restrict the messages logged
application.logger.setLevel(logging.INFO)
setup_flask_logging()
if __name__ == '__main__':
application.run()
| 2.671875 | 3 |
game/base/enemy.py | PythonixCoders/PyWeek29 | 8 | 2168 |
#!/usr/bin/env python
from game.base.being import Being
class Enemy(Being):
def __init__(self, app, scene, **kwargs):
super().__init__(app, scene, **kwargs)
self.friendly = False
| 2.265625 | 2 |
main/rates/migrations/0002_auto_20170625_1510.py | Hawk94/coin_tracker | 0 | 2169 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-25 15:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rates', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='rate',
old_name='euro_rate',
new_name='eur_rate',
),
migrations.RenameField(
model_name='rate',
old_name='pound_rates',
new_name='gbp_rate',
),
]
| 1.6875 | 2 |
setup.py | dojeda/quetzal-openapi-client | 0 | 2170 | # coding: utf-8
"""
Quetzal API
Quetzal: an API to manage data files and their associated metadata.
OpenAPI spec version: 0.5.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "quetzal-openapi-client"
VERSION = "0.5.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Quetzal API auto-generated client",
author='<NAME>',
author_email="<EMAIL>",
url="https://github.com/quet.zal/quetzal-openapi-client",
project_urls={
"Documentation": "https://quetzal-openapi-client.readthedocs.io",
"Code": "https://github.com/quetz-al/quetzal-openapi-client",
"Issue tracker": "https://github.com/quetz-al/quetzal-openapi-client/issues",
},
license="BSD-3-Clause",
keywords=["OpenAPI", "OpenAPI-Generator", "Quetzal API"],
install_requires=REQUIRES,
packages=find_packages(exclude=['test', 'docs']),
namespace_packages=['quetzal'],
include_package_data=True,
long_description="""\
quetzal-openapi-client
======================
This is an auto-generated package using
[openapi-generator](https://github.com/OpenAPITools/openapi-generator)
from an OpenAPI specification of the Quetzal API.
An improvement layer on this client exists in the quetzal-client package.
Quetzal is an API to manage data files and their associated metadata.
See more at [quetz.al](https://quetz.al) and its
[readthedocs documentation](https://quetzal-api.readthedocs.io).
""",
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP',
'Topic :: System :: Archiving',
],
)
| 1.539063 | 2 |
youtube_dl/extractor/turner.py | jonyg80/youtube-dl | 66,635 | 2171 | # coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
xpath_text,
int_or_none,
determine_ext,
float_or_none,
parse_duration,
xpath_attr,
update_url_query,
ExtractorError,
strip_or_none,
url_or_none,
)
class TurnerBaseIE(AdobePassIE):
_AKAMAI_SPE_TOKEN_CACHE = {}
def _extract_timestamp(self, video_data):
return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
if not token:
query = {
'path': secure_path,
}
if custom_tokenizer_query:
query.update(custom_tokenizer_query)
else:
query['videoId'] = content_id
if ap_data.get('auth_required'):
query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
auth = self._download_xml(
tokenizer_src, content_id, query=query)
error_msg = xpath_text(auth, 'error/msg')
if error_msg:
raise ExtractorError(error_msg, expected=True)
token = xpath_text(auth, 'token')
if not token:
return video_url
self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
return video_url + '?hdnea=' + token
def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}, fatal=False):
video_data = self._download_xml(
data_src, video_id,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=fatal)
if not video_data:
return {}
video_id = video_data.attrib['id']
title = xpath_text(video_data, 'headline', fatal=True)
content_id = xpath_text(video_data, 'contentId') or video_id
# rtmp_src = xpath_text(video_data, 'akamai/src')
# if rtmp_src:
# split_rtmp_src = rtmp_src.split(',')
# if len(split_rtmp_src) == 2:
# rtmp_src = split_rtmp_src[1]
# aifp = xpath_text(video_data, 'akamai/aifp', default='')
urls = []
formats = []
thumbnails = []
subtitles = {}
rex = re.compile(
r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
# Possible formats locations: files/file, files/groupFiles/files
# and maybe others
for video_file in video_data.findall('.//file'):
video_url = url_or_none(video_file.text.strip())
if not video_url:
continue
ext = determine_ext(video_url)
if video_url.startswith('/mp4:protected/'):
continue
# TODO Correct extraction for these files
# protected_path_data = path_data.get('protected')
# if not protected_path_data or not rtmp_src:
# continue
# protected_path = self._search_regex(
# r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
# auth = self._download_webpage(
# protected_path_data['tokenizer_src'], query={
# 'path': protected_path,
# 'videoId': content_id,
# 'aifp': aifp,
# })
# token = xpath_text(auth, 'token')
# if not token:
# continue
# video_url = rtmp_src + video_url + '?' + token
elif video_url.startswith('/secure/'):
secure_path_data = path_data.get('secure')
if not secure_path_data:
continue
video_url = self._add_akamai_spe_token(
secure_path_data['tokenizer_src'],
secure_path_data['media_src'] + video_url,
content_id, ap_data)
elif not re.match('https?://', video_url):
base_path_data = path_data.get(ext, path_data.get('default', {}))
media_src = base_path_data.get('media_src')
if not media_src:
continue
video_url = media_src + video_url
if video_url in urls:
continue
urls.append(video_url)
format_id = video_file.get('bitrate')
if ext in ('scc', 'srt', 'vtt'):
subtitles.setdefault('en', []).append({
'ext': ext,
'url': video_url,
})
elif ext == 'png':
thumbnails.append({
'id': format_id,
'url': video_url,
})
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
video_url, video_id, fatal=False))
elif re.match(r'https?://[^/]+\.akamaihd\.net/[iz]/', video_url):
formats.extend(self._extract_akamai_formats(
video_url, video_id, {
'hds': path_data.get('f4m', {}).get('host'),
# nba.cdn.turner.com, ht.cdn.turner.com, ht2.cdn.turner.com
# ht3.cdn.turner.com, i.cdn.turner.com, s.cdn.turner.com
# ssl.cdn.turner.com
'http': 'pmd.cdn.turner.com',
}))
elif ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4',
m3u8_id=format_id or 'hls', fatal=False)
if '/secure/' in video_url and '?hdnea=' in video_url:
for f in m3u8_formats:
f['_seekable'] = False
formats.extend(m3u8_formats)
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(video_url, {'hdcore': '3.7.0'}),
video_id, f4m_id=format_id or 'hds', fatal=False))
else:
f = {
'format_id': format_id,
'url': video_url,
'ext': ext,
}
mobj = rex.search(video_url)
if mobj:
f.update({
'width': int(mobj.group('width')),
'height': int(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
elif isinstance(format_id, compat_str):
if format_id.isdigit():
f['tbr'] = int(format_id)
else:
mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
if mobj:
if mobj.group(1) == 'audio':
f.update({
'vcodec': 'none',
'ext': 'm4a',
})
else:
f['tbr'] = int(mobj.group(1))
formats.append(f)
self._sort_formats(formats)
for source in video_data.findall('closedCaptions/source'):
for track in source.findall('track'):
track_url = url_or_none(track.get('url'))
if not track_url or track_url.endswith('/big'):
continue
lang = track.get('lang') or track.get('label') or 'en'
subtitles.setdefault(lang, []).append({
'url': track_url,
'ext': {
'scc': 'scc',
'webvtt': 'vtt',
'smptett': 'tt',
}.get(source.get('format'))
})
thumbnails.extend({
'id': image.get('cut') or image.get('name'),
'url': image.text,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in video_data.findall('images/image'))
is_live = xpath_text(video_data, 'isLive') == 'true'
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'thumbnail': xpath_text(video_data, 'poster'),
'description': strip_or_none(xpath_text(video_data, 'description')),
'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
'timestamp': self._extract_timestamp(video_data),
'upload_date': xpath_attr(video_data, 'metas', 'version'),
'series': xpath_text(video_data, 'showTitle'),
'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
'is_live': is_live,
}
def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
streams_data = self._download_json(
'http://medium.ngtv.io/media/%s/tv' % media_id,
media_id)['media']['tv']
duration = None
chapters = []
formats = []
for supported_type in ('unprotected', 'bulkaes'):
stream_data = streams_data.get(supported_type, {})
m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
if not m3u8_url:
continue
if stream_data.get('playlistProtection') == 'spe':
m3u8_url = self._add_akamai_spe_token(
'http://token.ngtv.io/token/token_spe',
m3u8_url, media_id, ap_data or {}, tokenizer_query)
formats.extend(self._extract_m3u8_formats(
m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
duration = float_or_none(stream_data.get('totalRuntime'))
if not chapters:
for chapter in stream_data.get('contentSegments', []):
start_time = float_or_none(chapter.get('start'))
chapter_duration = float_or_none(chapter.get('duration'))
if start_time is None or chapter_duration is None:
continue
chapters.append({
'start_time': start_time,
'end_time': start_time + chapter_duration,
})
self._sort_formats(formats)
return {
'formats': formats,
'chapters': chapters,
'duration': duration,
}
| 2.078125 | 2 |
ml/sandbox/00-data.py | robk-dev/algo-trading | 1 | 2172 | from alpha_vantage.timeseries import TimeSeries
from pprint import pprint
import json
import argparse
def save_dataset(symbol='MSFT', time_window='daily_adj'):
credentials = json.load(open('creds.json', 'r'))
api_key = credentials['av_api_key']
print(symbol, time_window)
ts = TimeSeries(key=api_key, output_format='pandas')
if time_window == 'intraday':
data, meta_data = ts.get_intraday(
symbol=symbol, interval='1min', outputsize='full')
elif time_window == 'daily':
data, meta_data = ts.get_daily(symbol, outputsize='full')
elif time_window == 'daily_adj':
data, meta_data = ts.get_daily_adjusted(symbol, outputsize='full')
pprint(data.head(10))
data.to_csv(f'./{symbol}_{time_window}.csv')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('symbol', type=str, help="the stock symbol you want to download")
parser.add_argument('time_window', type=str, choices=[
'intraday', 'daily', 'daily_adj'], help="the time period you want to download the stock history for")
namespace = parser.parse_args()
save_dataset(**vars(namespace))
| 3.046875 | 3 |
tests/zpill.py | al3pht/cloud-custodian | 2,415 | 2173 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import fnmatch
from io import StringIO
import json
import os
import shutil
import zipfile
import re
from datetime import datetime, timedelta, tzinfo
from distutils.util import strtobool
import boto3
import placebo
from botocore.response import StreamingBody
from placebo import pill
from c7n.testing import CustodianTestCore
from .constants import ACCOUNT_ID
# Custodian Test Account. This is used only for testing.
# Access is available for community project maintainers.
###########################################################################
# BEGIN PLACEBO MONKEY PATCH
#
# Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony...
# These monkeypatch patches represent fixes on trunk of that repo that have not been released
# into an extant version, we carry them here. We can drop this when this issue is resolved
#
# https://github.com/garnaat/placebo/issues/63
#
# License - Apache 2.0
# Copyright (c) 2015 <NAME>
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if "__class__" in target:
class_name = target.pop("__class__")
if "__module__" in obj:
obj.pop("__module__")
# Use getattr(module, class_name) for custom types if needed
if class_name == "datetime":
return datetime(tzinfo=utc, **target)
if class_name == "StreamingBody":
return StringIO(target["body"])
# Return unrecognized structures as-is
return obj
def serialize(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {"__class__": obj.__class__.__name__}
try:
result["__module__"] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime):
result["year"] = obj.year
result["month"] = obj.month
result["day"] = obj.day
result["hour"] = obj.hour
result["minute"] = obj.minute
result["second"] = obj.second
result["microsecond"] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
result["body"] = obj.read()
obj._raw_stream = StringIO(result["body"])
obj._amount_read = 0
return result
if isinstance(obj, bytes):
return obj.decode('utf8')
# Raise a TypeError if the object isn't recognized
raise TypeError("Type not serializable")
pill.FakeHttpResponse.raw = None
placebo.pill.serialize = serialize
placebo.pill.deserialize = deserialize
# END PLACEBO MONKEY
##########################################################################
class BluePill(pill.Pill):
def playback(self):
super(BluePill, self).playback()
self._avail = self.get_available()
def get_available(self):
return {
os.path.join(self.data_path, n)
for n in fnmatch.filter(os.listdir(self.data_path), "*.json")
}
def get_next_file_path(self, service, operation):
fn, format = super(BluePill, self).get_next_file_path(service, operation)
# couple of double use cases
if fn in self._avail:
self._avail.remove(fn)
else:
print("\ndouble use %s\n" % fn)
return (fn, format)
def stop(self):
result = super(BluePill, self).stop()
if self._avail:
print("Unused json files \n %s" % ("\n".join(sorted(self._avail))))
return result
class ZippedPill(pill.Pill):
def __init__(self, path, prefix=None, debug=False):
super(ZippedPill, self).__init__(prefix, debug)
self.path = path
self._used = set()
self.archive = None
def playback(self):
self.archive = zipfile.ZipFile(self.path, "r")
self._files = set(self.archive.namelist())
return super(ZippedPill, self).playback()
def record(self):
self.archive = zipfile.ZipFile(self.path, "a", zipfile.ZIP_DEFLATED)
self._files = set()
files = {n for n in self.archive.namelist() if n.startswith(self.prefix)}
if not files:
return super(ZippedPill, self).record()
# We can't update files in a zip, so copy
self.archive.close()
os.rename(self.path, "%s.tmp" % self.path)
src = zipfile.ZipFile("%s.tmp" % self.path, "r")
self.archive = zipfile.ZipFile(self.path, "w", zipfile.ZIP_DEFLATED)
for n in src.namelist():
if n in files:
continue
self.archive.writestr(n, src.read(n))
os.remove("%s.tmp" % self.path)
return super(ZippedPill, self).record()
def stop(self):
super(ZippedPill, self).stop()
if self.archive:
self.archive.close()
def save_response(self, service, operation, response_data, http_response=200):
filepath = self.get_new_file_path(service, operation)
pill.LOG.debug("save_response: path=%s", filepath)
json_data = {"status_code": http_response, "data": response_data}
self.archive.writestr(
filepath,
json.dumps(json_data, indent=4, default=pill.serialize),
zipfile.ZIP_DEFLATED,
)
self._files.add(filepath)
def load_response(self, service, operation):
response_file = self.get_next_file_path(service, operation)
self._used.add(response_file)
pill.LOG.debug("load_responses: %s", response_file)
response_data = json.loads(
self.archive.read(response_file), object_hook=pill.deserialize
)
return (
pill.FakeHttpResponse(response_data["status_code"]), response_data["data"]
)
def get_new_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_new_file_path: %s", base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + "*")
for file_path in fnmatch.filter(self._files, glob_pattern):
file_name = os.path.basename(file_path)
m = self.filename_re.match(file_name)
if m:
i = int(m.group("index"))
if i > index:
index = i
index += 1
return os.path.join(self._data_path, "{0}_{1}.json".format(base_name, index))
def get_next_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_next_file_path: %s", base_name)
next_file = None
while next_file is None:
index = self._index.setdefault(base_name, 1)
fn = os.path.join(self._data_path, base_name + "_{0}.json".format(index))
fn = fn.replace('\\', '/')
if fn in self._files:
next_file = fn
self._index[base_name] += 1
self._files.add(fn)
elif index != 1:
self._index[base_name] = 1
else:
# we are looking for the first index and it's not here
raise IOError("response file ({0}) not found".format(fn))
return fn
def attach(session, data_path, prefix=None, debug=False):
pill = ZippedPill(data_path, prefix=prefix, debug=debug)
pill.attach(session, prefix)
return pill
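# Hedged usage sketch: attach a ZippedPill to a boto3 session and replay
# canned responses from the archive (path and prefix below are illustrative).
#
#   session = boto3.Session(region_name='us-east-1')
#   zpill = attach(session, 'placebo_data.zip', prefix='test_case_name')
#   zpill.playback()
#   # ... use session.client(...) as usual, then:
#   zpill.stop()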
class RedPill(pill.Pill):
def datetime_converter(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
def save_response(self, service, operation, response_data,
http_response=200):
"""
Override to sanitize response metadata and account_ids
"""
# aws sso setups involve a short lived credential transfer
if service == "portal.sso":
return
if 'ResponseMetadata' in response_data:
response_data['ResponseMetadata'] = {}
response_data = json.dumps(response_data, default=serialize)
response_data = re.sub(r"\b\d{12}\b", ACCOUNT_ID, response_data) # noqa
response_data = json.loads(response_data, object_hook=deserialize)
super(RedPill, self).save_response(service, operation, response_data,
http_response)
class PillTest(CustodianTestCore):
archive_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "placebo_data.zip"
)
placebo_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "placebo"
)
output_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "output"
)
recording = False
def cleanUp(self):
self.pill = None
def record_flight_data(self, test_case, zdata=False, augment=False, region=None):
self.recording = True
test_dir = os.path.join(self.placebo_dir, test_case)
if not (zdata or augment):
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.makedirs(test_dir)
session = boto3.Session(region_name=region)
default_region = session.region_name
if not zdata:
pill = RedPill()
pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, debug=True)
pill.record()
self.pill = pill
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
class FakeFactory:
def __call__(fake, region=None, assume=None):
new_session = None
# slightly experimental for test recording, using
# cross account assumes, note this will record sts
# assume role api calls creds into test data, they will
# go stale, but its best to modify before commiting.
# Disabled by default.
if 0 and (assume is not False and fake.assume_role):
client = session.client('sts')
creds = client.assume_role(
RoleArn=fake.assume_role,
RoleSessionName='CustodianTest')['Credentials']
new_session = boto3.Session(
aws_access_key_id=creds['AccessKeyId'],
aws_secret_access_key=creds['SecretAccessKey'],
aws_session_token=creds['SessionToken'],
region_name=region or fake.region or default_region)
elif region and region != default_region:
new_session = boto3.Session(region_name=region)
if new_session:
assert not zdata
new_pill = placebo.attach(new_session, test_dir, debug=True)
new_pill.record()
self.addCleanup(new_pill.stop)
return new_session
return session
return FakeFactory()
def replay_flight_data(self, test_case, zdata=False, region=None):
"""
The `region` argument is to allow functional tests to override the
default region. It is unused when replaying stored data.
"""
if strtobool(os.environ.get('C7N_FUNCTIONAL', 'no')):
self.recording = True
return lambda region=region, assume=None: boto3.Session(region_name=region)
if not zdata:
test_dir = os.path.join(self.placebo_dir, test_case)
if not os.path.exists(test_dir):
raise RuntimeError("Invalid Test Dir for flight data %s" % test_dir)
session = boto3.Session(region_name=region)
if not zdata:
pill = placebo.attach(session, test_dir)
# pill = BluePill()
# pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, False)
pill.playback()
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
return lambda region=None, assume=None: session
| 1.6875 | 2 |
flexget/tests/test_next_series_seasons.py | metaMMA/Flexget | 0 | 2174 | <gh_stars>0
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.entry import Entry
# TODO Add more standard tests
class TestNextSeriesSeasonSeasonsPack(object):
_config = """
templates:
global:
parsing:
series: internal
anchors:
_nss_backfill: &nss_backfill
next_series_seasons:
backfill: yes
_nss_from_start: &nss_from_start
next_series_seasons:
from_start: yes
_nss_backfill_from_start: &nss_backfill_from_start
next_series_seasons:
backfill: yes
from_start: yes
_series_ep_pack: &series_ep_pack
identified_by: ep
tracking: backfill
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_pack: &series_ep_tracking_pack
identified_by: ep
tracking: backfill
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_begin_s02e01: &series_ep_tracking_pack_begin_s02e01
identified_by: ep
tracking: backfill
begin: s02e01
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_begin_s04e01: &series_ep_tracking_pack_begin_s04e01
identified_by: ep
tracking: backfill
begin: s04e01
season_packs:
threshold: 1000
reject_eps: yes
tasks:
inject_series:
series:
settings:
test_series:
season_packs: always
test_series:
- Test Series 1
- Test Series 2
- Test Series 3
- Test Series 4
- Test Series 5
- Test Series 6
- Test Series 7
- Test Series 8
- Test Series 9
- Test Series 10
- Test Series 11
- Test Series 12
- Test Series 13
- Test Series 14
- Test Series 15
- Test Series 16
- Test Series 17
- Test Series 18
- Test Series 19
- Test Series 20
- Test Series 21
- Test Series 22
- Test Series 23
- Test Series 24
- Test Series 25
- Test Series 50
- Test Series 100
test_next_series_seasons_season_pack:
next_series_seasons: yes
series:
- Test Series 1:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_backfill:
<<: *nss_backfill
series:
- Test Series 2:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 3:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_from_start:
<<: *nss_from_start
series:
- Test Series 4:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 5:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 6:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep:
next_series_seasons: yes
series:
- Test Series 7:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_backfill:
<<: *nss_backfill
series:
- Test Series 8:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 9:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start:
<<: *nss_from_start
series:
- Test Series 10:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 11:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 12:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_gap:
next_series_seasons: yes
series:
- Test Series 13:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_backfill:
<<: *nss_backfill
series:
- Test Series 14:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 15:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start:
<<: *nss_from_start
series:
- Test Series 16:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 17:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 18:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap:
next_series_seasons: yes
series:
- Test Series 19:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_backfill:
<<: *nss_backfill
series:
- Test Series 20:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 21:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start:
<<: *nss_from_start
series:
- Test Series 22:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 23:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 24:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_begin_completed:
next_series_seasons: yes
series:
- Test Series 50:
identified_by: ep
begin: S02E01
season_packs:
threshold: 1000
reject_eps: yes
max_reruns: 0
test_next_series_seasons_season_pack_from_start_multirun:
next_series_seasons:
from_start: yes
series:
- Test Series 100:
<<: *series_ep_pack
max_reruns: 0
"""
@pytest.fixture()
def config(self):
"""Season packs aren't supported by guessit yet."""
return self._config
def inject_series(self, execute_task, release_name):
execute_task(
'inject_series',
options={'inject': [Entry(title=release_name, url='')], 'disable_tracking': True},
)
@pytest.mark.parametrize(
"task_name,inject,result_find",
[
('test_next_series_seasons_season_pack', ['Test Series 1 S02'], ['Test Series 1 S03']),
(
'test_next_series_seasons_season_pack_backfill',
['Test Series 2 S02'],
['Test Series 2 S01', 'Test Series 2 S03'],
),
(
'test_next_series_seasons_season_pack_backfill_and_begin',
['Test Series 3 S02'],
['Test Series 3 S03'],
),
(
'test_next_series_seasons_season_pack_from_start',
['Test Series 4 S02'],
['Test Series 4 S03'],
),
(
'test_next_series_seasons_season_pack_from_start_backfill',
['Test Series 5 S02'],
['Test Series 5 S03', 'Test Series 5 S01'],
),
(
'test_next_series_seasons_season_pack_from_start_backfill_and_begin',
['Test Series 6 S02'],
['Test Series 6 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep',
['Test Series 7 S02', 'Test Series 7 S03E01'],
['Test Series 7 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_backfill',
['Test Series 8 S02', 'Test Series 8 S03E01'],
['Test Series 8 S01', 'Test Series 8 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_backfill_and_begin',
['Test Series 9 S02', 'Test Series 9 S03E01'],
['Test Series 9 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start',
['Test Series 10 S02', 'Test Series 10 S03E01'],
['Test Series 10 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start_backfill',
['Test Series 11 S02', 'Test Series 11 S03E01'],
['Test Series 11 S03', 'Test Series 11 S01'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start_backfill_and_begin',
['Test Series 12 S02', 'Test Series 12 S03E01'],
['Test Series 12 S03'],
),
(
'test_next_series_seasons_season_pack_gap',
['Test Series 13 S02', 'Test Series 13 S06'],
['Test Series 13 S07'],
),
(
'test_next_series_seasons_season_pack_gap_backfill',
['Test Series 14 S02', 'Test Series 14 S06'],
[
'Test Series 14 S07',
'Test Series 14 S05',
'Test Series 14 S04',
'Test Series 14 S03',
'Test Series 14 S01',
],
),
(
'test_next_series_seasons_season_pack_gap_backfill_and_begin',
['Test Series 15 S02', 'Test Series 15 S06'],
['Test Series 15 S07', 'Test Series 15 S05', 'Test Series 15 S04'],
),
(
'test_next_series_seasons_season_pack_gap_from_start',
['Test Series 16 S02', 'Test Series 16 S06'],
['Test Series 16 S07'],
),
(
'test_next_series_seasons_season_pack_gap_from_start_backfill',
['Test Series 17 S02', 'Test Series 17 S06'],
[
'Test Series 17 S07',
'Test Series 17 S05',
'Test Series 17 S04',
'Test Series 17 S03',
'Test Series 17 S01',
],
),
(
'test_next_series_seasons_season_pack_gap_from_start_backfill_and_begin',
['Test Series 18 S02', 'Test Series 18 S06'],
['Test Series 18 S07', 'Test Series 18 S05', 'Test Series 18 S04'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap',
['Test Series 19 S02', 'Test Series 19 S06', 'Test Series 19 S07E01'],
['Test Series 19 S07'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_backfill',
['Test Series 20 S02', 'Test Series 20 S06', 'Test Series 20 S07E01'],
[
'Test Series 20 S07',
'Test Series 20 S05',
'Test Series 20 S04',
'Test Series 20 S03',
'Test Series 20 S01',
],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_backfill_and_begin',
['Test Series 21 S02', 'Test Series 21 S06', 'Test Series 21 S07E01'],
['Test Series 21 S07', 'Test Series 21 S05', 'Test Series 21 S04'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start',
['Test Series 22 S02', 'Test Series 22 S03E01', 'Test Series 22 S06'],
['Test Series 22 S07'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill',
['Test Series 23 S02', 'Test Series 23 S03E01', 'Test Series 23 S06'],
[
'Test Series 23 S07',
'Test Series 23 S05',
'Test Series 23 S04',
'Test Series 23 S03',
'Test Series 23 S01',
],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill_and_begin',
['Test Series 24 S02', 'Test Series 24 S03E01', 'Test Series 24 S06'],
['Test Series 24 S07', 'Test Series 24 S05', 'Test Series 24 S04'],
),
(
'test_next_series_seasons_season_pack_begin_completed',
['Test Series 50 S02'],
['Test Series 50 S03'],
),
],
)
def test_next_series_seasons(self, execute_task, task_name, inject, result_find):
for entity_id in inject:
self.inject_series(execute_task, entity_id)
task = execute_task(task_name)
for result_title in result_find:
assert task.find_entry(title=result_title)
assert len(task.all_entries) == len(result_find)
# Tests which require multiple tasks to be executed in order
# Each run_parameter is a tuple of lists: [task name, list of series ID(s) to inject, list of result(s) to find]
@pytest.mark.parametrize(
"run_parameters",
[
(
[
'test_next_series_seasons_season_pack_from_start_multirun',
[],
['Test Series 100 S01'],
],
[
'test_next_series_seasons_season_pack_from_start_multirun',
[],
['Test Series 100 S02'],
],
)
],
)
def test_next_series_seasons_multirun(self, execute_task, run_parameters):
for this_test in run_parameters:
for entity_id in this_test[1]:
self.inject_series(execute_task, entity_id)
task = execute_task(this_test[0])
for result_title in this_test[2]:
assert task.find_entry(title=result_title)
assert len(task.all_entries) == len(this_test[2])
| 1.78125 | 2 |
pymatgen/analysis/wulff.py | hpatel1567/pymatgen | 1 | 2175 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module define a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies,
and the total area and volume of the wulff shape,the weighted surface energy,
the anisotropy and shape_factor can also be calculated.
In support of plotting from a given view in terms of miller index.
The lattice is from the conventional unit cell, and (hkil) for hexagonal
lattices.
If you use this code extensively, consider citing the following:
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
import warnings
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
"""
Prepare for display on plots
"(hkl)" for surfaces
    Args:
hkl: in the form of [h, k, l] or (h, k, l)
"""
str_format = '($'
for x in hkl:
if x < 0:
str_format += '\\overline{' + str(-x) + '}'
else:
str_format += str(x)
str_format += '$)'
return str_format
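# Illustrative example of the helper above (added note, not part of the original
# source): hkl_tuple_to_str((1, -1, 0)) returns the string '($1\overline{1}0$)',
# which matplotlib renders with an overbar on the negative index.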
def get_tri_area(pts):
"""
    Given a list of coordinates for 3 points,
    compute the area of this triangle.
Args:
pts: [a, b, c] three points
"""
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri
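# Quick sanity check for get_tri_area (illustrative, not in the original source):
# the unit right triangle (0, 0, 0), (1, 0, 0), (0, 1, 0) has area 0.5, i.e.
# get_tri_area([(0, 0, 0), (1, 0, 0), (0, 1, 0)]) == 0.5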
class WulffFacet:
"""
Helper container for each Wulff plane.
"""
def __init__(self, normal, e_surf, normal_pt, dual_pt, index, m_ind_orig,
miller):
"""
:param normal:
:param e_surf:
:param normal_pt:
:param dual_pt:
:param index:
:param m_ind_orig:
:param miller:
"""
self.normal = normal
self.e_surf = e_surf
self.normal_pt = normal_pt
self.dual_pt = dual_pt
self.index = index
self.m_ind_orig = m_ind_orig
self.miller = miller
self.points = []
self.outer_lines = []
class WulffShape:
"""
    Generate the Wulff shape from a list of Miller indices and surface energies,
    with a given conventional unit cell.
    The surface energy (J/m^2) is the length of the normal.
Wulff shape is the convex hull.
Based on:
http://scipy.github.io/devdocs/generated/scipy.spatial.ConvexHull.html
Process:
1. get wulff simplices
2. label with color
3. get wulff_area and other properties
.. attribute:: debug (bool)
.. attribute:: alpha
transparency
.. attribute:: color_set
.. attribute:: grid_off (bool)
.. attribute:: axis_off (bool)
.. attribute:: show_area
.. attribute:: off_color
color of facets off wulff
.. attribute:: structure
Structure object, input conventional unit cell (with H ) from lattice
.. attribute:: miller_list
list of input miller index, for hcp in the form of hkil
.. attribute:: hkl_list
        hkil modified to hkl, in the same order as input_miller
.. attribute:: e_surf_list
list of input surface energies, in the same order with input_miller
.. attribute:: lattice
Lattice object, the input lattice for the conventional unit cell
.. attribute:: facets
[WulffFacet] for all facets considering symm
.. attribute:: dual_cv_simp
simplices from the dual convex hull (dual_pt)
.. attribute:: wulff_pt_list
.. attribute:: wulff_cv_simp
simplices from the convex hull of wulff_pt_list
.. attribute:: on_wulff
list for all input_miller, True is on wulff.
.. attribute:: color_area
list for all input_miller, total area on wulff, off_wulff = 0.
.. attribute:: miller_area
($hkl$): area for all input_miller
"""
def __init__(self, lattice, miller_list, e_surf_list, symprec=1e-5):
"""
Args:
lattice: Lattice object of the conventional unit cell
            miller_list ([(hkl), ...]): list of hkl or hkil for hcp
e_surf_list ([float]): list of corresponding surface energies
symprec (float): for recp_operation, default is 1e-5.
"""
if any([se < 0 for se in e_surf_list]):
warnings.warn("Unphysical (negative) surface energy detected.")
self.color_ind = list(range(len(miller_list)))
self.input_miller_fig = [hkl_tuple_to_str(x) for x in miller_list]
# store input data
self.structure = Structure(lattice, ["H"], [[0, 0, 0]])
self.miller_list = tuple([tuple(x) for x in miller_list])
self.hkl_list = tuple([(x[0], x[1], x[-1]) for x in miller_list])
self.e_surf_list = tuple(e_surf_list)
self.lattice = lattice
self.symprec = symprec
# 2. get all the data for wulff construction
# get all the surface normal from get_all_miller_e()
self.facets = self._get_all_miller_e()
logger.debug(len(self.facets))
# 3. consider the dual condition
dual_pts = [x.dual_pt for x in self.facets]
dual_convex = ConvexHull(dual_pts)
dual_cv_simp = dual_convex.simplices
# simplices (ndarray of ints, shape (nfacet, ndim))
# list of [i, j, k] , ndim = 3
# i, j, k: ind for normal_e_m
# recalculate the dual of dual, get the wulff shape.
        # corner <-> surface
# get cross point from the simplices of the dual convex hull
wulff_pt_list = [self._get_cross_pt_dual_simp(dual_simp)
for dual_simp in dual_cv_simp]
wulff_convex = ConvexHull(wulff_pt_list)
wulff_cv_simp = wulff_convex.simplices
logger.debug(", ".join([str(len(x)) for x in wulff_cv_simp]))
# store simplices and convex
self.dual_cv_simp = dual_cv_simp
self.wulff_pt_list = wulff_pt_list
self.wulff_cv_simp = wulff_cv_simp
self.wulff_convex = wulff_convex
self.on_wulff, self.color_area = self._get_simpx_plane()
miller_area = []
for m, in_mill_fig in enumerate(self.input_miller_fig):
miller_area.append(
in_mill_fig + ' : ' + str(round(self.color_area[m], 4)))
self.miller_area = miller_area
def _get_all_miller_e(self):
"""
from self:
get miller_list(unique_miller), e_surf_list and symmetry
operations(symmops) according to lattice
apply symmops to get all the miller index, then get normal,
get all the facets functions for wulff shape calculation:
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
normal[0]x + normal[1]y + normal[2]z = e_surf
return:
[WulffFacet]
"""
all_hkl = []
color_ind = self.color_ind
planes = []
recp = self.structure.lattice.reciprocal_lattice_crystallographic
recp_symmops = self.lattice.get_recp_symmetry_operation(self.symprec)
for i, (hkl, energy) in enumerate(zip(self.hkl_list,
self.e_surf_list)):
for op in recp_symmops:
miller = tuple([int(x) for x in op.operate(hkl)])
if miller not in all_hkl:
all_hkl.append(miller)
normal = recp.get_cartesian_coords(miller)
normal /= sp.linalg.norm(normal)
normal_pt = [x * energy for x in normal]
dual_pt = [x / energy for x in normal]
color_plane = color_ind[divmod(i, len(color_ind))[1]]
planes.append(WulffFacet(normal, energy, normal_pt,
dual_pt, color_plane, i, hkl))
# sort by e_surf
planes.sort(key=lambda x: x.e_surf)
return planes
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt
def _get_simpx_plane(self):
"""
Locate the plane for simpx of on wulff_cv, by comparing the center of
the simpx triangle with the plane functions.
"""
on_wulff = [False] * len(self.miller_list)
surface_area = [0.0] * len(self.miller_list)
for simpx in self.wulff_cv_simp:
pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
center = np.sum(pts, 0) / 3.0
# check whether the center of the simplices is on one plane
for plane in self.facets:
abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
if abs_diff < 1e-5:
on_wulff[plane.index] = True
surface_area[plane.index] += get_tri_area(pts)
plane.points.append(pts)
plane.outer_lines.append([simpx[0], simpx[1]])
plane.outer_lines.append([simpx[1], simpx[2]])
plane.outer_lines.append([simpx[0], simpx[2]])
# already find the plane, move to the next simplices
break
for plane in self.facets:
plane.outer_lines.sort()
plane.outer_lines = [line for line in plane.outer_lines
if plane.outer_lines.count(line) != 2]
return on_wulff, surface_area
def _get_colors(self, color_set, alpha, off_color, custom_colors={}):
"""
assign colors according to the surface energies of on_wulff facets.
return:
(color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff,
e_surf_on_wulff_list)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
color_list = [off_color] * len(self.hkl_list)
color_proxy_on_wulff = []
miller_on_wulff = []
e_surf_on_wulff = [(i, e_surf)
for i, e_surf in enumerate(self.e_surf_list)
if self.on_wulff[i]]
c_map = plt.get_cmap(color_set)
e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
if len(e_surf_on_wulff) > 1:
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list),
vmax=max(e_surf_on_wulff_list))
else:
# if there is only one hkl on wulff, choose the color of the median
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1,
vmax=max(e_surf_on_wulff_list) + 0.1)
scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
for i, e_surf in e_surf_on_wulff:
color_list[i] = scalar_map.to_rgba(e_surf, alpha=alpha)
if tuple(self.miller_list[i]) in custom_colors.keys():
color_list[i] = custom_colors[tuple(self.miller_list[i])]
color_proxy_on_wulff.append(
plt.Rectangle((2, 2), 1, 1, fc=color_list[i], alpha=alpha))
miller_on_wulff.append(self.input_miller_fig[i])
scalar_map.set_array([x[1] for x in e_surf_on_wulff])
color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha)
for x in color_list]
return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff_list
def show(self, *args, **kwargs):
r"""
Show the Wulff plot.
Args:
*args: Passed to get_plot.
**kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def get_line_in_facet(self, facet):
"""
Returns the sorted pts in a facet used to draw a line
"""
lines = list(facet.outer_lines)
pt = []
prev = None
while len(lines) > 0:
if prev is None:
l = lines.pop(0)
else:
for i, l in enumerate(lines):
if prev in l:
l = lines.pop(i)
if l[1] == prev:
l.reverse()
break
# make sure the lines are connected one by one.
# find the way covering all pts and facets
pt.append(self.wulff_pt_list[l[0]].tolist())
pt.append(self.wulff_pt_list[l[1]].tolist())
prev = l[1]
return pt
def get_plot(self, color_set='PuBu', grid_off=True, axis_off=True,
show_area=False, alpha=1, off_color='red', direction=None,
bar_pos=(0.75, 0.15, 0.05, 0.65), bar_on=False, units_in_JPERM2=True,
legend_on=True, aspect_ratio=(8, 8), custom_colors={}):
"""
Get the Wulff shape plot.
Args:
color_set: default is 'PuBu'
grid_off (bool): default is True
            axis_off (bool): default is True
show_area (bool): default is False
alpha (float): chosen from 0 to 1 (float), default is 1
off_color: Default color for facets not present on the Wulff shape.
direction: default is (1, 1, 1)
bar_pos: default is [0.75, 0.15, 0.05, 0.65]
bar_on (bool): default is False
legend_on (bool): default is True
aspect_ratio: default is (8, 8)
            custom_colors ({(h, k, l): [r, g, b, alpha]}): Customize color of each
                facet with a dictionary. The key is the corresponding Miller
                index and value is the color. Undefined facets will use the
                default color scheme. Note: If you decide to set your own colors,
                it probably won't make any sense to have the color bar on.
Return:
(matplotlib.pyplot)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mpl3
color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff = self._get_colors(
color_set, alpha, off_color, custom_colors=custom_colors)
if not direction:
# If direction is not specified, use the miller indices of
# maximum area.
direction = max(self.area_fraction_dict.items(),
key=lambda x: x[1])[0]
fig = plt.figure()
fig.set_size_inches(aspect_ratio[0], aspect_ratio[1])
azim, elev = self._get_azimuth_elev([direction[0], direction[1],
direction[-1]])
wulff_pt_list = self.wulff_pt_list
ax = mpl3.Axes3D(fig, azim=azim, elev=elev)
for plane in self.facets:
# check whether [pts] is empty
if len(plane.points) < 1:
# empty, plane is not on_wulff.
continue
# assign the color for on_wulff facets according to its
# index and the color_list for on_wulff
plane_color = color_list[plane.index]
pt = self.get_line_in_facet(plane)
# plot from the sorted pts from [simpx]
tri = mpl3.art3d.Poly3DCollection([pt])
tri.set_color(plane_color)
tri.set_edgecolor("#808080")
ax.add_collection3d(tri)
# set ranges of x, y, z
# find the largest distance between on_wulff pts and the origin,
# to ensure complete and consistent display for all directions
r_range = max([np.linalg.norm(x) for x in wulff_pt_list])
ax.set_xlim([-r_range * 1.1, r_range * 1.1])
ax.set_ylim([-r_range * 1.1, r_range * 1.1])
ax.set_zlim([-r_range * 1.1, r_range * 1.1])
# add legend
if legend_on:
if show_area:
ax.legend(color_proxy, self.miller_area, loc='upper left',
bbox_to_anchor=(0, 1), fancybox=True, shadow=False)
else:
ax.legend(color_proxy_on_wulff, miller_on_wulff,
loc='upper center',
bbox_to_anchor=(0.5, 1), ncol=3, fancybox=True,
shadow=False)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Add colorbar
if bar_on:
cmap = plt.get_cmap(color_set)
cmap.set_over('0.25')
cmap.set_under('0.75')
bounds = [round(e, 2) for e in e_surf_on_wulff]
bounds.append(1.2 * bounds[-1])
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# display surface energies
ax1 = fig.add_axes(bar_pos)
cbar = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, boundaries=[0] + bounds + [10],
extend='both', ticks=bounds[:-1], spacing='proportional',
orientation='vertical')
units = "$J/m^2$" if units_in_JPERM2 else r"$eV/\AA^2$"
cbar.set_label('Surface Energies (%s)' % (units), fontsize=100)
if grid_off:
ax.grid('off')
if axis_off:
ax.axis('off')
return plt
def _get_azimuth_elev(self, miller_index):
"""
Args:
miller_index: viewing direction
Returns:
azim, elev for plotting
"""
if miller_index == (0, 0, 1) or miller_index == (0, 0, 0, 1):
return 0, 90
else:
cart = self.lattice.get_cartesian_coords(miller_index)
azim = get_angle([cart[0], cart[1], 0], (1, 0, 0))
v = [cart[0], cart[1], 0]
elev = get_angle(cart, v)
return azim, elev
@property
def volume(self):
"""
Volume of the Wulff shape
"""
return self.wulff_convex.volume
@property
def miller_area_dict(self):
"""
Returns {hkl: area_hkl on wulff}
"""
return dict(zip(self.miller_list, self.color_area))
@property
def miller_energy_dict(self):
"""
Returns {hkl: surface energy_hkl}
"""
return dict(zip(self.miller_list, self.e_surf_list))
@property
def surface_area(self):
"""
Total surface area of Wulff shape.
"""
return sum(self.miller_area_dict.values())
@property
def weighted_surface_energy(self):
"""
Returns:
sum(surface_energy_hkl * area_hkl)/ sum(area_hkl)
"""
return self.total_surface_energy / self.surface_area
@property
def area_fraction_dict(self):
"""
Returns:
(dict): {hkl: area_hkl/total area on wulff}
"""
return {hkl: self.miller_area_dict[hkl] / self.surface_area
for hkl in self.miller_area_dict.keys()}
@property
def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy) \
** 2 * area_frac_dict[hkl]
return np.sqrt(square_diff_energy) / weighted_energy
@property
def shape_factor(self):
"""
This is useful for determining the critical nucleus size.
A large shape factor indicates great anisotropy.
See <NAME>., <NAME>. & <NAME>. Kinetics
of Materials. (<NAME>, 2005), p.461
Returns:
(float) Shape factor.
"""
return self.surface_area / (self.volume ** (2 / 3))
@property
def effective_radius(self):
"""
        Radius of the Wulff shape when it
        is approximated as a sphere.
Returns:
(float) radius.
"""
return ((3 / 4) * (self.volume / np.pi)) ** (1 / 3)
@property
def total_surface_energy(self):
"""
Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl)
"""
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy
@property
def tot_corner_sites(self):
"""
Returns the number of vertices in the convex hull.
Useful for identifying catalytically active sites.
"""
return len(self.wulff_convex.vertices)
@property
def tot_edges(self):
"""
Returns the number of edges in the convex hull.
Useful for identifying catalytically active sites.
"""
all_edges = []
for facet in self.facets:
edges = []
pt = self.get_line_in_facet(facet)
lines = []
for i, p in enumerate(pt):
if i == len(pt) / 2:
break
lines.append(tuple(sorted(tuple([tuple(pt[i * 2]), tuple(pt[i * 2 + 1])]))))
for i, p in enumerate(lines):
if p not in all_edges:
edges.append(p)
all_edges.extend(edges)
return len(all_edges)
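# Hedged usage sketch (added for illustration; not part of the original module).
# The cubic lattice and the surface-energy values below are made-up examples:
#
# from pymatgen.core.lattice import Lattice
# wulff = WulffShape(Lattice.cubic(3.5), [(1, 0, 0), (1, 1, 1)], [1.0, 0.9])
# print(wulff.weighted_surface_energy, wulff.anisotropy, wulff.shape_factor)
# wulff.show()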
| 3.28125 | 3 |
ccg/supertagger/any2int.py | stanojevic/ccgtools | 0 | 2176 |
class Any2Int:
def __init__(self, min_count: int, include_UNK: bool, include_PAD: bool):
self.min_count = min_count
self.include_UNK = include_UNK
self.include_PAD = include_PAD
self.frozen = False
self.UNK_i = -1
self.UNK_s = "<UNK>"
self.PAD_i = -2
self.PAD_s = "<PAD>"
self.voc_size = 0
self._s2i = dict()
self._i2s = []
self.frequency = dict()
def iter_item(self):
return enumerate(self._i2s)
def get_s2i(self, s, default: int):
assert self.frozen
i = self._s2i.get(s, -1)
if i >= 0:
return i
elif self.include_UNK:
return self.UNK_i
else:
return default
def __getitem__(self, s):
return self.s2i(s)
def s2i(self, s):
i = self.get_s2i(s, -1)
if i >= 0:
return i
else:
raise Exception(f"out of vocabulary entry {s}")
def contains(self, s):
return self.get_s2i(s, -1) != -1
def i2s(self, i):
assert self.frozen
if 0 <= i < self.voc_size:
return self._i2s[i]
else:
raise Exception(f"not entry at position {i} for a vocabulary of size {self.voc_size}")
def add_to_counts(self, s):
assert not self.frozen
self.frequency[s] = self.frequency.get(s, 0)+1
def freeze(self):
assert not self.frozen
if self.include_UNK:
self.UNK_i = len(self._i2s)
self._i2s.append(self.UNK_s)
if self.include_PAD:
self.PAD_i = len(self._i2s)
self._i2s.append(self.PAD_s)
for s, count in sorted(self.frequency.items(), key=lambda x: -x[1]):
if count >= self.min_count:
self._i2s.append(s)
for i, s in enumerate(self._i2s):
self._s2i[s] = i
self.voc_size = len(self._i2s)
self.frozen = True
def __reduce__(self):
return Any2Int, (2, self.include_UNK, self.include_PAD), (self.min_count, self.include_UNK, self.frozen,
self.UNK_i, self.UNK_s, self.PAD_i, self.PAD_s,
self.voc_size, self._s2i, self._i2s, self.frequency)
def __setstate__(self, state):
self.min_count = state[0]
self.include_UNK = state[1]
self.frozen = state[2]
self.UNK_i = state[3]
self.UNK_s = state[4]
self.PAD_i = state[5]
self.PAD_s = state[6]
self.voc_size = state[7]
self._s2i = state[8]
self._i2s = state[9]
self.frequency = state[10]
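if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; the tag set below is made up).
    # Build a small vocabulary, freeze it, then map strings to ids and back.
    vocab = Any2Int(min_count=1, include_UNK=True, include_PAD=True)
    for tag in ["NP", "S", "NP", "N/N"]:
        vocab.add_to_counts(tag)
    vocab.freeze()
    # "NP" was seen twice, so it is placed right after the <UNK>/<PAD> entries.
    print(vocab.s2i("NP"), vocab.i2s(vocab.UNK_i), vocab.voc_size)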
| 3.171875 | 3 |
app/sensor.py | sosprz/nettemp | 51 | 2177 | <gh_stars>10-100
from app import app
from flask import Flask, request, jsonify, g
import sqlite3
import os
import json
from random import randint
from flask_jwt_extended import jwt_required
import datetime
from flask_mysqldb import MySQL
mysql = MySQL()
def get_db(rom):
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(rom)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def check_value(value, type, rom):
adj=''
tm=''
value=float(value)
m = mysql.connection.cursor()
sql = "SELECT adj, tmp FROM sensors WHERE rom=%s"
m.execute(sql, [rom])
sensor=m.fetchall()
for adj, tmp in sensor:
tmp=float(tmp)
adj=float(adj)
msg=[]
sql = "SELECT min, max, value1, value2, value3 FROM types WHERE type=%s"
m.execute(sql, [type])
list=m.fetchall()
msg.append("IN VALUE: %f" % value)
msg.append(list)
m.close()
if adj:
value=float(value)+(adj)
msg.append("ADJ: %d" % value)
for min, max, v1, v2, v3 in list:
if (value>=float(min)) and (value<=float(max)):
if(value==v1) or (value==v2) or (value==v3):
msg.append("filter 2 back to previous %f" % tmp)
value=tmp
else:
value=float(value)
else:
msg.append("filter 1 back to previous %f" % tmp)
value=tmp
msg.append("VALUE OUT: %f" % value)
print(msg)
return value
def new_db(rom):
rom = rom+'.sql'
conn = sqlite3.connect(app.romdir+rom)
c = conn.cursor()
sql = "SELECT count() FROM sqlite_master WHERE type='table' AND name='def'"
c.execute(sql)
if c.fetchone()[0]==1:
print ("Database %s exists" %rom)
return True
else:
with app.app_context():
db = get_db(app.romdir+rom)
with app.open_resource('schema/sensors_db_schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
print ("Database %s created" %rom)
return False
def insert_db(rom,value):
rom = rom+'.sql'
conn = sqlite3.connect(app.romdir+rom)
c = conn.cursor()
sql = "SELECT count() FROM sqlite_master WHERE type='table' AND name='def'"
c.execute(sql)
if c.fetchone()[0]==1:
data = [value]
sql = "INSERT OR IGNORE INTO def (value) VALUES (?)"
c.execute(sql, data)
conn.commit()
conn.close()
print ("[ nettemp ][ sensor ] Database %s insert ok" %rom)
return True
else:
print ("[ nettemp ][ sensor ] Database %s not exist" %rom)
return False
def update_sensor_tmp(rom,value):
m = mysql.connection.cursor()
rom1 = [rom]
sql="SELECT count(*) FROM sensors WHERE rom=%s"
m.execute(sql, rom1)
coun=m.fetchone()
if coun[0]==1:
if int(datetime.datetime.now().strftime("%M"))%5==0:
tmp_5ago=value
sql = "UPDATE sensors SET tmp=%s, tmp_5ago=%s, nodata='', time=CURRENT_TIMESTAMP() WHERE rom=%s"
data = [value,tmp_5ago,rom]
else:
sql = "UPDATE sensors SET tmp=%s, nodata='', time=CURRENT_TIMESTAMP() WHERE rom=%s"
data = [value,rom]
m.execute(sql, data)
# stat min max
data = [value, value, rom]
sql = "UPDATE sensors SET stat_min=%s, stat_min_time=CURRENT_TIMESTAMP() WHERE (stat_min>%s OR stat_min is null OR stat_min='0.0') AND rom=%s"
m.execute(sql, data)
sql = "UPDATE sensors SET stat_max=%s, stat_max_time=CURRENT_TIMESTAMP() WHERE (stat_max<%s OR stat_max is null OR stat_max='0.0') AND rom=%s"
m.execute(sql, data)
m.connection.commit()
m.close()
print ("[ nettemp ][ sensor ] Sensor %s updated" %rom)
return True
else:
print ("[ nettemp ][ sensor ] Sensor %s not exist" %rom)
return False
def delete_db(rom):
rom=rom+'.sql'
if os.path.isfile(app.romdir+rom):
os.remove(rom)
print ("[ nettemp ][ sensor ] Database %s deleted" %rom)
return True
else:
print ("[ nettemp ][ sensor ] Database %s not exist" %rom)
return False
def delete_sensor(id,rom):
data = [id, rom]
m = mysql.connection.cursor()
sql="DELETE FROM sensors WHERE id=? AND rom=%s"
m.execute(sql, data)
m.connection.commit()
m.close()
delete_db(rom)
print ("[ nettemp ][ sensor ] Sensor %s removed ok" %rom)
def create_sensor(rom, data, data2, map_settings):
m = mysql.connection.cursor()
rom1 = [rom]
sql = "SELECT count(*) FROM sensors WHERE rom=%s"
m.execute(sql, rom1)
coun = m.fetchone()
if coun[0]==0:
sql = "INSERT INTO sensors (rom,type,device,ip,gpio,i2c,usb,name) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
m.execute(sql, data)
sql2 = "UPDATE sensors SET alarm='off', adj='0', charts='on', status='on', ch_group=%s, tmp_min='0', tmp_max='0', minmax='off', stat_min='0', stat_max='0', tmp_5ago='0', fiveago='on', map_id=%s, nodata_time='5', email_delay='10' WHERE rom=%s"
m.execute(sql2, data2)
map = "INSERT INTO maps (type, pos_x, pos_y, map_on, map_id, display_name) VALUES (%s, %s, %s, %s, %s, %s)"
m.execute(map, map_settings)
m.connection.commit()
m.close()
print ("[ nettemp ][ sensor ] Sensor %s added ok" %rom)
else:
print ("[ nettemp ][ sensor ] Sensor %s already exist" %rom)
return None
def sensor():
data = request.get_json()
for j in data:
rom = None
if 'rom' in j:
rom=j['rom']
type = None
if 'type' in j:
type=j['type']
device = None
if 'device' in j:
device=j['device']
ip = None
if 'ip' in j:
ip = j['ip']
gpio = None
if 'gpio' in j:
gpio=j['gpio']
i2c = None
if 'i2c' in j:
i2c=j['i2c']
usb = None
if 'usb' in j:
usb=j['usb']
name = randint(1000,9000)
if 'name' in j:
name=j['name']
if not j['name']:
name = randint(1000,9000)
tmp = None
if 'tmp' in j:
tmp=j['tmp']
value = None
if 'value' in j:
value=j['value']
group = type
if 'group' in j:
group=j['group']
map_id = randint(1000,9000)
map_y = randint(50,600)
map_x = randint(50,600)
data = [rom, type, device, ip, gpio, i2c, usb, name]
data2 = [group, map_id, rom]
map_settings = [type, map_y, map_x, 'on', map_id, 'on']
value=check_value(value, type, rom)
if insert_db(rom, value) == False:
new_db(rom)
insert_db(rom,value)
if update_sensor_tmp(rom,value) == False:
create_sensor(rom,data,data2,map_settings)
update_sensor_tmp(rom,value)
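# Example request body for the /sensor and /local endpoints below (illustrative
# values only; the handler expects a JSON list of readings, each carrying at
# least "rom", "type" and "value", with the remaining keys optional):
# [{"rom": "28-0000075b1234", "type": "temp", "name": "attic", "value": 21.5}]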
@app.route('/sensor', methods=['POST'])
@jwt_required
def url_sensor():
sensor()
return '', 200
@app.route('/local', methods=['POST'])
def url_localhost():
if request.remote_addr == '127.0.0.1':
sensor()
return 'Local'
else:
return '', 404
| 2.625 | 3 |
tests/either_catch_test.py | funnel-io/python-on-rails | 1 | 2178 | from python_on_rails.either import as_either, Failure, Success
@as_either(TypeError)
def add_one(x):
return x + 1
@as_either()
def times_five(x):
return x * 5
def test_success_executes_bindings():
result = Success(1).bind(add_one).bind(times_five)
assert isinstance(result, Success)
assert result.value == 10
def test_a_failure_stops_the_execution_of_later_bindings():
result = Success("NaN").bind(add_one).bind(times_five)
assert isinstance(result, Failure)
assert type(result.value) == TypeError
assert repr(result.value) == "TypeError('can only concatenate str (not \"int\") to str')"
| 2.75 | 3 |
ServerSide/models.py | Coullence/DRF_Percels-Couriers_API_V.0.0.2 | 0 | 2179 | from django.db import models
# Create your models here.
# Station
class Stations(models.Model):
stationName = models.CharField(max_length=100)
stationLocation = models.CharField(max_length=100)
stationStaffId = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
return self.stationName
# Customers
class Customers(models.Model):
customerName = models.CharField(max_length=100)
customerPhone = models.CharField(max_length=100)
customerId = models.CharField(max_length=100)
customerStartLoc = models.CharField(max_length=100)
customerDestinationLoc = models.CharField(max_length=100)
stationStaffId = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
return self.customerName
# Items
class Items(models.Model):
itemName = models.CharField(max_length=100)
itemType = models.CharField(max_length=100)
Quantity = models.CharField(max_length=100)
originStation = models.CharField(max_length=100)
originCounty = models.CharField(max_length=100)
receiverName = models.CharField(max_length=100)
receiverPhone = models.CharField(max_length=100)
destinationAddress = models.CharField(max_length=100)
destinationCounty = models.CharField(max_length=100)
dateSend= models.CharField(max_length=100)
dateExpected = models.CharField(max_length=100)
def __str__(self):
return self.itemName
# Payments
class Payments(models.Model):
customerPhone = models.CharField(max_length=100)
paymentAmount = models.CharField(max_length=100)
paymentMeans = models.EmailField(max_length=100)
code = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.customerPhone
| 2.234375 | 2 |
tao_compiler/mlir/disc/tests/glob_op_test.bzl | JamesTheZ/BladeDISC | 328 | 2180 | # Test definitions for Lit, the LLVM test runner.
#
# This is reusing the LLVM Lit test runner in the interim until the new build
# rules are upstreamed.
# TODO(b/136126535): remove this custom rule.
"""Lit runner globbing test
"""
load("//tensorflow:tensorflow.bzl", "filegroup")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_native_cc_binary", "tf_copts")
# Default values used by the test runner.
_default_test_file_exts = ["mlir", ".pbtxt", ".td"]
_default_driver = "@llvm-project//mlir:run_lit.sh"
_default_size = "small"
_default_tags = []
# These are patterns which we should never match, for tests, subdirectories, or
# test input data files.
_ALWAYS_EXCLUDE = [
"**/LICENSE.txt",
"**/README.txt",
"**/lit.local.cfg",
# Exclude input files that have spaces in their names, since bazel
# cannot cope with such "targets" in the srcs list.
"**/* *",
"**/* */**",
]
def _run_lit_test(name, test_file, data, size, tags, driver, features, exec_properties):
"""Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir.
Note that, due to Bazel's hermetic builds, lit only sees the tests that
are included in the `data` parameter, regardless of what other tests might
exist in the directory searched.
Args:
name: str, the name of the test, including extension.
data: [str], the data input to the test.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
"""
name_without_suffix = test_file[0].split('.')[0]
local_test_files = name + ".test_files"
filegroup(
name = local_test_files,
srcs = native.glob([
"data/" + name_without_suffix + "*.mlir",
]),
)
tf_cc_test(
name = name,
srcs = test_file,
size = size,
deps = [
"//tensorflow/compiler/mlir/disc/tests:mlir_feature_test",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
],
data = [":" + local_test_files] + data + [
"//tensorflow/compiler/mlir/disc:disc_compiler_main",
"//tensorflow/compiler/mlir:tf-mlir-translate",
"//tensorflow/compiler/mlir:tf-opt",
],
)
def glob_op_tests(
exclude = [],
test_file_exts = _default_test_file_exts,
default_size = _default_size,
size_override = {},
data = [],
per_test_extra_data = {},
default_tags = _default_tags,
tags_override = {},
driver = _default_driver,
features = [],
exec_properties = {}):
"""Creates all plausible Lit tests (and their inputs) under this directory.
Args:
exclude: [str], paths to exclude (for tests and inputs).
test_file_exts: [str], extensions for files that are tests.
default_size: str, the test size for targets not in "size_override".
size_override: {str: str}, sizes to use for specific tests.
data: [str], additional input data to the test.
per_test_extra_data: {str: [str]}, extra data to attach to a given file.
default_tags: [str], additional tags to attach to the test.
tags_override: {str: str}, tags to add to specific tests.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
exec_properties: a dictionary of properties to pass on.
"""
# Ignore some patterns by default for tests and input data.
exclude = _ALWAYS_EXCLUDE + exclude
tests = native.glob(
["*." + ext for ext in test_file_exts],
exclude = exclude,
)
# Run tests individually such that errors can be attributed to a specific
# failure.
for i in range(len(tests)):
curr_test = tests[i]
# Instantiate this test with updated parameters.
lit_test(
name = curr_test,
data = data + per_test_extra_data.get(curr_test, []),
size = size_override.get(curr_test, default_size),
tags = default_tags + tags_override.get(curr_test, []),
driver = driver,
features = features,
exec_properties = exec_properties,
)
def lit_test(
name,
data = [],
size = _default_size,
tags = _default_tags,
driver = _default_driver,
features = [],
exec_properties = {}):
"""Runs test files under lit.
Args:
name: str, the name of the test.
data: [str], labels that should be provided as data inputs.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
"""
_run_lit_test(name + ".test", [name], data, size, tags, driver, features, exec_properties)
| 1.554688 | 2 |
build-scripts/PackageCheckHelpers.py | yulicrunchy/JALoP | 4 | 2181 | """
These are functions to add to the configure context.
"""
def __checkCanLink(context, source, source_type, message_libname, real_libs=[]):
"""
Check that source can be successfully compiled and linked against real_libs.
Keyword arguments:
source -- source to try to compile
source_type -- type of source file, (probably should be ".c")
message_libname -- library name to show in the message output from scons
real_libs -- list of actual libraries to link against (defaults to a list
with one element, the value of messager_libname)
"""
if not real_libs:
real_libs = [message_libname]
context.Message("Checking for %s..." % message_libname)
libsave = context.env.get('LIBS')
context.env.AppendUnique(LIBS=real_libs)
ret = context.TryLink(source, source_type)
context.Result( ret )
if libsave is None:
del(context.env['LIBS'])
else:
context.env['LIBS'] = libsave
return ret
libuuid_source = '''
#include <uuid/uuid.h>
int main() {
uuid_t uu;
char uuid_str[37];
uuid_generate(uu);
uuid_unparse(uu, uuid_str);
return 0;
}
'''
def CheckLibUUID(context):
return __checkCanLink(context, libuuid_source, ".c", "libuuid", ["uuid"])
selinux_source = '''
#include <selinux/selinux.h>
int main() {
security_context_t ctx;
getpeercon(0, &ctx);
return 0;
}
'''
def CheckSeLinux(context):
return __checkCanLink(context, selinux_source, '.cpp', 'selinux', ['selinux'])
byteswap_source = '''
#include <byteswap.h>
#include <stdint.h>
int main() {
uint16_t b16 = 0x00FF;
uint32_t b32 = 0x0011EEFF;
uint64_t b64 = 0x00112233CCDDEEFF;
bswap_16(b16);
bswap_32(b32);
bswap_64(b64);
return 0;
}
'''
def CheckByteswap(context):
context.Message("Checking for byteswap.h...")
ret = context.TryCompile(byteswap_source, '.c')
context.Result( ret )
return ret
bdb_source = '''
#include <db.h>
#if defined(DB_VERSION_MAJOR) && DB_VERSION_MAJOR >= 4
#if DB_VERSION_MAJOR == 4
#if defined(DB_VERSION_MINOR) && DB_VERSION_MINOR >= 3
#else
#error ""
#endif
#endif
#else
#error ""
#endif
'''
def CheckBDB(context):
context.Message("Checking for BDB >= 4.3...")
ret = context.TryCompile(bdb_source, '.c')
context.Result(ret)
return ret
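# Hedged usage sketch for an SConstruct (added for illustration; the environment
# setup below is an assumption, not part of this repository's build scripts):
#
# conf = Configure(env, custom_tests={
#     'CheckLibUUID': CheckLibUUID,
#     'CheckSeLinux': CheckSeLinux,
#     'CheckByteswap': CheckByteswap,
#     'CheckBDB': CheckBDB,
# })
# if not conf.CheckLibUUID():
#     Exit(1)
# env = conf.Finish()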
| 2.421875 | 2 |
src/transformers/modeling_tf_pytorch_utils.py | ari-holtzman/transformers | 5,129 | 2182 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
""" Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
return tuple with:
- pytorch model weight name
            - transpose: boolean indicating whether the TF 2.0 and PyTorch weight matrices are transposed with respect to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
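# Illustrative example of the conversion above (the weight name is a made-up
# TF 2.0-style name, not taken from a specific checkpoint):
#   convert_tf_weight_name_to_pt_weight_name(
#       "tf_bert/bert/encoder/layer_._0/attention/self/query/kernel:0")
#   returns ("bert.encoder.layer.0.attention.self.query.weight", True)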
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch # noqa: F401
import tensorflow as tf # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beggining
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
""" Load TF 2.0 model in a pytorch model
"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
""" Load TF2.0 symbolic weights in a PyTorch model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weights (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
# Find associated numpy array in pytorch model state dict
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__, unexpected_keys)
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
| 2.28125 | 2 |
hail/python/test/hail/helpers.py | mitochon/hail | 0 | 2183 | <gh_stars>0
import os
from timeit import default_timer as timer
import unittest
import pytest
from decorator import decorator
from hail.utils.java import Env
import hail as hl
from hail.backend.local_backend import LocalBackend
_initialized = False
def startTestHailContext():
global _initialized
if not _initialized:
backend_name = os.environ.get('HAIL_QUERY_BACKEND', 'spark')
if backend_name == 'spark':
hl.init(master='local[1]', min_block_size=0, quiet=True)
else:
Env.hc() # force initialization
_initialized = True
def stopTestHailContext():
pass
_test_dir = os.environ.get('HAIL_TEST_RESOURCES_DIR', '../src/test/resources')
_doctest_dir = os.environ.get('HAIL_DOCTEST_DATA_DIR', 'hail/docs/data')
def resource(filename):
return os.path.join(_test_dir, filename)
def doctest_resource(filename):
return os.path.join(_doctest_dir, filename)
def schema_eq(x, y):
x_fds = dict(x)
y_fds = dict(y)
return x_fds == y_fds
def convert_struct_to_dict(x):
if isinstance(x, hl.Struct):
return {k: convert_struct_to_dict(v) for k, v in x._fields.items()}
elif isinstance(x, list):
return [convert_struct_to_dict(elt) for elt in x]
elif isinstance(x, tuple):
return tuple([convert_struct_to_dict(elt) for elt in x])
elif isinstance(x, dict):
return {k: convert_struct_to_dict(v) for k, v in x.items()}
else:
return x
_dataset = None
def get_dataset():
global _dataset
if _dataset is None:
_dataset = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf'))).cache()
return _dataset
def assert_time(f, max_duration):
start = timer()
x = f()
end = timer()
    assert (end - start) < max_duration
print(f'took {end - start:.3f}')
return x
def create_all_values():
return hl.struct(
f32=hl.float32(3.14),
i64=hl.int64(-9),
m=hl.null(hl.tfloat64),
astruct=hl.struct(a=hl.null(hl.tint32), b=5.5),
mstruct=hl.null(hl.tstruct(x=hl.tint32, y=hl.tstr)),
aset=hl.set(['foo', 'bar', 'baz']),
mset=hl.null(hl.tset(hl.tfloat64)),
d=hl.dict({hl.array(['a', 'b']): 0.5, hl.array(['x', hl.null(hl.tstr), 'z']): 0.3}),
md=hl.null(hl.tdict(hl.tint32, hl.tstr)),
h38=hl.locus('chr22', 33878978, 'GRCh38'),
ml=hl.null(hl.tlocus('GRCh37')),
i=hl.interval(
hl.locus('1', 999),
hl.locus('1', 1001)),
c=hl.call(0, 1),
mc=hl.null(hl.tcall),
t=hl.tuple([hl.call(1, 2, phased=True), 'foo', hl.null(hl.tstr)]),
mt=hl.null(hl.ttuple(hl.tlocus('GRCh37'), hl.tbool)),
nd=hl.nd.arange(0, 10).reshape((2, 5)),
)
def prefix_struct(s, prefix):
return hl.struct(**{prefix + k: s[k] for k in s})
def create_all_values_table():
all_values = create_all_values()
return (hl.utils.range_table(5, n_partitions=3)
.annotate_globals(**prefix_struct(all_values, 'global_'))
.annotate(**all_values)
.cache())
def create_all_values_matrix_table():
all_values = create_all_values()
return (hl.utils.range_matrix_table(3, 2, n_partitions=2)
.annotate_globals(**prefix_struct(all_values, 'global_'))
.annotate_rows(**prefix_struct(all_values, 'row_'))
.annotate_cols(**prefix_struct(all_values, 'col_'))
.annotate_entries(**prefix_struct(all_values, 'entry_'))
.cache())
def create_all_values_datasets():
return (create_all_values_table(), create_all_values_matrix_table())
def skip_unless_spark_backend():
from hail.backend.spark_backend import SparkBackend
@decorator
def wrapper(func, *args, **kwargs):
if isinstance(hl.utils.java.Env.backend(), SparkBackend):
return func(*args, **kwargs)
else:
raise unittest.SkipTest('requires Spark')
return wrapper
fails_local_backend = pytest.mark.xfail(
os.environ.get('HAIL_QUERY_BACKEND') == 'local',
reason="doesn't yet work on local backend",
strict=True)
def run_with_cxx_compile():
@decorator
def wrapper(func, *args, **kwargs):
return
return wrapper
def assert_evals_to(e, v):
res = hl.eval(e)
if res != v:
raise ValueError(f' actual: {res}\n expected: {v}')
def assert_all_eval_to(*expr_and_expected):
exprs, expecteds = zip(*expr_and_expected)
assert_evals_to(hl.tuple(exprs), expecteds)
def lower_only():
@decorator
def wrapper(func, *args, **kwargs):
flags = hl._get_flags()
prev_lower = flags.get('lower')
prev_lower_only = flags.get('lower_only')
hl._set_flags(lower='1', lower_only='1')
try:
return func(*args, **kwargs)
finally:
hl._set_flags(lower=prev_lower, lower_only=prev_lower_only)
return wrapper | 1.992188 | 2 |
src/entity_linker/models/figer_model/labeling_model.py | mjstrobl/WEXEA | 10 | 2184 | <filename>src/entity_linker/models/figer_model/labeling_model.py
"""
Modifications copyright (C) 2020 <NAME>
"""
import time
import tensorflow as tf
import numpy as np
from entity_linker.models.base import Model
class LabelingModel(Model):
"""Unsupervised Clustering using Discrete-State VAE"""
def __init__(self, batch_size, num_labels, context_encoded_dim,
true_entity_embeddings,
word_embed_dim, context_encoded, mention_embed, scope_name, device):
self.batch_size = batch_size
self.num_labels = num_labels
self.word_embed_dim = word_embed_dim
with tf.variable_scope(scope_name) as s, tf.device(device) as d:
if mention_embed == None:
self.label_weights = tf.get_variable(
name="label_weights",
shape=[context_encoded_dim, num_labels],
initializer=tf.random_normal_initializer(mean=0.0,
stddev=1.0/(100.0)))
else:
context_encoded = tf.concat(
1, [context_encoded, mention_embed], name='con_ment_repr')
self.label_weights = tf.get_variable(
name="label_weights",
shape=[context_encoded_dim+word_embed_dim, num_labels],
initializer=tf.random_normal_initializer(mean=0.0,
stddev=1.0/(100.0)))
# [B, L]
self.label_scores = tf.matmul(context_encoded, self.label_weights)
self.label_probs = tf.sigmoid(self.label_scores)
### PREDICT TYPES FROM ENTITIES
#true_entity_embeddings = tf.nn.dropout(true_entity_embeddings, keep_prob=0.5)
self.entity_label_scores = tf.matmul(true_entity_embeddings, self.label_weights)
self.entity_label_probs = tf.sigmoid(self.label_scores)
def loss_graph(self, true_label_ids, scope_name, device_gpu):
with tf.variable_scope(scope_name) as s, tf.device(device_gpu) as d:
# [B, L]
self.cross_entropy_losses = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.label_scores,
targets=true_label_ids,
name="labeling_loss")
self.labeling_loss = tf.reduce_sum(
self.cross_entropy_losses) / tf.to_float(self.batch_size)
self.enlabel_cross_entropy_losses = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.entity_label_scores,
targets=true_label_ids,
name="entity_labeling_loss")
self.entity_labeling_loss = tf.reduce_sum(
self.enlabel_cross_entropy_losses) / tf.to_float(self.batch_size)
| 2.0625 | 2 |
python/molecular_diameter.py | wutobias/collection | 2 | 2185 | <gh_stars>1-10
#!/usr/bin/env python
import sys
import parmed as pmd
import numpy as np
from scipy.spatial import distance
if len(sys.argv) < 2:
print "Usage: molecular_diameter.py <mymolecule.mol2>"
exit(1)
mol = pmd.load_file(sys.argv[1])
crds = mol.coordinates
dist = distance.cdist(crds, crds, 'euclidean')
print(np.max(dist))
exit(0) | 2.453125 | 2 |
examples/text_classification/yelp_reviews_polarity/train.py | liorshk/simpletransformers | 3,151 | 2186 | <reponame>liorshk/simpletransformers<gh_stars>1000+
import sys
import pandas as pd
from simpletransformers.classification import ClassificationModel
prefix = "data/"
train_df = pd.read_csv(prefix + "train.csv", header=None)
train_df.head()
eval_df = pd.read_csv(prefix + "test.csv", header=None)
eval_df.head()
train_df[0] = (train_df[0] == 2).astype(int)
eval_df[0] = (eval_df[0] == 2).astype(int)
train_df = pd.DataFrame(
{"text": train_df[1].replace(r"\n", " ", regex=True), "labels": train_df[0]}
)
print(train_df.head())
eval_df = pd.DataFrame(
{"text": eval_df[1].replace(r"\n", " ", regex=True), "labels": eval_df[0]}
)
print(eval_df.head())
model_type = sys.argv[1]
if model_type == "bert":
model_name = "bert-base-cased"
elif model_type == "roberta":
model_name = "roberta-base"
elif model_type == "distilbert":
model_name = "distilbert-base-cased"
elif model_type == "distilroberta":
model_type = "roberta"
model_name = "distilroberta-base"
elif model_type == "electra-base":
model_type = "electra"
model_name = "google/electra-base-discriminator"
elif model_type == "electra-small":
model_type = "electra"
model_name = "google/electra-small-discriminator"
elif model_type == "xlnet":
model_name = "xlnet-base-cased"
train_args = {
"reprocess_input_data": True,
"overwrite_output_dir": True,
"use_cached_eval_features": True,
"output_dir": f"outputs/{model_type}",
"best_model_dir": f"outputs/{model_type}/best_model",
"evaluate_during_training": True,
"max_seq_length": 128,
"num_train_epochs": 3,
"evaluate_during_training_steps": 1000,
"wandb_project": "Classification Model Comparison",
"wandb_kwargs": {"name": model_name},
"save_model_every_epoch": False,
"save_eval_checkpoints": False,
# "use_early_stopping": True,
# "early_stopping_metric": "mcc",
# "n_gpu": 2,
# "manual_seed": 4,
# "use_multiprocessing": False,
"train_batch_size": 128,
"eval_batch_size": 64,
# "config": {
# "output_hidden_states": True
# }
}
if model_type == "xlnet":
train_args["train_batch_size"] = 64
train_args["gradient_accumulation_steps"] = 2
# Create a ClassificationModel
model = ClassificationModel(model_type, model_name, args=train_args)
# Train the model
model.train_model(train_df, eval_df=eval_df)
# # # Evaluate the model
# result, model_outputs, wrong_predictions = model.eval_model(eval_df)
| 2.765625 | 3 |
LoadGraph.py | mahdi-zafarmand/SNA | 0 | 2187 | <reponame>mahdi-zafarmand/SNA
import networkx as nx
import os.path
def load_graph(path, weighted=False, delimiter='\t', self_loop=False):
graph = nx.Graph()
if not os.path.isfile(path):
print("Error: file " + path + " not found!")
exit(-1)
with open(path) as file:
for line in file.readlines():
w = 1.0
line = line.split(delimiter)
v1 = int(line[0])
v2 = int(line[1])
graph.add_node(v1)
graph.add_node(v2)
if weighted:
w = float(line[2])
if (self_loop and v1 == v2) or (v1 != v2):
graph.add_edge(v1, v2, weight=w)
return graph
def load_graph_uncertain(path, delimiter='\t', self_loop=False):
graph = nx.Graph()
if not os.path.isfile(path):
print("Error: file " + path + " not found!")
exit(-1)
with open(path) as file:
for line in file.readlines():
line = line.split(delimiter)
v1 = int(line[0])
v2 = int(line[1])
graph.add_node(v1)
graph.add_node(v2)
w = float(line[2])
p = float(line[3])
if (self_loop and v1 == v2) or (v1 != v2):
graph.add_edge(v1, v2, weight=w, prob=p)
return graph
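# Hedged usage sketch (added for illustration; the file name and delimiter are
# assumptions about the expected edge-list format "v1<TAB>v2[<TAB>weight[<TAB>prob]]"):
# g = load_graph('edges.txt', weighted=True, delimiter='\t')
# print(g.number_of_nodes(), g.number_of_edges())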
| 3.09375 | 3 |
mayan/apps/document_signatures/models.py | wan1869/dushuhu | 0 | 2188 | import logging
import uuid
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from model_utils.managers import InheritanceManager
from mayan.apps.django_gpg.exceptions import VerificationError
from mayan.apps.django_gpg.models import Key
from mayan.apps.documents.models import DocumentVersion
from mayan.apps.storage.classes import DefinedStorageLazy
from .literals import STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE
from .managers import DetachedSignatureManager, EmbeddedSignatureManager
logger = logging.getLogger(name=__name__)
def upload_to(*args, **kwargs):
return force_text(s=uuid.uuid4())
class SignatureBaseModel(models.Model):
"""
Fields:
    * key_id - Key Identifier - This is what uniquely identifies a key. No
    two keys in the world have the same Key ID. The Key ID is also used to
locate a key in the key servers: http://pgp.mit.edu
* signature_id - Signature ID - Every time a key is used to sign something
it will generate a unique signature ID. No two signature IDs are the same,
even when using the same key.
"""
document_version = models.ForeignKey(
editable=False, on_delete=models.CASCADE, related_name='signatures',
to=DocumentVersion, verbose_name=_('Document version')
)
# Basic fields
date = models.DateField(
blank=True, editable=False, null=True, verbose_name=_('Date signed')
)
key_id = models.CharField(
help_text=_('ID of the key that will be used to sign the document.'),
max_length=40, verbose_name=_('Key ID')
)
# With proper key
signature_id = models.CharField(
blank=True, editable=False, null=True, max_length=64,
verbose_name=_('Signature ID')
)
public_key_fingerprint = models.CharField(
blank=True, editable=False, null=True, max_length=40,
verbose_name=_('Public key fingerprint')
)
objects = InheritanceManager()
class Meta:
ordering = ('pk',)
verbose_name = _('Document version signature')
verbose_name_plural = _('Document version signatures')
def __str__(self):
return self.signature_id or '{} - {}'.format(self.date, self.key_id)
def get_absolute_url(self):
return reverse(
viewname='signatures:document_version_signature_details',
kwargs={'signature_id': self.pk}
)
def get_key_id(self):
if self.public_key_fingerprint:
return self.public_key_fingerprint[-16:]
else:
return self.key_id
def get_signature_type_display(self):
if self.is_detached:
return _('Detached')
else:
return _('Embedded')
@property
def is_detached(self):
return hasattr(self, 'signature_file')
@property
def is_embedded(self):
return not hasattr(self, 'signature_file')
class EmbeddedSignature(SignatureBaseModel):
objects = EmbeddedSignatureManager()
class Meta:
verbose_name = _('Document version embedded signature')
verbose_name_plural = _('Document version embedded signatures')
def save(self, *args, **kwargs):
logger.debug(msg='checking for embedded signature')
if self.pk:
raw = True
else:
raw = False
with self.document_version.open(raw=raw) as file_object:
try:
verify_result = Key.objects.verify_file(
file_object=file_object
)
except VerificationError as exception:
# Not signed
logger.debug(
'embedded signature verification error; %s', exception
)
else:
self.date = verify_result.date
self.key_id = verify_result.key_id
self.signature_id = verify_result.signature_id
self.public_key_fingerprint = verify_result.pubkey_fingerprint
super(EmbeddedSignature, self).save(*args, **kwargs)
class DetachedSignature(SignatureBaseModel):
signature_file = models.FileField(
blank=True, help_text=_(
'Signature file previously generated.'
), null=True, storage=DefinedStorageLazy(
name=STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE
), upload_to=upload_to, verbose_name=_('Signature file')
)
objects = DetachedSignatureManager()
class Meta:
verbose_name = _('Document version detached signature')
verbose_name_plural = _('Document version detached signatures')
def __str__(self):
return '{}-{}'.format(self.document_version, _('signature'))
def delete(self, *args, **kwargs):
if self.signature_file.name:
self.signature_file.storage.delete(name=self.signature_file.name)
super(DetachedSignature, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
with self.document_version.open() as file_object:
try:
verify_result = Key.objects.verify_file(
file_object=file_object, signature_file=self.signature_file
)
except VerificationError as exception:
# Not signed
logger.debug(
'detached signature verification error; %s', exception
)
else:
self.signature_file.seek(0)
self.date = verify_result.date
self.key_id = verify_result.key_id
self.signature_id = verify_result.signature_id
self.public_key_fingerprint = verify_result.pubkey_fingerprint
return super(DetachedSignature, self).save(*args, **kwargs)
| 2.15625 | 2 |
scripts/sync_reports_config.py | ramezrawas/galaxy-1 | 6 | 2189 | from ConfigParser import ConfigParser
from sys import argv
REPLACE_PROPERTIES = ["file_path", "database_connection", "new_file_path"]
MAIN_SECTION = "app:main"
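# Usage: python scripts/sync_reports_config.py [reports_config] [galaxy_config]
# (both arguments are optional; defaults are config/reports.ini and config/galaxy.ini)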
def sync():
    # Add or replace the relevant properties from galaxy.ini
    # into reports.ini
reports_config_file = "config/reports.ini"
if len(argv) > 1:
reports_config_file = argv[1]
universe_config_file = "config/galaxy.ini"
if len(argv) > 2:
universe_config_file = argv[2]
parser = ConfigParser()
parser.read(universe_config_file)
with open(reports_config_file, "r") as f:
reports_config_lines = f.readlines()
replaced_properties = set([])
with open(reports_config_file, "w") as f:
# Write all properties from reports config replacing as
# needed.
for reports_config_line in reports_config_lines:
(line, replaced_property) = get_synced_line(reports_config_line, parser)
if replaced_property:
replaced_properties.add(replaced_property)
f.write(line)
# If any properties appear in universe config and not in
# reports write these as well.
for replacement_property in REPLACE_PROPERTIES:
if parser.has_option(MAIN_SECTION, replacement_property) and \
not (replacement_property in replaced_properties):
f.write(get_universe_line(replacement_property, parser))
def get_synced_line(reports_line, universe_config):
# Cycle through properties to replace and perform replacement on
# this line if needed.
synced_line = reports_line
replaced_property = None
for replacement_property in REPLACE_PROPERTIES:
if reports_line.startswith(replacement_property) and \
universe_config.has_option(MAIN_SECTION, replacement_property):
synced_line = get_universe_line(replacement_property, universe_config)
replaced_property = replacement_property
break
return (synced_line, replaced_property)
def get_universe_line(property_name, universe_config):
return "%s=%s\n" % (property_name, universe_config.get(MAIN_SECTION, property_name))
if __name__ == '__main__':
sync()
| 2.9375 | 3 |
src/gausskernel/dbmind/xtuner/test/test_ssh.py | wotchin/openGauss-server | 1 | 2190 | # Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# -------------------------------------------------------------------------
#
# test_ssh.py
#
# IDENTIFICATION
# src/gausskernel/dbmind/xtuner/test/test_ssh.py
#
# -------------------------------------------------------------------------
from ssh import ExecutorFactory
def test_remote():
exe = ExecutorFactory().set_host('').set_user('').set_pwd('').get_executor() # padding your information
print(exe.exec_command_sync("cat /proc/cpuinfo | grep \"processor\" | wc -l"))
print(exe.exec_command_sync("cat /proc/self/cmdline | xargs -0"))
print(exe.exec_command_sync("echo -e 'hello \\n world'")[0].count('\n'))
print(exe.exec_command_sync("echo -e 'hello \\n world'")[0])
print(exe.exec_command_sync('echo $SHELL'))
def test_local():
exe = ExecutorFactory().get_executor()
print(exe.exec_command_sync("ping -h"))
if __name__ == "__main__":
test_remote()
test_local()
| 1.828125 | 2 |
models/utils.py | wyshi/Unsupervised-Structure-Learning | 34 | 2191 | <reponame>wyshi/Unsupervised-Structure-Learning<gh_stars>10-100
# Original work Copyright (C) 2017 <NAME>, Carnegie Mellon University
# Modified work Copyright 2018 <NAME>.
import tensorflow as tf
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
def get_bleu_stats(ref, hyps):
scores = []
for hyp in hyps:
try:
scores.append(sentence_bleu([ref], hyp, smoothing_function=SmoothingFunction().method7,
weights=[1./3, 1./3,1./3]))
except:
scores.append(0.0)
return np.max(scores), np.mean(scores)
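# KL divergence KL(N(recog_mu, exp(recog_logvar)) || N(prior_mu, exp(prior_logvar))),
# summed over the latent dimensions of each sample.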
def gaussian_kld(recog_mu, recog_logvar, prior_mu, prior_logvar):
kld = -0.5 * tf.reduce_sum(1 + (recog_logvar - prior_logvar)
- tf.div(tf.pow(prior_mu - recog_mu, 2), tf.exp(prior_logvar))
- tf.div(tf.exp(recog_logvar), tf.exp(prior_logvar)), reduction_indices=1)
return kld
def norm_log_liklihood(x, mu, logvar):
return -0.5*tf.reduce_sum(tf.log(2*np.pi) + logvar + tf.div(tf.pow((x-mu), 2), tf.exp(logvar)), reduction_indices=1)
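# Reparameterization trick: z = mu + exp(0.5 * logvar) * epsilon, with epsilon ~ N(0, I).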
def sample_gaussian(mu, logvar):
epsilon = tf.random_normal(tf.shape(logvar), name="epsilon")
std = tf.exp(0.5 * logvar)
z= mu + tf.multiply(std, epsilon)
return z
def get_bow(embedding, avg=False):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
"""
embedding_size = embedding.get_shape()[2].value
if avg:
return tf.reduce_mean(embedding, reduction_indices=[1]), embedding_size
else:
return tf.reduce_sum(embedding, reduction_indices=[1]), embedding_size
def get_rnn_encode(embedding, cell, length_mask=None, scope=None, reuse=None):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
The padding should have zero
"""
with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
if length_mask is None:
length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1)
length_mask = tf.to_int32(length_mask)
_, encoded_input = tf.nn.dynamic_rnn(cell, embedding, sequence_length=length_mask, dtype=tf.float32)
return encoded_input, cell.state_size
def get_bi_rnn_encode(embedding, f_cell, b_cell, length_mask=None, scope=None, reuse=None):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
The padding should have zero
"""
with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
if length_mask is None:
length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1)
length_mask = tf.to_int32(length_mask)
_, encoded_input = tf.nn.bidirectional_dynamic_rnn(f_cell, b_cell, embedding, sequence_length=length_mask, dtype=tf.float32)
encoded_input = tf.concat(encoded_input, 1)
return encoded_input, f_cell.state_size+b_cell.state_size
def get_prob_for_one_sent(vocab_prob, sent, length_mask=None):
    """
    Keep only the non-padded positions of a sentence tensor (padded to a fixed length of 50).
    :param vocab_prob: per-token vocabulary probabilities (currently unused)
    :param sent: sentence token tensor, reshaped here to [-1, 50]
    :param length_mask: true sequence lengths used to build the boolean mask
    :return: the masked (unpadded) entries of sent
    """
    return tf.boolean_mask(tf.reshape(sent, [-1, 50]), tf.sequence_mask(length_mask, 50))
def tf_repeat(tensor, repeats):
"""
:param tensor:
:param repeats:
:return:
"""
with tf.variable_scope("repeat"):
expanded_tensor = tf.expand_dims(tensor, -1)
multiples = [1] + repeats
tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
return repeated_tensor | 2.484375 | 2 |
gluoncv/data/transforms/block.py | Kh4L/gluon-cv | 5,447 | 2192 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
# pylint: disable= missing-docstring
"Addtional image transforms."
import random
import math
import numpy as np
from mxnet import image, nd
from mxnet.gluon import Block
__all__ = ['RandomCrop', 'RandomErasing']
class RandomCrop(Block):
"""Randomly crop `src` with `size` (width, height).
Padding is optional.
Upsample result if `src` is smaller than `size`.
Parameters
----------
size : int or tuple of (W, H)
Size of the final output.
pad: int or tuple
if int, size of the zero-padding
if tuple, number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all axes.
interpolation : int
Interpolation method for resizing. By default uses bilinear
interpolation. See OpenCV's resize function for available choices.
Inputs:
- **data**: input tensor with (Hi x Wi x C) shape.
Outputs:
- **out**: output tensor with (size[0] x size[1] x C) or (size x size x C) shape.
"""
def __init__(self, size, pad=None, interpolation=2):
super(RandomCrop, self).__init__()
numeric_types = (float, int, np.generic)
if isinstance(size, numeric_types):
size = (size, size)
self._args = (size, interpolation)
self.pad = ((pad, pad), (pad, pad), (0, 0)) if isinstance(pad, int) else pad
def forward(self, x):
if self.pad:
return image.random_crop(nd.array(
np.pad(x.asnumpy(), self.pad, mode='constant', constant_values=0)), *self._args)[0]
else:
return image.random_crop(x, *self._args)[0]
class RandomErasing(Block):
"""Randomly erasing the area in `src` between `s_min` and `s_max` with `probability`.
`ratio` controls the ratio between width and height.
`mean` means the value in erasing area.
Parameters
----------
probability : float
Probability of erasing.
s_min : float
Min area to all area.
s_max : float
Max area to all area.
ratio : float
The ratio between width and height.
mean : int or tuple of (R, G, B)
The value in erasing area.
Inputs:
- **data**: input tensor with (Hi x Wi x C) shape.
Outputs:
- **out**: output tensor with (Hi x Wi x C) shape.
"""
def __init__(self, probability=0.5, s_min=0.02, s_max=0.4, ratio=0.3,
mean=(125.31, 122.96, 113.86)):
super(RandomErasing, self).__init__()
self.probability = probability
self.mean = mean
self.s_min = s_min
self.s_max = s_max
self.ratio = ratio
def forward(self, x):
if not isinstance(self.probability, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.s_min, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.s_max, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.ratio, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.mean, (int, tuple)):
raise TypeError('Got inappropriate size arg')
if random.uniform(0, 1) > self.probability:
return x
width, height, _ = x.shape
area = width * height
target_area = random.uniform(self.s_min, self.s_max) * area
aspect_ratio = random.uniform(self.ratio, 1/self.ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w < width and h < height:
x1 = random.randint(0, width - w)
y1 = random.randint(0, height - h)
x[x1:x1+w, y1:y1+h, 0] = self.mean[0]
x[x1:x1+w, y1:y1+h, 1] = self.mean[1]
x[x1:x1+w, y1:y1+h, 2] = self.mean[2]
return x
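# Usage sketch (illustrative only, not part of the upstream module; assumes an HWC image NDArray):
#   from mxnet import nd
#   img = nd.random.uniform(shape=(256, 256, 3))
#   img = RandomCrop(224, pad=4)(img)
#   img = RandomErasing(probability=0.5)(img)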
| 2.125 | 2 |
explore.py | lribiere/explore-mit-bih-arrhythmia-db | 3 | 2193 | import plotly.graph_objects as go
import streamlit as st
import pandas as pd
from utils import *
import glob
import wfdb
import os
ANNOTATIONS_COL_NAME = 'annotations'
'''
# MIT-BIH Arrhythmia DB Exploration
'''
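# Records are discovered from the *.dat files under ./data/; the matching *.hea and *.atr
# files are required by wfdb.rdrecord and wfdb.rdann below.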
record_ids = [os.path.basename(file)[:-4] for file in glob.glob('data/*.dat')]
if len(record_ids) == 0:
st.write('Warning ! No data could be found under the ./data/ directory.',
'*\*.dat*, *\*.hea*, *\*.atr* files and such should be placed ',
'immediately under the ./data/ directory')
else:
record_ids.sort()
record_id = st.selectbox('Select a record id', record_ids)
record = wfdb.rdrecord(f'data/{record_id}')
annotation = wfdb.rdann(f'data/{record_id}', 'atr')
st.write('Signals found in this record :')
for idx, signal in enumerate(record.sig_name):
st.write(f'- `{signal}` : in {record.units[idx]}, with a frequency of '
f'{record.fs * record.samps_per_frame[idx]}hz')
st.write(f'Comments for this record : {record.comments}')
signals_df = pd.DataFrame(record.p_signal, columns=record.sig_name)
annot_serie = pd.Series(annotation.symbol, index=annotation.sample,
name=ANNOTATIONS_COL_NAME)
full_df = pd.concat([signals_df, annot_serie], axis=1)
''' ## Annotations '''
beat_annot_count = annot_serie.isin(dict(beat_annotations)).sum()
non_beat_annot_count = annot_serie.isin(dict(non_beat_annotations)).sum()
unique_annot = annot_serie.value_counts().index.values
st.write(f'This record contains `{annot_serie.size}` annotations '
f'among which `{beat_annot_count}` beat annotations and '
f'`{non_beat_annot_count}` non beat annotation(s).')
st.write('The annotations are the followings :')
for annot in unique_annot:
st.write(f'- `{annot}` : {annotation_definitions[annot]}')
st.write('More explanations on the annotations are available here : '
'https://archive.physionet.org/physiobank/annotations.shtml')
# Plot counts for each annotation
annot_counts_df = annot_serie \
.value_counts() \
.rename_axis(ANNOTATIONS_COL_NAME) \
.reset_index(name='counts')
bar_fig = go.Figure(data=[go.Bar(x=annot_counts_df[ANNOTATIONS_COL_NAME],
y=annot_counts_df['counts'],
text=annot_counts_df['counts'],
textposition='auto'
)])
bar_fig.update_layout(title='Annotations by count', yaxis_title='counts',
xaxis_title='annotations')
st.write(bar_fig)
''' ## Explore full dataset '''
signal = st.selectbox('Select a signal', record.sig_name)
# Plot signals and annotations
matching_rows_by_annot = {}
for annot in unique_annot:
matching_rows_by_annot[annot] = full_df[ANNOTATIONS_COL_NAME] == annot
fig = go.Figure(layout=go.Layout(title=go.layout.Title(
text='{} signal with annotations'.format(signal))))
fig.add_trace(go.Scatter(x=full_df.index.values,
y=full_df[signal],
mode='lines',
name=signal))
for annot, annot_matching_rows in matching_rows_by_annot.items():
fig.add_trace(go.Scatter(x=full_df.index[annot_matching_rows].values,
y=full_df[annot_matching_rows][signal].values,
mode='markers',
name='{} (annot)'.format(annot)))
st.plotly_chart(fig)
| 2.5625 | 3 |
release/stubs.min/System/__init___parts/CharEnumerator.py | tranconbv/ironpython-stubs | 0 | 2194 | class CharEnumerator(object):
""" Supports iterating over a System.String object and reading its individual characters. This class cannot be inherited. """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return CharEnumerator()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def Clone(self):
"""
Clone(self: CharEnumerator) -> object
Creates a copy of the current System.CharEnumerator object.
Returns: An System.Object that is a copy of the current System.CharEnumerator object.
"""
pass
def Dispose(self):
"""
Dispose(self: CharEnumerator)
Releases all resources used by the current instance of the System.CharEnumerator class.
"""
pass
def MoveNext(self):
"""
MoveNext(self: CharEnumerator) -> bool
Increments the internal index of the current System.CharEnumerator object to the next character of the enumerated string.
Returns: true if the index is successfully incremented and within the enumerated string; otherwise,false.
"""
pass
def next(self,*args):
""" next(self: object) -> object """
pass
def Reset(self):
"""
Reset(self: CharEnumerator)
Initializes the index to a position logically before the first character of the enumerated string.
"""
pass
def __contains__(self,*args):
""" __contains__[Char](enumerator: IEnumerator[Char],value: Char) -> bool """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerator) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the currently referenced character in the string enumerated by this System.CharEnumerator object.
Get: Current(self: CharEnumerator) -> Char
"""
| 3.125 | 3 |
src/home_automation_hub/config.py | levidavis/py-home | 0 | 2195 | <filename>src/home_automation_hub/config.py
from .config_store import ConfigStore
config = ConfigStore()
config.set_mqtt_broker("mqtt", 1883)
config.set_redis_config("redis", 6379, 0)
| 1.296875 | 1 |
Harpe-website/website/contrib/communication/utils.py | Krozark/Harpe-Website | 0 | 2196 | # -*- coding: utf-8 -*-
import socket as csocket
from struct import pack,unpack
from website.contrib.communication.models import *
def enum(**enums):
return type('Enum', (), enums)
class Socket:
Dommaine = enum(IP=csocket.AF_INET,LOCAL=csocket.AF_UNIX)
Type = enum(TCP=csocket.SOCK_STREAM, UDP=csocket.SOCK_DGRAM)
Down = enum(SEND=0,RECIVE=1,BOTH=2)
NTW_WELCOM_MSG = "hello!\0"
NTW_ERROR_NO = 0
def __init__ (self,dommaine,type,protocole=0):
self.sock = csocket.socket(dommaine,type,protocole)
self.buffer = b""
self.status = 0
def connect(self,host,port):
self.sock.connect((host,port))
def verify_connexion(self):
code = 404
if self.receive() > 0:
msg = self._unpack_str()
if msg == self.NTW_WELCOM_MSG and self.status == self.NTW_ERROR_NO:
print "verify_connexion <%d : %s>" % (self.status,msg)
else:
print "verify_connexion <%d : %s>" % (self.status,msg)
self.clear()
return self.status
def _unpack_str(self):
i = 0
while self.buffer[i]!= '\0':
i+=1
i+=1
res = self.buffer[:i]
self.buffer = self.buffer[i:]
return res
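    # Wire format (mirrors the pack/unpack calls below): a 6-byte big-endian header
    # "!Ih" (4-byte payload size + 2-byte status code) followed by the payload bytes.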
def send(self):
size = len(self.buffer)
_size = pack('!Ih',size,self.status)
data = _size + self.buffer
sent = self.sock.send(data)
if sent == 0:
print "Connexion lost"
return False
return True
def receive(self):
recv = b''
recv = self.sock.recv(6)
if recv == b'':
print "Connexion lost"
return None
size,self.status = unpack('!Ih',recv)
self.buffer = self.sock.recv(size)
return len(recv) + len(self.buffer)
#Format C Type Python type Standard size
#x pad byte no value
#c char string of length 1
#b signed char integer 1
#B unsigned char integer 1
#? _Bool bool 1
#h short integer 2
#H unsigned short integer 2
#i int integer 4
#I unsigned int integer 4
#l long integer 4
#L unsigned long integer 4
#q long long integer 8
#Q unsigned long long integer 8
#f float float 4
#d double float 8
#s char[] string
#p char[] string
#P void * integer
def add(self,typ,*args):
self.buffer +=pack('!'+typ,*args)
def clear(self):
self.buffer = b""
self.status = 0
def call(self,ret_type,func_id,types="",*args):
if len(types) < len(args):
print "Wrong number of args/type"
return 0
self.clear()
self.add("i",func_id)
if types:
self.add(types,*args)
self.send()
size = self.receive()
if size:
if self.status != 0:
print "recive error code : %d" % self.status
else:
return unpack("!"+ret_type,self.buffer)[0]
return 0
def create_socket():
sock = Socket(Socket.Dommaine.IP,Socket.Type.TCP)
ser = HarpeServer.objects.filter(is_active=True)[:1]
if not ser:
return False
ser = ser[0]
sock.connect(ser.ip,ser.port)
if sock.verify_connexion() != sock.NTW_ERROR_NO:
print "An error occur"
return None
return sock
def send_AnalyseMgf_to_calc(analyseMfg):
sock = create_socket()
if not sock:
return False
data = analyseMfg.mgf.read() + '\0'
return sock.call("i",HarpeServer.FUNCTION_ID.ANALYSE,"i%ds" % (analyseMfg.mgf.size+1) ,analyseMfg.pk,data)
| 2.78125 | 3 |
traitarm/reconstruction/visualize_recon.py | hzi-bifo/Model-T | 1 | 2197 | import pandas as pd
import ete2
from ete2 import faces, Tree, AttrFace, TreeStyle
import pylab
from matplotlib.colors import hex2color, rgb2hex, hsv_to_rgb, rgb_to_hsv
kelly_colors_hex = [
0xFFB300, # Vivid Yellow
0x803E75, # Strong Purple
0xFF6800, # Vivid Orange
0xA6BDD7, # Very Light Blue
0xC10020, # Vivid Red
0xCEA262, # Grayish Yellow
0x817066, # Medium Gray
# The following don't work well for people with defective color vision
0x007D34, # Vivid Green
0xF6768E, # Strong Purplish Pink
0x00538A, # Strong Blue
0xFF7A5C, # Strong Yellowish Pink
0x53377A, # Strong Violet
0xFF8E00, # Vivid Orange Yellow
0xB32851, # Strong Purplish Red
0xF4C800, # Vivid Greenish Yellow
0x7F180D, # Strong Reddish Brown
0x93AA00, # Vivid Yellowish Green
0x593315, # Deep Yellowish Brown
0xF13A13, # Vivid Reddish Orange
0x232C16, # Dark Olive Green
]
def my_layout(node):
if node.is_leaf():
# If terminal node, draws its name
name_face = AttrFace("name")
else:
# If internal node, draws label with smaller font size
name_face = AttrFace("name", fsize=10)
# Adds the name face to the image at the preferred position
faces.add_face_to_node(name_face, node, column=0, position="branch-right")
def adjust_kelly_brightness(hex_color, val, recon_min, recon_max):
"""set brightness according to change in continuous reconstruction value"""
h, s, v = rgb_to_hsv(hex2color('#{0:06X}'.format(hex_color)))
scale_factor = 1 - (recon_max - val) / (recon_max - recon_min)
v_new = v - (v * (scale_factor))
return rgb2hex(hsv_to_rgb(pd.np.array([h, s, v_new])))
def get_style():
ts = TreeStyle()
# Do not add leaf names automatically
ts.show_leaf_name = False
ts.show_scale = True
ts.force_topology = False
# Use my custom layout
ts.layout_fn = my_layout
return ts
def plot_tree(pt_tree, target_node, out):
#pt_tree, feats, pf2color = get_tree(phenotype = phenotype, feat_list = "top_cor", is_ml_plus_phypat = True, target_node = target_node)
pt_tree.dist = 0
target = pt_tree.search_nodes(name = target_node)[0]
target.render(out + '_tree.pdf', tree_style = get_style())
#target.render(out + '_tree.png', tree_style = get_style())
    return target
def plot_legend(feats, out, pf2color, pf_desc = False, pf_acc = True, include_class = False):
fig = pylab.figure()
figlegend = pylab.figure(figsize = (9, 6))
ax = fig.add_subplot(111)
x = [0,1]
lines = [ax.plot(x, pd.np.ones(len(x)), 'o', color = "#%06x" % (pf2color[feats.index[i]]))[0] for i in range(len(pf2color))]
labels= [i for i in feats.index]
#labels= ["%s" %(feats.loc[:,"Pfam_acc"].iloc[i]) for i in range(feats.shape[0])]
#if include_class:
# labels= ["%s %s" %(labels[i], feats.loc[:, "class"].iloc[i]) for i in range(len(labels))]
#if pf_desc:
# labels = ["%s %s" % (labels[i], pf2short_desc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(len(labels))]
#if pf_acc:
# labels = ["%s %s" % (labels[i], pf2acc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(len(labels))]
figlegend.legend(lines, labels, markerscale = 2.5, numpoints = 1, frameon = False)
#fig.show()
fig.tight_layout()
figlegend.savefig(out + "_legend.svg")
figlegend.savefig(out + "_legend.png")
return figlegend
def get_tree(phenotype, tree, gain_recon, loss_recon, node_recon, pfam_mapping, feat_list, sample_mapping, threshold = 0.5, target_node = None, are_continuous_features_with_discrete_phenotype = False, max_feats = 10, miscl = None, node_annotation = None):
#read target feats
feats = pd.read_csv(feat_list, index_col = 0, sep = "\t")
pt_tree = ete2.Tree(tree, format = 1)
pt_tree.ladderize()
if not node_annotation is None:
node_table = pd.read_csv(node_annotation, sep = "\t", index_col = 0)
sample_mapping = pd.read_csv(sample_mapping, index_col = 0, sep = "\t")
#read node and edge reconstruction matrices
node_recon = pd.read_csv(node_recon, sep = "\t", index_col = 0)
gain_recon = pd.read_csv(gain_recon, sep = "\t", index_col = 0)
gain_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in gain_recon.index.values]
loss_recon = pd.read_csv(loss_recon, sep = "\t", index_col = 0)
loss_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in loss_recon.index.values]
#prune to target node
if target_node is not None:
pt_tree = pt_tree.search_nodes(name = target_node)[0]
node2name = dict((i.name, i.name) for i in pt_tree.traverse(strategy = 'preorder'))
pfams_with_event = set()
pfam2color = {}
#set the style of the branches and nodes according to the posterior probability
top10_feats = feats.iloc[:max_feats,]
#for visualization of continuous feature get the range of values for each feature
if are_continuous_features_with_discrete_phenotype:
recon_min = gain_recon.abs().apply(pd.np.min)
recon_max = gain_recon.abs().apply(pd.np.max)
if not miscl is None:
miscl_m = pd.read_csv(miscl, sep = "\t", index_col = 0)
for n in pt_tree.traverse():
#ignore the root
if n.name == "N1":
continue
if not node_annotation is None:
if n.name in node_table.index:
for attr,i in zip(node_table.columns, range(len(node_table.columns))):
value = node_table.loc[n.name, attr]
if not pd.isnull(value):
if value == 0:
rf = ete2.CircleFace(radius = 8, style = "circle", color = 'red')
elif value == 2:
rf = faces.CircleFace(radius = 8, style = "circle", color = 'orange')
else:
rf = faces.CircleFace(radius = 8, style = "circle", color = 'green')
else:
rf = faces.CircleFace(radius = 8, style = "circle", color = 'grey')
n.add_face(rf, column = i, position = "aligned")
ns = node_recon.loc[n.name, phenotype]
style = ete2.NodeStyle()
style["shape"] = 'square'
style['size'] = 10
if pd.isnull(ns):
style['fgcolor'] = 'grey'
elif ns < threshold:
style['fgcolor'] = 'darkred'
else:
style['fgcolor'] = 'green'
if not n.name == "N1":
branch_id = n.name + "_" + n.up.name
if gain_recon.loc[branch_id, phenotype] > threshold:
style["hz_line_type"] = 1
style["hz_line_color"] = 'green'
style["hz_line_width"] = 3
elif loss_recon.loc[branch_id, phenotype] > threshold:
style["hz_line_type"] = 1
style["hz_line_color"] = 'red'
style["hz_line_width"] = 3
else:
style["hz_line_type"] = 0
style["hz_line_color"] = 'black'
n.set_style(style)
#check if sample was misclassified and add misclassified label
if not miscl is None:
if node2name[n.name] in miscl_m.index:
tf = faces.TextFace("misclassified")
n.add_face(tf, column = 0, position = "branch-right")
#set species name instead of tax id
if n.name in sample_mapping.index:
node2name[n.name] = sample_mapping.loc[n.name,][0]
#add majority feature gains and losses
events = []
for i in range(top10_feats.shape[0]):
if not are_continuous_features_with_discrete_phenotype:
cf = faces.CircleFace(radius = 8, style = "circle", color = kelly_colors_hex[i])
#gain events
if gain_recon.loc[branch_id, top10_feats.index[i]] > threshold:
pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
tf = faces.TextFace("-")
events.append(tf)
pfams_with_event.add(node_recon.index[i])
events.append(cf)
#loss events
elif loss_recon.loc[branch_id, top10_feats.index[i]] > threshold:
pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
tf = faces.TextFace("-")
events.append(tf)
pfams_with_event.add(node_recon.index[i])
events.append(cf)
#continuous features
else:
adjusted_color = adjust_kelly_brightness(kelly_colors_hex[i], abs(loss_recon.loc[branch_id, top10_feats.index[i]]), recon_min.loc[top10_feats.index[i]], recon_max.loc[top10_feats.index[i]])
#tf = faces.TextFace(gain_recon.loc[branch_id, top10_feats.index[i]])
if loss_recon.loc[branch_id, top10_feats.index[i]] < 0:
tf = faces.TextFace("-")
else:
tf = faces.TextFace("+")
cf = faces.CircleFace(radius = 8, style = "circle", color = adjusted_color)
pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
pfams_with_event.add(node_recon.index[i])
events.append(cf)
events.append(tf)
for i in range(len(events)):
n.add_face(events[i], column = i, position = "branch-top")
for n in pt_tree.traverse():
if n.name in node2name:
n.name = node2name[n.name]
#filtered_pfams = filter(lambda i: i in list(pfams_with_event), top10_feats.loc[:,"Pfam_acc"].values)
#print filtered_pfams
#filtered_ids = pt_gt2id.loc[filtered_pfams, 0] - 1
#print filtered_ids
#top10_feats_with_event = top10_feats.loc[filtered_ids,]
#process node annotation
return pt_tree, top10_feats, pfam2color
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("""visualize target list of features""")
parser.add_argument("node_recon", help = "node ancestral character state reconstruction")
parser.add_argument("gain_recon", help = "gain events ancestral character state reconstruction")
parser.add_argument("loss_recon", help = "loss events ancestral character state reconstruction")
parser.add_argument("tree", help = "tree with internal nodes labeled")
parser.add_argument("pfam_mapping", help = "feature mapping/list")
parser.add_argument("feat_list", help = "list of features")
parser.add_argument("--target_node", default = "N1", help = "list of features")
parser.add_argument("phenotype", help = "target phenotype")
parser.add_argument("--are_continuous_features_with_discrete_phenotype", action = 'store_true', help = "set if using continuous features with a discrete phenotype")
parser.add_argument("threshold", type = float, help = "threshold to call genotype/phenotype events")
parser.add_argument("sample_mapping", help = "mapping between sample ids and names")
parser.add_argument("out", help = "output file")
parser.add_argument("--max_feats", type = int, default = 10, help = "visualize at most max_feats features")
parser.add_argument("--miscl", help = "table of misclassified samples")
parser.add_argument("--node_annotation", help = "table of binary features for labeling the nodes")
a = parser.parse_args()
pt_tree, feats, pf2color = get_tree(node_recon = a.node_recon, gain_recon = a.gain_recon, loss_recon = a.loss_recon, pfam_mapping = a.pfam_mapping, tree = a.tree, feat_list = a.feat_list, phenotype = a.phenotype, target_node = a.target_node, threshold = a.threshold, sample_mapping = a.sample_mapping, are_continuous_features_with_discrete_phenotype = a.are_continuous_features_with_discrete_phenotype, max_feats = a.max_feats, miscl = a.miscl, node_annotation = a.node_annotation)
plot_tree(pt_tree, a.target_node, a.out)
plot_legend(feats, a.out, pf2color)
| 2.4375 | 2 |
scripts/misc/operator_condition_number_scipy.py | volpatto/firedrake_scripts | 5 | 2198 | import attr
from firedrake import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from slepc4py import SLEPc
import pandas as pd
from tqdm import tqdm
import os
matplotlib.use('Agg')
@attr.s
class ConditionNumberResult(object):
form = attr.ib()
assembled_form = attr.ib()
condition_number = attr.ib()
sparse_operator = attr.ib()
number_of_dofs = attr.ib()
nnz = attr.ib()
is_operator_symmetric = attr.ib()
    bcs = attr.ib(default=attr.Factory(list))
def plot_matrix(assembled_form, **kwargs):
"""Provides a plot of a matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def plot_matrix_mixed(assembled_form, **kwargs):
"""Provides a plot of a mixed matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
f1_size = assembled_form.M[1, 1].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k")
return plot
def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs):
"""Provides a plot of a condensed hybrid-mixed matrix for single scale problems."""
fig, ax = plt.subplots(1, 1)
_A = Tensor(a_form)
A = _A.blocks
idx = trace_index
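    # Schur complement onto the trace (multiplier) block: S = A_tt - A_ti * A_ii^{-1} * A_it.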
S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Below there is the spy alternative
# plot = plt.spy(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray:
"""Utility function to filter real part in a numpy array.
:param array:
Array with real and complex numbers.
:param imag_threshold:
Threshold to cut off imaginary part in complex number.
:return:
Filtered array with only real numbers.
"""
    real_part_array = array.real[abs(array.imag) < imag_threshold]
return real_part_array
def calculate_condition_number(
A,
num_of_factors,
backend: str = "scipy",
use_sparse: bool = False,
zero_tol: float = 1e-5
):
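    # Estimates the 2-norm condition number as sigma_max / sigma_min over the
    # singular values larger than zero_tol.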
backend = backend.lower()
if backend == "scipy":
size = A.getSize()
Mnp = csr_matrix(A.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
if use_sparse:
singular_values = svds(
A=Mnp,
k=num_of_factors,
which="LM",
maxiter=5000,
return_singular_vectors=False,
solver="lobpcg"
)
else:
M = Mnp.toarray()
singular_values = svd(M, compute_uv=False, check_finite=False)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
elif backend == "slepc":
S = SLEPc.SVD()
S.create()
S.setOperator(A)
S.setType(SLEPc.SVD.Type.LAPACK)
S.setDimensions(nsv=num_of_factors)
S.setTolerances(max_it=5000)
S.setWhichSingularTriplets(SLEPc.SVD.Which.LARGEST)
S.solve()
num_converged_values = S.getConverged()
singular_values_list = list()
if num_converged_values > 0:
for i in range(num_converged_values):
singular_value = S.getValue(i)
singular_values_list.append(singular_value)
else:
raise RuntimeError("SLEPc SVD has not converged.")
singular_values = np.array(singular_values_list)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
else:
raise NotImplementedError("The required method for condition number estimation is currently unavailable.")
return condition_number
def solve_poisson_cg(mesh, degree=1, use_quads=False):
# Function space declaration
V = FunctionSpace(mesh, "CG", degree)
# Trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
# Dirichlet BCs
bcs = DirichletBC(V, 0.0, "on_boundary")
# Variational form
a = inner(grad(u), grad(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_ls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Stabilization parameters
delta_1 = Constant(1)
delta_2 = Constant(1)
delta_3 = Constant(1)
# Least-squares terms
a = delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_cgls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# Stabilizing terms
a += -0.5 * inner((u + grad(p)), v + grad(q)) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
a += 0.5 * div(u) * div(v) * dx
a += 0.5 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_vms(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# Stabilizing terms
a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_mixed_RT(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
if use_quads:
hdiv_family = 'RTCF'
pressure_family = 'DQ'
else:
hdiv_family = 'RT'
pressure_family = 'DG'
U = FunctionSpace(mesh, hdiv_family, degree + 1)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dgls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# eta_u = 1
# Nitsche's penalizing term
beta_0 = Constant(1.0)
beta = beta_0 / h
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent terms
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += -0.5 * inner(u + grad(p), v + grad(q)) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# ** Badia-Codina based
a += -eta_u * inner(u + grad(p), v + grad(q)) * dx
a += eta_p * div(u) * div(v) * dx
a += eta_p * inner(curl(u), curl(v)) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
    a += beta * p * q * ds  # may decrease convergence rates
# ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method
a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
a += (eta_u / h) * dot(p * n, q * n) * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dvms(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent (original)
# a += jump(u, n) * jump(v, n) * dS # not considered in the original paper
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
# ** Badia-Codina based
a += eta_u * inner(u + grad(p), grad(q) - v) * dx
a += eta_p * div(u) * div(v) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
# ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method
a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
    a += (eta_u / h) * dot(p * n, q * n) * ds  # may decrease convergence rates
# ** Classical Nitsche
    # a += beta * p * q * ds  # may decrease convergence rates (Nitsche)
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sipg(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
V = FunctionSpace(mesh, pressure_family, degree)
# Trial and test functions
p = TrialFunction(V)
q = TestFunction(V)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Edge stabilizing parameter
beta0 = Constant(1e1)
beta = beta0 / h
# Symmetry term. Choose if the method is SIPG (-1) or NIPG (1)
s = Constant(-1)
# Classical volumetric terms
a = inner(grad(p), grad(q)) * dx
L = f * q * dx
# DG edge terms
a += s * dot(jump(p, n), avg(grad(q))) * dS - dot(avg(grad(p)), jump(q, n)) * dS
# Edge stabilizing terms
a += beta("+") * dot(jump(p, n), jump(q, n)) * dS
# Weak boundary conditions
a += s * dot(p * n, grad(q)) * ds - dot(grad(p), q * n) * ds
a += beta * p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
# L0 = 1
# eta_p = L0 * h_avg # method B in the Badia-Codina paper
eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
# eta_u = h_avg / L0 # method B in the Badia-Codina paper
eta_u = 1
# eta_u_bc = h / L0 # method B in the Badia-Codina paper
eta_u_bc = 1
# Least-Squares weights
delta = Constant(1.0)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = 1 / h
delta_4 = 1 / h
# Least-squares terms
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
a += delta_1 * div(u) * div(v) * dx
a += delta_2 * inner(curl(u), curl(v)) * dx
# Edge stabilizing terms
# ** Badia-Codina based (better results) **
a += eta_u * avg(delta_3) * (jump(u, n) * jump(v, n)) * dS
a += eta_p * avg(delta_4) * dot(jump(p, n), jump(q, n)) * dS
    a += eta_u_bc * delta_3 * p * q * ds  # may decrease convergence rates
a += eta_u_bc * delta_4 * dot(u, n) * dot(v, n) * ds
# ** Mesh independent **
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# a += p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-12)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sdhm(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# BCs
u_projected = sigma_e
p_boundaries = p_exact
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e-18)
# beta = beta_0 / h
beta = beta_0
# Stabilization parameters
delta_0 = Constant(-1)
delta_1 = Constant(-0.5) * h * h
delta_2 = Constant(0.5) * h * h
delta_3 = Constant(0.5) * h * h
# Mixed classical terms
a = (dot(u, v) - div(v) * p + delta_0 * q * div(u)) * dx
L = delta_0 * f * q * dx
# Stabilizing terms
a += delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
L += delta_2 * f * div(v) * dx
# Hybridization terms
a += lambda_h("+") * dot(v, n)("+") * dS + mu_h("+") * dot(u, n)("+") * dS
a += beta("+") * (lambda_h("+") - p("+")) * (mu_h("+") - q("+")) * dS
# Weakly imposed BC
a += (p_boundaries * dot(v, n) + mu_h * (dot(u, n) - dot(u_projected, n))) * ds
a += beta * (lambda_h - p_boundaries) * mu_h * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
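    # Static condensation via Slate: S is the Schur complement that eliminates the (u, p) blocks,
    # leaving an operator that acts only on the trace multiplier lambda_h.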
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
def solve_poisson_hdg(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = (dot(u, v) - div(v) * p) * dx + lambda_h("+") * jump(v, n) * dS
a += -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += lambda_h * dot(v, n) * ds
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_cgh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u = -grad(p)
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_ldgc(
mesh,
degree=1,
is_multiplier_continuous=True
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
primal_family = "DQ" if use_quads else "DG"
V = FunctionSpace(mesh, primal_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
p_boundaries = Constant(0.0)
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
s = Constant(-1.0)
beta = Constant(32.0)
h = CellDiameter(mesh)
h_avg = avg(h)
# Classical term
a = dot(grad(p), grad(q)) * dx
L = f * q * dx
# Hybridization terms
a += s * dot(grad(q), n)("+") * (p("+") - lambda_h("+")) * dS
a += -dot(grad(p), n)("+") * (q("+") - mu_h("+")) * dS
a += (beta / h_avg) * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# Boundary terms
# a += -dot(vel_projected, n) * v * ds # How to set this bc??
# a += (beta / h) * (p- p_boundaries) * q * ds # is this necessary?
L += s * dot(grad(q), n) * p_boundaries * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_lsh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# BCs
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0)
beta = beta_0 / h
beta_avg = beta_0 / h("+")
# Stabilizing parameter
# delta_0 = Constant(1)
# delta_1 = Constant(1)
# delta_2 = Constant(1)
# delta_3 = Constant(1)
# delta_4 = Constant(1)
# delta_5 = Constant(1)
# LARGE_NUMBER = Constant(1e0)
delta = h * h
# delta = Constant(1)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = delta
delta_4 = delta
# delta_4 = LARGE_NUMBER / h
delta_5 = delta
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
v_hat = v + beta * (q - mu_h) * n
# Flux least-squares
# a = (
# (inner(u, v) - q * div(u) - p * div(v) + inner(grad(p), grad(q)))
# * delta_1
# * dx
# )
# # These terms below are unsymmetric
# a += delta_1 * jump(u_hat, n=n) * q("+") * dS
# a += delta_1("+") * dot(u_hat, n) * q * ds
# # a += delta_1 * dot(u, n) * q * ds
# # L = -delta_1 * dot(u_projected, n) * q * ds
# a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
# a += delta_1 * lambda_h * dot(v, n) * ds
# # L = delta_1 * p_exact * dot(v, n) * ds
# Flux Least-squares as in DG
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
# Classical mixed Darcy eq. first-order terms as stabilizing terms
a += delta_1 * (dot(u, v) - div(v) * p) * dx
a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
a += delta_1 * lambda_h * dot(v, n) * ds
# Mass balance least-square
a += delta_2 * div(u) * div(v) * dx
# L = delta_2 * f * div(v) * dx
# Irrotational least-squares
a += delta_3 * inner(curl(u), curl(v)) * dx
# Hybridization terms
a += mu_h("+") * jump(u_hat, n=n) * dS
a += delta_4("+") * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# a += delta_4 * (p - lambda_h) * (q - mu_h) * ds
# a += delta_5 * (dot(u, n)("+") - dot(u_hat, n)("+")) * (dot(v, n)("+") - dot(v_hat, n)("+")) * dS
# a += delta_5 * (dot(u, n) - dot(u_hat, n)) * (dot(v, n) - dot(v_hat, n)) * ds
# Weakly imposed BC from hybridization
# a += mu_h * (lambda_h - p_boundaries) * ds
# a += mu_h * lambda_h * ds
# ###
# a += (
# (mu_h - q) * (lambda_h - p_boundaries) * ds
    # ) # maybe this is not a good way to impose BC, but it is necessary
_A = Tensor(a)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
def hp_refinement_cond_number_calculation(
solver,
min_degree=1,
max_degree=4,
numel_xy=(5, 10, 15, 20, 25),
quadrilateral=True,
name="",
**kwargs
):
results_dict = {
"Element": list(),
"Number of Elements": list(),
"Degree": list(),
"Symmetric": list(),
"nnz": list(),
"dofs": list(),
"h": list(),
"Condition Number": list(),
}
element_kind = "Quad" if quadrilateral else "Tri"
pbar = tqdm(range(min_degree, max_degree))
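    # Note: range() excludes its upper bound, so degrees min_degree .. max_degree - 1 are run.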
for degree in pbar:
for n in numel_xy:
pbar.set_description(f"Processing {name} - degree = {degree} - N = {n}")
mesh = UnitSquareMesh(n, n, quadrilateral=quadrilateral)
result = solver(mesh, degree=degree)
current_cell_size = mesh.cell_sizes.dat.data_ro.min() if not quadrilateral else 1 / n
results_dict["Element"].append(element_kind)
results_dict["Number of Elements"].append(n * n)
results_dict["Degree"].append(degree)
results_dict["Symmetric"].append(result.is_operator_symmetric)
results_dict["nnz"].append(result.nnz)
results_dict["dofs"].append(result.number_of_dofs)
results_dict["h"].append(current_cell_size)
results_dict["Condition Number"].append(result.condition_number)
os.makedirs("./cond_number_results/results_%s" % name, exist_ok=True)
df_cond_number = pd.DataFrame(data=results_dict)
path_to_save_results = "./cond_number_results/results_%s/cond_numbers.csv" % name
df_cond_number.to_csv(path_to_save_results)
return df_cond_number
# Solver options
solvers_options = {
# "cg": solve_poisson_cg,
# "cgls": solve_poisson_cgls,
# "dgls": solve_poisson_dgls,
# "sdhm": solve_poisson_sdhm,
# "ls": solve_poisson_ls,
# "dls": solve_poisson_dls,
"lsh": solve_poisson_lsh,
# "vms": solve_poisson_vms,
# "dvms": solve_poisson_dvms,
# "mixed_RT": solve_poisson_mixed_RT,
# "hdg": solve_poisson_hdg,
# "cgh": solve_poisson_cgh,
# "ldgc": solve_poisson_ldgc,
# "sipg": solve_poisson_sipg,
}
degree = 1
last_degree = 1
for current_solver in solvers_options:
# Setting the output file name
name = f"{current_solver}"
# Selecting the solver and its kwargs
solver = solvers_options[current_solver]
    # Performing the condition number study
hp_refinement_cond_number_calculation(
solver,
min_degree=degree,
max_degree=degree + last_degree,
quadrilateral=True,
name=name
)
# N = 5
# mesh = UnitSquareMesh(N, N, quadrilateral=True)
# result = solve_poisson_lsh(mesh, degree=1)
# print(f'Is symmetric? {result.is_operator_symmetric}')
# print(f'nnz: {result.nnz}')
# print(f'DoFs: {result.number_of_dofs}')
# print(f'Condition Number: {result.condition_number}')
# # Plotting the resulting matrix
# matplotlib.use('TkAgg')
# import copy
# my_cmap = copy.copy(plt.cm.get_cmap("winter"))
# my_cmap.set_bad(color="lightgray")
# # plot_matrix_primal_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# # plot_matrix_mixed_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# plot_matrix_hybrid_multiplier(result.form, trace_index=2, bcs=result.bcs, cmap=my_cmap)
# # plot_matrix(result.assembled_form, cmap=my_cmap)
# # plot_matrix_mixed(result.assembled_form, cmap=my_cmap)
# plt.tight_layout()
# plt.savefig("sparse_pattern.png")
# plt.show() | 2.328125 | 2 |
pydeap/feature_extraction/_time_domain_features.py | Wlgls/pyDEAP | 0 | 2199 | <reponame>Wlgls/pyDEAP<gh_stars>0
# -*- encoding: utf-8 -*-
'''
@File :_time_domain_features.py
@Time :2021/04/16 20:02:55
@Author :wlgls
@Version :1.0
'''
import numpy as np
def statistics(data, combined=True):
"""Statistical features, include Power, Mean, Std, 1st differece, Normalized 1st difference, 2nd difference, Normalized 2nd difference.
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [13]: d.shape, l.shape
Out[13]: ((40, 32, 8064), (40, 1))
In [14]: statistics_feature(d).shape
Out[14]: (40, 32, 7)
"""
# Power
power = np.mean(data**2, axis=-1)
# Mean
ave = np.mean(data, axis=-1)
# Standard Deviation
std = np.std(data, axis=-1)
    # mean of the absolute values of the 1st difference
diff_1st = np.mean(np.abs(np.diff(data,n=1, axis=-1)), axis=-1)
# the mean of the absolute values of Normalized 1st difference
normal_diff_1st = diff_1st / std
    # mean of the absolute values of the 2nd difference
diff_2nd = np.mean(np.abs(data[..., 2:] - data[..., :-2]), axis=-1)
# the mean of the absolute values of Normalized 2nd difference
normal_diff_2nd = diff_2nd / std
# Features.append(np.concatenate((Power, Mean, Std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=2))
f = np.stack((power, ave, std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=-1)
    if combined:
        # Combine the channel and feature axes into a single feature vector per trial.
        f = f.reshape((*f.shape[:-2], -1))
    return f
def hjorth(data, combined=True):
"""Solving Hjorth features, include activity, mobility, complexity
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [15]: d.shape, l.shape
Out[15]: ((40, 32, 8064), (40, 1))
In [16]: hjorth_features(d).shape
Out[16]: (40, 32, 3)
"""
data = np.array(data)
ave = np.mean(data, axis=-1)[..., np.newaxis]
diff_1st = np.diff(data, n=1, axis=-1)
# print(diff_1st.shape)
diff_2nd = data[..., 2:] - data[..., :-2]
# Activity
activity = np.mean((data-ave)**2, axis=-1)
# print(Activity.shape)
# Mobility
varfdiff = np.var(diff_1st, axis=-1)
# print(varfdiff.shape)
mobility = np.sqrt(varfdiff / activity)
# Complexity
varsdiff = np.var(diff_2nd, axis=-1)
complexity = np.sqrt(varsdiff/varfdiff) / mobility
f = np.stack((activity, mobility, complexity), axis=-1)
    if combined:
        # Combine the channel and feature axes into a single feature vector per trial.
        f = f.reshape((*f.shape[:-2], -1))
    return f
def higher_order_crossing(data, k=10, combined=True):
"""Solving the feature of hoc. Hoc is a high order zero crossing quantity.
Parameters
----------
data : array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
k : int, optional
Order, by default 10
Return
----------
nzc:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [4]: d, l = load_deap(path, 0)
In [5]: hoc(d, k=10).shape
Out[5]: (40, 32, 10)
In [6]: hoc(d, k=5).shape
Out[6]: (40, 32, 5)
"""
nzc = []
for i in range(k):
curr_diff = np.diff(data, n=i)
x_t = curr_diff >= 0
x_t = np.diff(x_t)
x_t = np.abs(x_t)
count = np.count_nonzero(x_t, axis=-1)
nzc.append(count)
f = np.stack(nzc, axis=-1)
    if combined:
        # Combine the channel and feature axes into a single feature vector per trial.
        f = f.reshape((*f.shape[:-2], -1))
    return f
def sevcik_fd(data, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
The Sevick method is used here because it is easier to implement
Parameters
----------
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
In [8]: sevcik_fd(d).shape
Out[8]: (40, 32, 1)
"""
points = data.shape[-1]
x = np.arange(1, points+1)
x_ = x / np.max(x)
miny = np.expand_dims(np.min(data, axis=-1), axis=-1)
maxy = np.expand_dims(np.max(data, axis=-1), axis=-1)
y_ = (data-miny) / (maxy-miny)
L = np.expand_dims(np.sum(np.sqrt(np.diff(y_, axis=-1)**2 + np.diff(x_)**2), axis=-1), axis=-1)
f = 1 + np.log(L) / np.log(2 * (points-1))
# print(FD.shape)
    if combined:
        # Combine the channel and feature axes into a single feature vector per trial.
        f = f.reshape((*f.shape[:-2], -1))
    return f
def calc_L(X, k, m):
"""
Return Lm(k) as the length of the curve.
"""
N = X.shape[-1]
n = np.floor((N-m)/k).astype(np.int64)
norm = (N-1) / (n*k)
ss = np.sum(np.abs(np.diff(X[..., m::k], n=1)), axis=-1)
Lm = (ss*norm) / k
return Lm
def calc_L_average(X, k):
"""
Return <L(k)> as the average value over k sets of Lm(k).
"""
calc_L_series = np.frompyfunc(lambda m: calc_L(X, k, m), 1, 1)
L_average = np.average(calc_L_series(np.arange(1, k+1)))
return L_average
def higuchi_fd(data, k_max, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
The higuchi method is used here because it is easier to implement
Parameters
----------
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
In [8]: higuchi_fd(dif combined:
f = f
return ).shape
Out[8]: (40, 32, 1)
"""
calc_L_average_series = np.frompyfunc(lambda k: calc_L_average(data, k), 1, 1)
k = np.arange(1, k_max+1)
L = calc_L_average_series(k)
L = np.stack(L, axis=-1)
    fd = np.zeros(data.shape[:-1])
    for ind in np.argwhere(L[..., 0]):
        tmp = L[tuple(ind)]
        D, _ = np.polyfit(np.log2(k), np.log2(tmp), 1)
        fd[tuple(ind)] = -D
    f = np.expand_dims(fd, axis=-1)
    if combined:
        # Combine the channel and feature axes into a single feature vector per trial.
        f = f.reshape((*f.shape[:-2], -1))
    return f
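

# Added usage sketch (not part of the original module): a minimal, self-contained demo that runs
# each feature extractor on synthetic random data shaped like a DEAP recording
# (n_trials, n_channels, n_points). The array sizes and variable names below are illustrative only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    fake_eeg = rng.standard_normal((4, 8, 512))

    print(statistics(fake_eeg, combined=False).shape)                  # -> (4, 8, 7)
    print(hjorth(fake_eeg, combined=False).shape)                      # -> (4, 8, 3)
    print(higher_order_crossing(fake_eeg, k=5, combined=False).shape)  # -> (4, 8, 5)
    print(sevcik_fd(fake_eeg, combined=False).shape)                   # -> (4, 8, 1)
    print(higuchi_fd(fake_eeg, k_max=8, combined=False).shape)         # -> (4, 8, 1)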
| 2.8125 | 3 |