code | code_sememe | token_type | code_dependency
---|---|---|---|
stringlengths 75–104k | stringlengths 47–309k | stringlengths 215–214k | stringlengths 75–155k
def get_variable_info(cls, hads_var_name):
"""
Returns a tuple of (mmi name, units, english name, english description) or None.
"""
if hads_var_name == "UR":
return (
"wind_gust_from_direction",
"degrees from N",
"Wind Gust from Direction",
"Direction from which wind gust is blowing when maximum wind speed is observed. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["VJA", "TX"]:
return (
"air_temperature_maximum",
"f",
"Air Temperature Maximum",
"",
)
elif hads_var_name in ["VJB", "TN"]:
return (
"air_temperature_minimum",
"f",
"Air Temperature Minumum",
"",
)
elif hads_var_name == "PC": # PC2?
return (
"precipitation_accumulated",
"in",
"Precipitation Accumulated",
"Amount of liquid equivalent precipitation accumulated or totaled for a defined period of time, usually hourly, daily, or annually.",
)
elif hads_var_name == "PP":
return (
"precipitation_rate",
"in",
"Precipitation Rate",
"Amount of wet equivalent precipitation per unit time.",
)
elif hads_var_name == "US":
return (
"wind_speed",
"mph",
"Wind Speed",
"Magnitude of wind velocity. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name == "UD":
return (
"wind_from_direction",
"degrees_true",
"Wind from Direction",
"Direction from which wind is blowing. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["UP", "UG", "VUP"]:
return (
"wind_gust",
"mph",
"Wind Gust Speed",
"Maximum instantaneous wind speed (usually no more than but not limited to 10 seconds) within a sample averaging interval. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["TA", "TA2"]:
return (
"air_temperature",
"f",
"Air Temperature",
"Air temperature is the bulk temperature of the air, not the surface (skin) temperature.",
)
elif hads_var_name == "MT":
return ("fuel_temperature", "f", "Fuel Temperature", "")
elif hads_var_name == "XR":
return ("relative_humidity", "percent", "Relative Humidity", "")
elif hads_var_name == "VB":
return ("battery_voltage", "voltage", "Battery Voltage", "")
elif hads_var_name == "MM":
return ("fuel_moisture", "percent", "Fuel Moisture", "")
elif hads_var_name == "RW":
return ("solar_radiation", "watt/m^2", "Solar Radiation", "")
elif hads_var_name == "RS":
return (
"photosynthetically_active_radiation",
"watt/m^2",
"Photosynthetically Active Radiation",
"",
)
elif hads_var_name == "TW": # TW2?
return (
"sea_water_temperature",
"f",
"Sea Water Temperature",
"Sea water temperature is the in situ temperature of the sea water.",
)
elif hads_var_name == "WT":
return (
"turbidity",
"nephelometric turbidity units",
"Turbidity",
"",
)
elif hads_var_name == "WC":
return (
"sea_water_electrical_conductivity",
"micro mhos/cm",
"Sea Water Electrical Conductivity",
"",
)
elif hads_var_name == "WP":
return (
"sea_water_ph_reported_on_total_scale",
"std units",
"Sea Water PH reported on Total Scale",
"the measure of acidity of seawater",
)
elif hads_var_name == "WO":
return ("dissolved_oxygen", "ppm", "Dissolved Oxygen", "")
elif hads_var_name == "WX":
return (
"dissolved_oxygen_saturation",
"percent",
"Dissolved Oxygen Saturation",
"",
)
elif hads_var_name == "TD":
return (
"dew_point_temperature",
"f",
"Dew Point Temperature",
"the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity.",
)
elif hads_var_name == "HG": # HG2?
return ("stream_gage_height", "ft", "Stream Gage Height", "")
elif hads_var_name == "HP":
return (
"water_surface_height_above_reference_datum",
"ft",
"Water Surface Height Above Reference Datum",
"means the height of the upper surface of a body of liquid water, such as sea, lake or river, above an arbitrary reference datum.",
)
elif hads_var_name == "WS":
return ("salinity", "ppt", "Salinity", "")
elif hads_var_name == "HM":
return ("water_level", "ft", "Water Level", "")
elif hads_var_name == "PA":
return ("air_pressure", "hp", "Air Pressure", "")
elif hads_var_name == "SD":
return ("snow_depth", "in", "Snow Depth", "")
elif hads_var_name == "SW":
return ("snow_water_equivalent", "m", "Snow Water Equivalent", "")
elif hads_var_name == "TS":
return (
"soil_temperature",
"f",
"Soil Temperature",
"Soil temperature is the bulk temperature of the soil, not the surface (skin) temperature.",
)
return None | def function[get_variable_info, parameter[cls, hads_var_name]]:
constant[
Returns a tuple of (mmi name, units, english name, english description) or None.
]
if compare[name[hads_var_name] equal[==] constant[UR]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b2353d30>, <ast.Constant object at 0x7da1b2353d00>, <ast.Constant object at 0x7da1b2353cd0>, <ast.Constant object at 0x7da1b2353ca0>]]]
return[constant[None]] | keyword[def] identifier[get_variable_info] ( identifier[cls] , identifier[hads_var_name] ):
literal[string]
keyword[if] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] identifier[hads_var_name] == literal[string] :
keyword[return] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[return] keyword[None] | def get_variable_info(cls, hads_var_name):
"""
Returns a tuple of (mmi name, units, english name, english description) or None.
"""
if hads_var_name == 'UR':
return ('wind_gust_from_direction', 'degrees from N', 'Wind Gust from Direction', 'Direction from which wind gust is blowing when maximum wind speed is observed. Meteorological Convention. Wind is motion of air relative to the surface of the earth.') # depends on [control=['if'], data=[]]
elif hads_var_name in ['VJA', 'TX']:
return ('air_temperature_maximum', 'f', 'Air Temperature Maximum', '') # depends on [control=['if'], data=[]]
elif hads_var_name in ['VJB', 'TN']:
return ('air_temperature_minimum', 'f', 'Air Temperature Minimum', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'PC': # PC2?
return ('precipitation_accumulated', 'in', 'Precipitation Accumulated', 'Amount of liquid equivalent precipitation accumulated or totaled for a defined period of time, usually hourly, daily, or annually.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'PP':
return ('precipitation_rate', 'in', 'Precipitation Rate', 'Amount of wet equivalent precipitation per unit time.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'US':
return ('wind_speed', 'mph', 'Wind Speed', 'Magnitude of wind velocity. Wind is motion of air relative to the surface of the earth.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'UD':
return ('wind_from_direction', 'degrees_true', 'Wind from Direction', 'Direction from which wind is blowing. Meteorological Convention. Wind is motion of air relative to the surface of the earth.') # depends on [control=['if'], data=[]]
elif hads_var_name in ['UP', 'UG', 'VUP']:
return ('wind_gust', 'mph', 'Wind Gust Speed', 'Maximum instantaneous wind speed (usually no more than but not limited to 10 seconds) within a sample averaging interval. Wind is motion of air relative to the surface of the earth.') # depends on [control=['if'], data=[]]
elif hads_var_name in ['TA', 'TA2']:
return ('air_temperature', 'f', 'Air Temperature', 'Air temperature is the bulk temperature of the air, not the surface (skin) temperature.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'MT':
return ('fuel_temperature', 'f', 'Fuel Temperature', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'XR':
return ('relative_humidity', 'percent', 'Relative Humidity', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'VB':
return ('battery_voltage', 'voltage', 'Battery Voltage', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'MM':
return ('fuel_moisture', 'percent', 'Fuel Moisture', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'RW':
return ('solar_radiation', 'watt/m^2', 'Solar Radiation', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'RS':
return ('photosynthetically_active_radiation', 'watt/m^2', 'Photosynthetically Active Radiation', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'TW': # TW2?
return ('sea_water_temperature', 'f', 'Sea Water Temperature', 'Sea water temperature is the in situ temperature of the sea water.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'WT':
return ('turbidity', 'nephelometric turbidity units', 'Turbidity', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'WC':
return ('sea_water_electrical_conductivity', 'micro mhos/cm', 'Sea Water Electrical Conductivity', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'WP':
return ('sea_water_ph_reported_on_total_scale', 'std units', 'Sea Water pH reported on Total Scale', 'The measure of acidity of seawater.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'WO':
return ('dissolved_oxygen', 'ppm', 'Dissolved Oxygen', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'WX':
return ('dissolved_oxygen_saturation', 'percent', 'Dissolved Oxygen Saturation', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'TD':
return ('dew_point_temperature', 'f', 'Dew Point Temperature', 'The temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'HG': # HG2?
return ('stream_gage_height', 'ft', 'Stream Gage Height', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'HP':
return ('water_surface_height_above_reference_datum', 'ft', 'Water Surface Height Above Reference Datum', 'The height of the upper surface of a body of liquid water, such as a sea, lake or river, above an arbitrary reference datum.') # depends on [control=['if'], data=[]]
elif hads_var_name == 'WS':
return ('salinity', 'ppt', 'Salinity', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'HM':
return ('water_level', 'ft', 'Water Level', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'PA':
return ('air_pressure', 'hp', 'Air Pressure', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'SD':
return ('snow_depth', 'in', 'Snow Depth', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'SW':
return ('snow_water_equivalent', 'm', 'Snow Water Equivalent', '') # depends on [control=['if'], data=[]]
elif hads_var_name == 'TS':
return ('soil_temperature', 'f', 'Soil Temperature', 'Soil temperature is the bulk temperature of the soil, not the surface (skin) temperature.') # depends on [control=['if'], data=[]]
return None |
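The long `elif` chain in `get_variable_info` is effectively a static lookup table. Below is a minimal sketch of the same mapping expressed as a dict, assuming a plain function rather than a classmethod; only a few entries are copied from the function above, and the `lookup_variable_info` name is hypothetical:

```python
# Sketch: the HADS-code lookup expressed as a dict. Entries copied from
# get_variable_info above; most codes omitted for brevity.
VARIABLE_INFO = {
    "TA": ("air_temperature", "f", "Air Temperature",
           "Air temperature is the bulk temperature of the air, "
           "not the surface (skin) temperature."),
    "US": ("wind_speed", "mph", "Wind Speed",
           "Magnitude of wind velocity. Wind is motion of air "
           "relative to the surface of the earth."),
    "SD": ("snow_depth", "in", "Snow Depth", ""),
}
# Aliases mirror the `in [...]` branches above, e.g. TA2 behaves like TA.
ALIASES = {"TA2": "TA"}

def lookup_variable_info(hads_var_name):
    code = ALIASES.get(hads_var_name, hads_var_name)
    return VARIABLE_INFO.get(code)  # None for unknown codes, as above

assert lookup_variable_info("TA2")[0] == "air_temperature"
assert lookup_variable_info("XYZ") is None
```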
def exit(self):
"""Quits this octave session and cleans up.
"""
if self._engine:
self._engine.repl.terminate()
self._engine = None | def function[exit, parameter[self]]:
constant[Quits this octave session and cleans up.
]
if name[self]._engine begin[:]
call[name[self]._engine.repl.terminate, parameter[]]
name[self]._engine assign[=] constant[None] | keyword[def] identifier[exit] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_engine] :
identifier[self] . identifier[_engine] . identifier[repl] . identifier[terminate] ()
identifier[self] . identifier[_engine] = keyword[None] | def exit(self):
"""Quits this octave session and cleans up.
"""
if self._engine:
self._engine.repl.terminate() # depends on [control=['if'], data=[]]
self._engine = None |
def Rowlinson_Poling(T, Tc, omega, Cpgm):
r'''Calculate liquid constant-pressure heat capacity with the [1]_ CSP method.
This equation is not terribly accurate.
The heat capacity of a liquid is given by:
.. math::
\frac{Cp^{L} - Cp^{g}}{R} = 1.586 + \frac{0.49}{1-T_r} +
\omega\left[ 4.2775 + \frac{6.3(1-T_r)^{1/3}}{T_r} + \frac{0.4355}{1-T_r}\right]
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor for fluid, [-]
Cpgm : float
Constant-pressure gas heat capacity, [J/mol/K]
Returns
-------
Cplm : float
Liquid constant-pressure heat capacity, [J/mol/K]
Notes
-----
Poling compared 212 substances, and found errors at 298 K larger than 10%
for 18 of them, mostly associating. Of the other 194 compounds, AARD is 2.5%.
Examples
--------
>>> Rowlinson_Poling(350.0, 435.5, 0.203, 91.21)
143.80194441498296
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
'''
Tr = T/Tc
Cplm = Cpgm + R*(1.586 + 0.49/(1.-Tr) + omega*(4.2775
+ 6.3*(1-Tr)**(1/3.)/Tr + 0.4355/(1.-Tr)))
return Cplm | def function[Rowlinson_Poling, parameter[T, Tc, omega, Cpgm]]:
constant[Calculate liquid constant-pressure heat capacity with the [1]_ CSP method.
This equation is not terribly accurate.
The heat capacity of a liquid is given by:
.. math::
\frac{Cp^{L} - Cp^{g}}{R} = 1.586 + \frac{0.49}{1-T_r} +
\omega\left[ 4.2775 + \frac{6.3(1-T_r)^{1/3}}{T_r} + \frac{0.4355}{1-T_r}\right]
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor for fluid, [-]
Cpgm : float
Constant-pressure gas heat capacity, [J/mol/K]
Returns
-------
Cplm : float
Liquid constant-pressure heat capacity, [J/mol/K]
Notes
-----
Poling compared 212 substances, and found errors at 298 K larger than 10%
for 18 of them, mostly associating. Of the other 194 compounds, AARD is 2.5%.
Examples
--------
>>> Rowlinson_Poling(350.0, 435.5, 0.203, 91.21)
143.80194441498296
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
]
variable[Tr] assign[=] binary_operation[name[T] / name[Tc]]
variable[Cplm] assign[=] binary_operation[name[Cpgm] + binary_operation[name[R] * binary_operation[binary_operation[constant[1.586] + binary_operation[constant[0.49] / binary_operation[constant[1.0] - name[Tr]]]] + binary_operation[name[omega] * binary_operation[binary_operation[constant[4.2775] + binary_operation[binary_operation[constant[6.3] * binary_operation[binary_operation[constant[1] - name[Tr]] ** binary_operation[constant[1] / constant[3.0]]]] / name[Tr]]] + binary_operation[constant[0.4355] / binary_operation[constant[1.0] - name[Tr]]]]]]]]
return[name[Cplm]] | keyword[def] identifier[Rowlinson_Poling] ( identifier[T] , identifier[Tc] , identifier[omega] , identifier[Cpgm] ):
literal[string]
identifier[Tr] = identifier[T] / identifier[Tc]
identifier[Cplm] = identifier[Cpgm] + identifier[R] *( literal[int] + literal[int] /( literal[int] - identifier[Tr] )+ identifier[omega] *( literal[int]
+ literal[int] *( literal[int] - identifier[Tr] )**( literal[int] / literal[int] )/ identifier[Tr] + literal[int] /( literal[int] - identifier[Tr] )))
keyword[return] identifier[Cplm] | def Rowlinson_Poling(T, Tc, omega, Cpgm):
"""Calculate liquid constant-pressure heat capacitiy with the [1]_ CSP method.
This equation is not terrible accurate.
The heat capacity of a liquid is given by:
.. math::
\\frac{Cp^{L} - Cp^{g}}{R} = 1.586 + \\frac{0.49}{1-T_r} +
\\omega\\left[ 4.2775 + \\frac{6.3(1-T_r)^{1/3}}{T_r} + \\frac{0.4355}{1-T_r}\\right]
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor for fluid, [-]
Cpgm : float
Constant-pressure gas heat capacity, [J/mol/K]
Returns
-------
Cplm : float
Liquid constant-pressure heat capacity, [J/mol/K]
Notes
-----
Poling compared 212 substances, and found errors at 298 K larger than 10%
for 18 of them, mostly associating. Of the other 194 compounds, AARD is 2.5%.
Examples
--------
>>> Rowlinson_Poling(350.0, 435.5, 0.203, 91.21)
143.80194441498296
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
"""
Tr = T / Tc
Cplm = Cpgm + R * (1.586 + 0.49 / (1.0 - Tr) + omega * (4.2775 + 6.3 * (1 - Tr) ** (1 / 3.0) / Tr + 0.4355 / (1.0 - Tr)))
return Cplm |
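As a quick check of the Rowlinson/Poling correlation, the doctest value can be reproduced directly. The sketch below assumes `R` is the molar gas constant in J/(mol·K); the module-level `R` used by the function is not shown in this excerpt:

```python
R = 8.314462618  # J/(mol*K), assumed value of the module-level constant

def rowlinson_poling(T, Tc, omega, Cpgm):
    Tr = T / Tc
    return Cpgm + R * (1.586 + 0.49 / (1. - Tr)
                       + omega * (4.2775
                                  + 6.3 * (1 - Tr) ** (1 / 3.) / Tr
                                  + 0.4355 / (1. - Tr)))

print(rowlinson_poling(350.0, 435.5, 0.203, 91.21))  # ~143.80, matching the doctest
```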
def str2dict(dotted_str, value=None, separator='.'):
""" Convert dotted string to dict splitting by :separator: """
dict_ = {}
parts = dotted_str.split(separator)
d, prev = dict_, None
for part in parts:
prev = d
d = d.setdefault(part, {})
else:
if value is not None:
prev[part] = value
return dict_ | def function[str2dict, parameter[dotted_str, value, separator]]:
constant[ Convert dotted string to dict splitting by :separator: ]
variable[dict_] assign[=] dictionary[[], []]
variable[parts] assign[=] call[name[dotted_str].split, parameter[name[separator]]]
<ast.Tuple object at 0x7da20e956d40> assign[=] tuple[[<ast.Name object at 0x7da20e957850>, <ast.Constant object at 0x7da20e957700>]]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[prev] assign[=] name[d]
variable[d] assign[=] call[name[d].setdefault, parameter[name[part], dictionary[[], []]]]
return[name[dict_]] | keyword[def] identifier[str2dict] ( identifier[dotted_str] , identifier[value] = keyword[None] , identifier[separator] = literal[string] ):
literal[string]
identifier[dict_] ={}
identifier[parts] = identifier[dotted_str] . identifier[split] ( identifier[separator] )
identifier[d] , identifier[prev] = identifier[dict_] , keyword[None]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[prev] = identifier[d]
identifier[d] = identifier[d] . identifier[setdefault] ( identifier[part] ,{})
keyword[else] :
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[prev] [ identifier[part] ]= identifier[value]
keyword[return] identifier[dict_] | def str2dict(dotted_str, value=None, separator='.'):
""" Convert dotted string to dict splitting by :separator: """
dict_ = {}
parts = dotted_str.split(separator)
(d, prev) = (dict_, None)
for part in parts:
prev = d
d = d.setdefault(part, {}) # depends on [control=['for'], data=['part']]
else:
if value is not None:
prev[part] = value # depends on [control=['if'], data=['value']]
return dict_ |
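Note the `for ... else` in `str2dict`: the `else` body runs once the loop finishes normally, so `prev[part] = value` overwrites the innermost empty dict with the value. A usage sketch, assuming the `str2dict` defined above is in scope:

```python
print(str2dict("a.b.c"))                        # {'a': {'b': {'c': {}}}}
print(str2dict("a.b.c", value=1))               # {'a': {'b': {'c': 1}}}
print(str2dict("x:y", value=2, separator=":"))  # {'x': {'y': 2}}
```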
def route(self, req, node, path):
'''
Looks up a controller from a node based upon the specified path.
:param node: The node, such as a root controller object.
:param path: The path to look up on this node.
'''
path = path.split('/')[1:]
try:
node, remainder = lookup_controller(node, path, req)
return node, remainder
except NonCanonicalPath as e:
if self.force_canonical and \
not _cfg(e.controller).get('accept_noncanonical', False):
if req.method == 'POST':
raise RuntimeError(
"You have POSTed to a URL '%s' which "
"requires a slash. Most browsers will not maintain "
"POST data when redirected. Please update your code "
"to POST to '%s/' or set force_canonical to False" %
(req.pecan['routing_path'],
req.pecan['routing_path'])
)
redirect(code=302, add_slash=True, request=req)
return e.controller, e.remainder | def function[route, parameter[self, req, node, path]]:
constant[
Looks up a controller from a node based upon the specified path.
:param node: The node, such as a root controller object.
:param path: The path to look up on this node.
]
variable[path] assign[=] call[call[name[path].split, parameter[constant[/]]]][<ast.Slice object at 0x7da18f00df60>]
<ast.Try object at 0x7da18f00f1f0> | keyword[def] identifier[route] ( identifier[self] , identifier[req] , identifier[node] , identifier[path] ):
literal[string]
identifier[path] = identifier[path] . identifier[split] ( literal[string] )[ literal[int] :]
keyword[try] :
identifier[node] , identifier[remainder] = identifier[lookup_controller] ( identifier[node] , identifier[path] , identifier[req] )
keyword[return] identifier[node] , identifier[remainder]
keyword[except] identifier[NonCanonicalPath] keyword[as] identifier[e] :
keyword[if] identifier[self] . identifier[force_canonical] keyword[and] keyword[not] identifier[_cfg] ( identifier[e] . identifier[controller] ). identifier[get] ( literal[string] , keyword[False] ):
keyword[if] identifier[req] . identifier[method] == literal[string] :
keyword[raise] identifier[RuntimeError] (
literal[string]
literal[string]
literal[string]
literal[string] %
( identifier[req] . identifier[pecan] [ literal[string] ],
identifier[req] . identifier[pecan] [ literal[string] ])
)
identifier[redirect] ( identifier[code] = literal[int] , identifier[add_slash] = keyword[True] , identifier[request] = identifier[req] )
keyword[return] identifier[e] . identifier[controller] , identifier[e] . identifier[remainder] | def route(self, req, node, path):
"""
Looks up a controller from a node based upon the specified path.
:param node: The node, such as a root controller object.
:param path: The path to look up on this node.
"""
path = path.split('/')[1:]
try:
(node, remainder) = lookup_controller(node, path, req)
return (node, remainder) # depends on [control=['try'], data=[]]
except NonCanonicalPath as e:
if self.force_canonical and (not _cfg(e.controller).get('accept_noncanonical', False)):
if req.method == 'POST':
raise RuntimeError("You have POSTed to a URL '%s' which requires a slash. Most browsers will not maintain POST data when redirected. Please update your code to POST to '%s/' or set force_canonical to False" % (req.pecan['routing_path'], req.pecan['routing_path'])) # depends on [control=['if'], data=[]]
redirect(code=302, add_slash=True, request=req) # depends on [control=['if'], data=[]]
return (e.controller, e.remainder) # depends on [control=['except'], data=['e']] |
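The first line of `route` strips the leading empty segment produced by splitting an absolute path, before `lookup_controller` walks the remaining segments down from the root controller. A small, framework-independent illustration:

```python
path = "/catalog/items/42"
segments = path.split("/")[1:]
print(segments)  # ['catalog', 'items', '42']
```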
def is_module_reloadable(self, module, modname):
"""Decide if a module is reloadable or not."""
if self.has_cython:
# Don't return cached inline compiled .PYX files
return False
else:
if (self.is_module_in_pathlist(module) or
self.is_module_in_namelist(modname)):
return False
else:
return True | def function[is_module_reloadable, parameter[self, module, modname]]:
constant[Decide if a module is reloadable or not.]
if name[self].has_cython begin[:]
return[constant[False]] | keyword[def] identifier[is_module_reloadable] ( identifier[self] , identifier[module] , identifier[modname] ):
literal[string]
keyword[if] identifier[self] . identifier[has_cython] :
keyword[return] keyword[False]
keyword[else] :
keyword[if] ( identifier[self] . identifier[is_module_in_pathlist] ( identifier[module] ) keyword[or]
identifier[self] . identifier[is_module_in_namelist] ( identifier[modname] )):
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True] | def is_module_reloadable(self, module, modname):
"""Decide if a module is reloadable or not."""
if self.has_cython:
# Don't return cached inline compiled .PYX files
return False # depends on [control=['if'], data=[]]
elif self.is_module_in_pathlist(module) or self.is_module_in_namelist(modname):
return False # depends on [control=['if'], data=[]]
else:
return True |
def execute(script, *args, **kwargs):
"""
Executes a command through the shell. Spaces should break up the args. Usage: execute('grep', 'TODO', '*')
NOTE: Any kwargs will be converted to args in the destination command.
E.g. execute('grep', 'TODO', '*', **{'--before-context': 5}) will be $ grep TODO * --before-context=5
"""
popen_args = [script] + list(args)
if kwargs:
popen_args.extend(_kwargs_to_execute_args(kwargs))
try:
return check_call(popen_args, shell=False)
except CalledProcessError as ex:
_print(ex)
sys.exit(ex.returncode)
except Exception as ex:
_print('Error: {} with script: {} and args {}'.format(ex, script, args))
sys.exit(1) | def function[execute, parameter[script]]:
constant[
Executes a command through the shell. Spaces should breakup the args. Usage: execute('grep', 'TODO', '*')
NOTE: Any kwargs will be converted to args in the destination command.
E.g. execute('grep', 'TODO', '*', **{'--before-context': 5}) will be $grep todo * --before-context=5
]
variable[popen_args] assign[=] binary_operation[list[[<ast.Name object at 0x7da18f810b20>]] + call[name[list], parameter[name[args]]]]
if name[kwargs] begin[:]
call[name[popen_args].extend, parameter[call[name[_kwargs_to_execute_args], parameter[name[kwargs]]]]]
<ast.Try object at 0x7da20e957a00> | keyword[def] identifier[execute] ( identifier[script] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[popen_args] =[ identifier[script] ]+ identifier[list] ( identifier[args] )
keyword[if] identifier[kwargs] :
identifier[popen_args] . identifier[extend] ( identifier[_kwargs_to_execute_args] ( identifier[kwargs] ))
keyword[try] :
keyword[return] identifier[check_call] ( identifier[popen_args] , identifier[shell] = keyword[False] )
keyword[except] identifier[CalledProcessError] keyword[as] identifier[ex] :
identifier[_print] ( identifier[ex] )
identifier[sys] . identifier[exit] ( identifier[ex] . identifier[returncode] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[_print] ( literal[string] . identifier[format] ( identifier[ex] , identifier[script] , identifier[args] ))
identifier[sys] . identifier[exit] ( literal[int] ) | def execute(script, *args, **kwargs):
"""
Executes a command through the shell. Spaces should break up the args. Usage: execute('grep', 'TODO', '*')
NOTE: Any kwargs will be converted to args in the destination command.
E.g. execute('grep', 'TODO', '*', **{'--before-context': 5}) will be $ grep TODO * --before-context=5
"""
popen_args = [script] + list(args)
if kwargs:
popen_args.extend(_kwargs_to_execute_args(kwargs)) # depends on [control=['if'], data=[]]
try:
return check_call(popen_args, shell=False) # depends on [control=['try'], data=[]]
except CalledProcessError as ex:
_print(ex)
sys.exit(ex.returncode) # depends on [control=['except'], data=['ex']]
except Exception as ex:
_print('Error: {} with script: {} and args {}'.format(ex, script, args))
sys.exit(1) # depends on [control=['except'], data=['ex']] |
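`_kwargs_to_execute_args` is not shown in this excerpt. Below is a plausible sketch consistent with the docstring's `--before-context=5` example; the helper body here is an assumption:

```python
def _kwargs_to_execute_args(kwargs):
    # Hypothetical implementation: {'--before-context': 5} -> ['--before-context=5']
    return ["{}={}".format(flag, value) for flag, value in kwargs.items()]

print(_kwargs_to_execute_args({"--before-context": 5}))  # ['--before-context=5']
```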
def _genpath(self, filename, mhash):
"""Generate the path to a file in the cache.
Does not check to see if the file exists. Just constructs the path
where it should be.
"""
mhash = mhash.hexdigest()
return os.path.join(self.mh_cachedir, mhash[0:2], mhash[2:4],
mhash, filename) | def function[_genpath, parameter[self, filename, mhash]]:
constant[Generate the path to a file in the cache.
Does not check to see if the file exists. Just constructs the path
where it should be.
]
variable[mhash] assign[=] call[name[mhash].hexdigest, parameter[]]
return[call[name[os].path.join, parameter[name[self].mh_cachedir, call[name[mhash]][<ast.Slice object at 0x7da20e9b04f0>], call[name[mhash]][<ast.Slice object at 0x7da20e9b2860>], name[mhash], name[filename]]]] | keyword[def] identifier[_genpath] ( identifier[self] , identifier[filename] , identifier[mhash] ):
literal[string]
identifier[mhash] = identifier[mhash] . identifier[hexdigest] ()
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[mh_cachedir] , identifier[mhash] [ literal[int] : literal[int] ], identifier[mhash] [ literal[int] : literal[int] ],
identifier[mhash] , identifier[filename] ) | def _genpath(self, filename, mhash):
"""Generate the path to a file in the cache.
Does not check to see if the file exists. Just constructs the path
where it should be.
"""
mhash = mhash.hexdigest()
return os.path.join(self.mh_cachedir, mhash[0:2], mhash[2:4], mhash, filename) |
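The two-level `mhash[0:2]/mhash[2:4]` fan-out keeps any single cache directory small. A sketch of the resulting layout; the MD5 choice and the `/var/cache/mh` root are assumptions, since `_genpath` only requires an object with a `.hexdigest()` method:

```python
import hashlib
import os

mhash = hashlib.md5(b"example content")  # any object with .hexdigest() works
digest = mhash.hexdigest()
path = os.path.join("/var/cache/mh", digest[0:2], digest[2:4], digest, "data.bin")
print(path)  # /var/cache/mh/<hex 0:2>/<hex 2:4>/<full digest>/data.bin
```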
def parse(cls, fptr, offset, length):
"""Parse CaptureResolutionBox.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
CaptureResolutionBox
Instance of the current capture resolution box.
"""
read_buffer = fptr.read(10)
(rn1, rd1, rn2, rd2, re1, re2) = struct.unpack('>HHHHBB', read_buffer)
vres = rn1 / rd1 * math.pow(10, re1)
hres = rn2 / rd2 * math.pow(10, re2)
return cls(vres, hres, length=length, offset=offset) | def function[parse, parameter[cls, fptr, offset, length]]:
constant[Parse CaptureResolutionBox.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
CaptureResolutionBox
Instance of the current capture resolution box.
]
variable[read_buffer] assign[=] call[name[fptr].read, parameter[constant[10]]]
<ast.Tuple object at 0x7da1b04a77f0> assign[=] call[name[struct].unpack, parameter[constant[>HHHHBB], name[read_buffer]]]
variable[vres] assign[=] binary_operation[binary_operation[name[rn1] / name[rd1]] * call[name[math].pow, parameter[constant[10], name[re1]]]]
variable[hres] assign[=] binary_operation[binary_operation[name[rn2] / name[rd2]] * call[name[math].pow, parameter[constant[10], name[re2]]]]
return[call[name[cls], parameter[name[vres], name[hres]]]] | keyword[def] identifier[parse] ( identifier[cls] , identifier[fptr] , identifier[offset] , identifier[length] ):
literal[string]
identifier[read_buffer] = identifier[fptr] . identifier[read] ( literal[int] )
( identifier[rn1] , identifier[rd1] , identifier[rn2] , identifier[rd2] , identifier[re1] , identifier[re2] )= identifier[struct] . identifier[unpack] ( literal[string] , identifier[read_buffer] )
identifier[vres] = identifier[rn1] / identifier[rd1] * identifier[math] . identifier[pow] ( literal[int] , identifier[re1] )
identifier[hres] = identifier[rn2] / identifier[rd2] * identifier[math] . identifier[pow] ( literal[int] , identifier[re2] )
keyword[return] identifier[cls] ( identifier[vres] , identifier[hres] , identifier[length] = identifier[length] , identifier[offset] = identifier[offset] ) | def parse(cls, fptr, offset, length):
"""Parse CaptureResolutionBox.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
CaptureResolutionBox
Instance of the current capture resolution box.
"""
read_buffer = fptr.read(10)
(rn1, rd1, rn2, rd2, re1, re2) = struct.unpack('>HHHHBB', read_buffer)
vres = rn1 / rd1 * math.pow(10, re1)
hres = rn2 / rd2 * math.pow(10, re2)
return cls(vres, hres, length=length, offset=offset) |
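The `>HHHHBB` format string reads four big-endian unsigned 16-bit integers and two unsigned bytes (10 bytes total); each resolution is a rational number scaled by a power of ten. A round-trip sketch with made-up values:

```python
import math
import struct

# Pack vertical 72/1 x 10^2 and horizontal 96/1 x 10^2 (made-up values).
buf = struct.pack('>HHHHBB', 72, 1, 96, 1, 2, 2)
rn1, rd1, rn2, rd2, re1, re2 = struct.unpack('>HHHHBB', buf)
vres = rn1 / rd1 * math.pow(10, re1)
hres = rn2 / rd2 * math.pow(10, re2)
print(vres, hres)  # 7200.0 9600.0
```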
def predict_expectation(self, X):
r"""
Compute the expected lifetime, :math:`E[T]`, using covariates X. The expectation is computed
using the fact that :math:`E[T] = \int_0^\infty P(T > t) dt = \int_0^\infty S(t) dt`. The integral is approximated with the trapezoidal rule.
Caution
--------
However, if the survival function doesn't converge to 0, then the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
See Also
--------
predict_median
predict_percentile
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects) | def function[predict_expectation, parameter[self, X]]:
constant[
Compute the expected lifetime, :math:`E[T]`, using covariates X. The expectation is computed
using the fact that :math:`E[T] = \int_0^\infty P(T > t) dt = \int_0^\infty S(t) dt`. The integral is approximated with the trapezoidal rule.
Caution
--------
However, if the survival function doesn't converge to 0, then the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
See Also
--------
predict_median
predict_percentile
]
variable[subjects] assign[=] call[name[_get_index], parameter[name[X]]]
variable[v] assign[=] call[call[name[self].predict_survival_function, parameter[name[X]]]][name[subjects]]
return[call[name[pd].DataFrame, parameter[call[name[trapz], parameter[name[v].values.T, name[v].index]]]]] | keyword[def] identifier[predict_expectation] ( identifier[self] , identifier[X] ):
literal[string]
identifier[subjects] = identifier[_get_index] ( identifier[X] )
identifier[v] = identifier[self] . identifier[predict_survival_function] ( identifier[X] )[ identifier[subjects] ]
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[trapz] ( identifier[v] . identifier[values] . identifier[T] , identifier[v] . identifier[index] ), identifier[index] = identifier[subjects] ) | def predict_expectation(self, X):
"""
Compute the expected lifetime, :math:`E[T]`, using covariates X. The expectation is computed
using the fact that :math:`E[T] = \\int_0^\\infty P(T > t) dt = \\int_0^\\infty S(t) dt`. The integral is approximated with the trapezoidal rule.
Caution
--------
However, if the survival function doesn't converge to 0, then the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
See Also
--------
predict_median
predict_percentile
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects) |
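The expectation is simply the area under the survival curve, which `trapz(v.values.T, v.index)` computes column by column. A minimal numeric sketch of the same idea using numpy; the survival values below are made up:

```python
import numpy as np

t = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # time grid (the DataFrame index above)
s = np.array([1.0, 0.8, 0.5, 0.2, 0.05])  # survival probabilities S(t)
print(np.trapz(s, t))  # 2.025, approximating E[T] = integral of S(t) dt
```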
def process_spider_input(self, response, spider):
'''
Ensures the metadata from the response is passed
through in any Requests generated from the spider
'''
self.logger.debug("processing redis stats middleware")
if self.settings['STATS_STATUS_CODES']:
if spider.name not in self.stats_dict:
self._setup_stats_status_codes(spider.name)
if 'status_codes' in self.stats_dict[spider.name]:
code = response.status
if code in self.settings['STATS_RESPONSE_CODES']:
for key in self.stats_dict[spider.name]['status_codes'][code]:
try:
if key == 'lifetime':
unique = response.url + str(response.status)\
+ str(time.time())
self.stats_dict[spider.name]['status_codes'][code][key].increment(unique)
else:
self.stats_dict[spider.name]['status_codes'][code][key].increment()
except Exception as e:
self.logger.warn("Error in spider redis stats")
self.logger.debug("Incremented status_code '{c}' stats"\
.format(c=code)) | def function[process_spider_input, parameter[self, response, spider]]:
constant[
Ensures the metadata from the response is passed
through in any Requests generated from the spider
]
call[name[self].logger.debug, parameter[constant[processing redis stats middleware]]]
if call[name[self].settings][constant[STATS_STATUS_CODES]] begin[:]
if compare[name[spider].name <ast.NotIn object at 0x7da2590d7190> name[self].stats_dict] begin[:]
call[name[self]._setup_stats_status_codes, parameter[name[spider].name]]
if compare[constant[status_codes] in call[name[self].stats_dict][name[spider].name]] begin[:]
variable[code] assign[=] name[response].status
if compare[name[code] in call[name[self].settings][constant[STATS_RESPONSE_CODES]]] begin[:]
for taget[name[key]] in starred[call[call[call[name[self].stats_dict][name[spider].name]][constant[status_codes]]][name[code]]] begin[:]
<ast.Try object at 0x7da1b18bece0>
call[name[self].logger.debug, parameter[call[constant[Incremented status_code '{c}' stats].format, parameter[]]]] | keyword[def] identifier[process_spider_input] ( identifier[self] , identifier[response] , identifier[spider] ):
literal[string]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[self] . identifier[settings] [ literal[string] ]:
keyword[if] identifier[spider] . identifier[name] keyword[not] keyword[in] identifier[self] . identifier[stats_dict] :
identifier[self] . identifier[_setup_stats_status_codes] ( identifier[spider] . identifier[name] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[stats_dict] [ identifier[spider] . identifier[name] ]:
identifier[code] = identifier[response] . identifier[status]
keyword[if] identifier[code] keyword[in] identifier[self] . identifier[settings] [ literal[string] ]:
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[stats_dict] [ identifier[spider] . identifier[name] ][ literal[string] ][ identifier[code] ]:
keyword[try] :
keyword[if] identifier[key] == literal[string] :
identifier[unique] = identifier[response] . identifier[url] + identifier[str] ( identifier[response] . identifier[status] )+ identifier[str] ( identifier[time] . identifier[time] ())
identifier[self] . identifier[stats_dict] [ identifier[spider] . identifier[name] ][ literal[string] ][ identifier[code] ][ identifier[key] ]. identifier[increment] ( identifier[unique] )
keyword[else] :
identifier[self] . identifier[stats_dict] [ identifier[spider] . identifier[name] ][ literal[string] ][ identifier[code] ][ identifier[key] ]. identifier[increment] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] )
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[c] = identifier[code] )) | def process_spider_input(self, response, spider):
"""
Ensures the metadata from the response is passed
through in any Requests generated from the spider
"""
self.logger.debug('processing redis stats middleware')
if self.settings['STATS_STATUS_CODES']:
if spider.name not in self.stats_dict:
self._setup_stats_status_codes(spider.name) # depends on [control=['if'], data=[]]
if 'status_codes' in self.stats_dict[spider.name]:
code = response.status
if code in self.settings['STATS_RESPONSE_CODES']:
for key in self.stats_dict[spider.name]['status_codes'][code]:
try:
if key == 'lifetime':
unique = response.url + str(response.status) + str(time.time())
self.stats_dict[spider.name]['status_codes'][code][key].increment(unique) # depends on [control=['if'], data=['key']]
else:
self.stats_dict[spider.name]['status_codes'][code][key].increment() # depends on [control=['try'], data=[]]
except Exception as e:
self.logger.warn('Error in spider redis stats') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['key']]
self.logger.debug("Incremented status_code '{c}' stats".format(c=code)) # depends on [control=['if'], data=['code']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def _start_keep_alive(self):
'''
Start the keep alive thread as a daemon
'''
keep_alive_thread = threading.Thread(target=self.keep_alive)
keep_alive_thread.daemon = True
keep_alive_thread.start() | def function[_start_keep_alive, parameter[self]]:
constant[
Start the keep alive thread as a daemon
]
variable[keep_alive_thread] assign[=] call[name[threading].Thread, parameter[]]
name[keep_alive_thread].daemon assign[=] constant[True]
call[name[keep_alive_thread].start, parameter[]] | keyword[def] identifier[_start_keep_alive] ( identifier[self] ):
literal[string]
identifier[keep_alive_thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[keep_alive] )
identifier[keep_alive_thread] . identifier[daemon] = keyword[True]
identifier[keep_alive_thread] . identifier[start] () | def _start_keep_alive(self):
"""
Start the keep alive thread as a daemon
"""
keep_alive_thread = threading.Thread(target=self.keep_alive)
keep_alive_thread.daemon = True
keep_alive_thread.start() |
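Setting `daemon = True` before `start()` means the keep-alive thread dies with the process instead of blocking interpreter shutdown. A standalone sketch of the same pattern; the sleep interval is a placeholder:

```python
import threading
import time

def keep_alive():
    while True:
        time.sleep(1)  # placeholder for the periodic keep-alive work

t = threading.Thread(target=keep_alive)
t.daemon = True  # must be set before start(); daemon threads won't block exit
t.start()
```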
def factory(opts, **kwargs):
'''
If we have additional IPC transports other than UxD and TCP, add them here
'''
# FIXME for now, just UXD
# Obviously, this makes the factory approach pointless, but we'll extend later
import salt.transport.ipc
return salt.transport.ipc.IPCMessageClient(opts, **kwargs) | def function[factory, parameter[opts]]:
constant[
If we have additional IPC transports other than UxD and TCP, add them here
]
import module[salt.transport.ipc]
return[call[name[salt].transport.ipc.IPCMessageClient, parameter[name[opts]]]] | keyword[def] identifier[factory] ( identifier[opts] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[salt] . identifier[transport] . identifier[ipc]
keyword[return] identifier[salt] . identifier[transport] . identifier[ipc] . identifier[IPCMessageClient] ( identifier[opts] ,** identifier[kwargs] ) | def factory(opts, **kwargs):
"""
If we have additional IPC transports other than UxD and TCP, add them here
"""
# FIXME for now, just UXD
# Obviously, this makes the factory approach pointless, but we'll extend later
import salt.transport.ipc
return salt.transport.ipc.IPCMessageClient(opts, **kwargs) |
def doExperiment(numColumns, objects, l2Overrides, noiseLevels, numInitialTraversals,
noisyFeature, noisyLocation):
"""
Touch every point on an object 'numInitialTraversals' times, then evaluate
whether it has inferred the object by touching every point once more and
checking the number of correctly active and incorrectly active cells.
@param numColumns (int)
The number of sensors to use
@param l2Overrides (dict)
Parameters for the ColumnPooler
@param objects (dict)
A mapping of object names to their features.
See 'createRandomObjects'.
@param noiseLevels (list of floats)
The noise levels to experiment with. The experiment is run once per noise
level. Noise is applied at a constant rate to exactly one cortical column.
It's applied to the same cortical column every time, and this is the cortical
column that is measured.
@param noisyFeature (bool)
Whether to use a noisy feature
@param noisyLocation (bool)
Whether to use a noisy location
"""
featureSDR = lambda : set(random.sample(xrange(NUM_L4_COLUMNS), 40))
locationSDR = lambda : set(random.sample(xrange(1024), 40))
featureSDRsByColumn = [defaultdict(featureSDR) for _ in xrange(numColumns)]
locationSDRsByColumn = [defaultdict(locationSDR) for _ in xrange(numColumns)]
exp = L4L2Experiment(
"Experiment",
numCorticalColumns=numColumns,
inputSize=NUM_L4_COLUMNS,
externalInputSize=1024,
seed=random.randint(2048, 4096)
)
exp.learnObjects(
dict((objectName,
[dict((column,
(locationSDRsByColumn[column][location],
featureSDRsByColumn[column][features[location]]))
for column in xrange(numColumns))
for location in xrange(len(features))])
for objectName, features in objects.iteritems()))
results = defaultdict(list)
for noiseLevel in noiseLevels:
# Try to infer the objects
for objectName, features in objects.iteritems():
exp.sendReset()
inferredL2 = exp.objectL2Representations[objectName]
sensorPositionsIterator = greedySensorPositions(numColumns, len(features))
# Touch each location at least numInitialTouches times, and then touch it
# once more, testing it. For each traversal, touch each point on the object
# ~once. Not once per sensor -- just once. So we translate the "number of
# traversals" into a "number of touches" according to the number of sensors.
numTouchesPerTraversal = len(features) / float(numColumns)
numInitialTouches = int(math.ceil(numInitialTraversals * numTouchesPerTraversal))
numTestTouches = len(features)
for touch in xrange(numInitialTouches + numTestTouches):
sensorPositions = next(sensorPositionsIterator)
sensation = dict(
(column,
(locationSDRsByColumn[column][sensorPositions[column]],
featureSDRsByColumn[column][features[sensorPositions[column]]]))
for column in xrange(1, numColumns))
# Add noise to the first column.
featureSDR = featureSDRsByColumn[0][features[sensorPositions[0]]]
if noisyFeature:
featureSDR = noisy(featureSDR, noiseLevel, NUM_L4_COLUMNS)
locationSDR = locationSDRsByColumn[0][sensorPositions[0]]
if noisyLocation:
locationSDR = noisy(locationSDR, noiseLevel, 1024)
sensation[0] = (locationSDR, featureSDR)
exp.infer([sensation]*TIMESTEPS_PER_SENSATION, reset=False,
objectName=objectName)
if touch >= numInitialTouches:
activeCells = exp.getL2Representations()[0]
correctCells = inferredL2[0]
results[noiseLevel].append((len(activeCells & correctCells),
len(activeCells - correctCells)))
return results | def function[doExperiment, parameter[numColumns, objects, l2Overrides, noiseLevels, numInitialTraversals, noisyFeature, noisyLocation]]:
constant[
Touch every point on an object 'numInitialTraversals' times, then evaluate
whether it has inferred the object by touching every point once more and
checking the number of correctly active and incorrectly active cells.
@param numColumns (int)
The number of sensors to use
@param l2Overrides (dict)
Parameters for the ColumnPooler
@param objects (dict)
A mapping of object names to their features.
See 'createRandomObjects'.
@param noiseLevels (list of floats)
The noise levels to experiment with. The experiment is run once per noise
level. Noise is applied at a constant rate to exactly one cortical column.
It's applied to the same cortical column every time, and this is the cortical
column that is measured.
@param noisyFeature (bool)
Whether to use a noisy feature
@param noisyLocation (bool)
Whether to use a noisy location
]
variable[featureSDR] assign[=] <ast.Lambda object at 0x7da1b08e44c0>
variable[locationSDR] assign[=] <ast.Lambda object at 0x7da1b08e72b0>
variable[featureSDRsByColumn] assign[=] <ast.ListComp object at 0x7da1b08e7610>
variable[locationSDRsByColumn] assign[=] <ast.ListComp object at 0x7da1b08e42b0>
variable[exp] assign[=] call[name[L4L2Experiment], parameter[constant[Experiment]]]
call[name[exp].learnObjects, parameter[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b08e5450>]]]]
variable[results] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[noiseLevel]] in starred[name[noiseLevels]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b08d7610>, <ast.Name object at 0x7da1b08d7580>]]] in starred[call[name[objects].iteritems, parameter[]]] begin[:]
call[name[exp].sendReset, parameter[]]
variable[inferredL2] assign[=] call[name[exp].objectL2Representations][name[objectName]]
variable[sensorPositionsIterator] assign[=] call[name[greedySensorPositions], parameter[name[numColumns], call[name[len], parameter[name[features]]]]]
variable[numTouchesPerTraversal] assign[=] binary_operation[call[name[len], parameter[name[features]]] / call[name[float], parameter[name[numColumns]]]]
variable[numInitialTouches] assign[=] call[name[int], parameter[call[name[math].ceil, parameter[binary_operation[name[numInitialTraversals] * name[numTouchesPerTraversal]]]]]]
variable[numTestTouches] assign[=] call[name[len], parameter[name[features]]]
for taget[name[touch]] in starred[call[name[xrange], parameter[binary_operation[name[numInitialTouches] + name[numTestTouches]]]]] begin[:]
variable[sensorPositions] assign[=] call[name[next], parameter[name[sensorPositionsIterator]]]
variable[sensation] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b08d51e0>]]
variable[featureSDR] assign[=] call[call[name[featureSDRsByColumn]][constant[0]]][call[name[features]][call[name[sensorPositions]][constant[0]]]]
if name[noisyFeature] begin[:]
variable[featureSDR] assign[=] call[name[noisy], parameter[name[featureSDR], name[noiseLevel], name[NUM_L4_COLUMNS]]]
variable[locationSDR] assign[=] call[call[name[locationSDRsByColumn]][constant[0]]][call[name[sensorPositions]][constant[0]]]
if name[noisyLocation] begin[:]
variable[locationSDR] assign[=] call[name[noisy], parameter[name[locationSDR], name[noiseLevel], constant[1024]]]
call[name[sensation]][constant[0]] assign[=] tuple[[<ast.Name object at 0x7da1b08d5cc0>, <ast.Name object at 0x7da1b08d5ba0>]]
call[name[exp].infer, parameter[binary_operation[list[[<ast.Name object at 0x7da1b08d4460>]] * name[TIMESTEPS_PER_SENSATION]]]]
if compare[name[touch] greater_or_equal[>=] name[numInitialTouches]] begin[:]
variable[activeCells] assign[=] call[call[name[exp].getL2Representations, parameter[]]][constant[0]]
variable[correctCells] assign[=] call[name[inferredL2]][constant[0]]
call[call[name[results]][name[noiseLevel]].append, parameter[tuple[[<ast.Call object at 0x7da1b08d69e0>, <ast.Call object at 0x7da1b08d4ac0>]]]]
return[name[results]] | keyword[def] identifier[doExperiment] ( identifier[numColumns] , identifier[objects] , identifier[l2Overrides] , identifier[noiseLevels] , identifier[numInitialTraversals] ,
identifier[noisyFeature] , identifier[noisyLocation] ):
literal[string]
identifier[featureSDR] = keyword[lambda] : identifier[set] ( identifier[random] . identifier[sample] ( identifier[xrange] ( identifier[NUM_L4_COLUMNS] ), literal[int] ))
identifier[locationSDR] = keyword[lambda] : identifier[set] ( identifier[random] . identifier[sample] ( identifier[xrange] ( literal[int] ), literal[int] ))
identifier[featureSDRsByColumn] =[ identifier[defaultdict] ( identifier[featureSDR] ) keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[numColumns] )]
identifier[locationSDRsByColumn] =[ identifier[defaultdict] ( identifier[locationSDR] ) keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[numColumns] )]
identifier[exp] = identifier[L4L2Experiment] (
literal[string] ,
identifier[numCorticalColumns] = identifier[numColumns] ,
identifier[inputSize] = identifier[NUM_L4_COLUMNS] ,
identifier[externalInputSize] = literal[int] ,
identifier[seed] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
)
identifier[exp] . identifier[learnObjects] (
identifier[dict] (( identifier[objectName] ,
[ identifier[dict] (( identifier[column] ,
( identifier[locationSDRsByColumn] [ identifier[column] ][ identifier[location] ],
identifier[featureSDRsByColumn] [ identifier[column] ][ identifier[features] [ identifier[location] ]]))
keyword[for] identifier[column] keyword[in] identifier[xrange] ( identifier[numColumns] ))
keyword[for] identifier[location] keyword[in] identifier[xrange] ( identifier[len] ( identifier[features] ))])
keyword[for] identifier[objectName] , identifier[features] keyword[in] identifier[objects] . identifier[iteritems] ()))
identifier[results] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[noiseLevel] keyword[in] identifier[noiseLevels] :
keyword[for] identifier[objectName] , identifier[features] keyword[in] identifier[objects] . identifier[iteritems] ():
identifier[exp] . identifier[sendReset] ()
identifier[inferredL2] = identifier[exp] . identifier[objectL2Representations] [ identifier[objectName] ]
identifier[sensorPositionsIterator] = identifier[greedySensorPositions] ( identifier[numColumns] , identifier[len] ( identifier[features] ))
identifier[numTouchesPerTraversal] = identifier[len] ( identifier[features] )/ identifier[float] ( identifier[numColumns] )
identifier[numInitialTouches] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[numInitialTraversals] * identifier[numTouchesPerTraversal] ))
identifier[numTestTouches] = identifier[len] ( identifier[features] )
keyword[for] identifier[touch] keyword[in] identifier[xrange] ( identifier[numInitialTouches] + identifier[numTestTouches] ):
identifier[sensorPositions] = identifier[next] ( identifier[sensorPositionsIterator] )
identifier[sensation] = identifier[dict] (
( identifier[column] ,
( identifier[locationSDRsByColumn] [ identifier[column] ][ identifier[sensorPositions] [ identifier[column] ]],
identifier[featureSDRsByColumn] [ identifier[column] ][ identifier[features] [ identifier[sensorPositions] [ identifier[column] ]]]))
keyword[for] identifier[column] keyword[in] identifier[xrange] ( literal[int] , identifier[numColumns] ))
identifier[featureSDR] = identifier[featureSDRsByColumn] [ literal[int] ][ identifier[features] [ identifier[sensorPositions] [ literal[int] ]]]
keyword[if] identifier[noisyFeature] :
identifier[featureSDR] = identifier[noisy] ( identifier[featureSDR] , identifier[noiseLevel] , identifier[NUM_L4_COLUMNS] )
identifier[locationSDR] = identifier[locationSDRsByColumn] [ literal[int] ][ identifier[sensorPositions] [ literal[int] ]]
keyword[if] identifier[noisyLocation] :
identifier[locationSDR] = identifier[noisy] ( identifier[locationSDR] , identifier[noiseLevel] , literal[int] )
identifier[sensation] [ literal[int] ]=( identifier[locationSDR] , identifier[featureSDR] )
identifier[exp] . identifier[infer] ([ identifier[sensation] ]* identifier[TIMESTEPS_PER_SENSATION] , identifier[reset] = keyword[False] ,
identifier[objectName] = identifier[objectName] )
keyword[if] identifier[touch] >= identifier[numInitialTouches] :
identifier[activeCells] = identifier[exp] . identifier[getL2Representations] ()[ literal[int] ]
identifier[correctCells] = identifier[inferredL2] [ literal[int] ]
identifier[results] [ identifier[noiseLevel] ]. identifier[append] (( identifier[len] ( identifier[activeCells] & identifier[correctCells] ),
identifier[len] ( identifier[activeCells] - identifier[correctCells] )))
keyword[return] identifier[results] | def doExperiment(numColumns, objects, l2Overrides, noiseLevels, numInitialTraversals, noisyFeature, noisyLocation):
"""
Touch every point on an object 'numInitialTraversals' times, then evaluate
whether it has inferred the object by touching every point once more and
checking the number of correctly active and incorrectly active cells.
@param numColumns (int)
The number of sensors to use
@param l2Overrides (dict)
Parameters for the ColumnPooler
@param objects (dict)
A mapping of object names to their features.
See 'createRandomObjects'.
@param noiseLevels (list of floats)
The noise levels to experiment with. The experiment is run once per noise
level. Noise is applied at a constant rate to exactly one cortical column.
It's applied to the same cortical column every time, and this is the cortical
column that is measured.
@param noisyFeature (bool)
Whether to use a noisy feature
@param noisyLocation (bool)
Whether to use a noisy location
"""
featureSDR = lambda : set(random.sample(xrange(NUM_L4_COLUMNS), 40))
locationSDR = lambda : set(random.sample(xrange(1024), 40))
featureSDRsByColumn = [defaultdict(featureSDR) for _ in xrange(numColumns)]
locationSDRsByColumn = [defaultdict(locationSDR) for _ in xrange(numColumns)]
exp = L4L2Experiment('Experiment', numCorticalColumns=numColumns, inputSize=NUM_L4_COLUMNS, externalInputSize=1024, seed=random.randint(2048, 4096))
exp.learnObjects(dict(((objectName, [dict(((column, (locationSDRsByColumn[column][location], featureSDRsByColumn[column][features[location]])) for column in xrange(numColumns))) for location in xrange(len(features))]) for (objectName, features) in objects.iteritems())))
results = defaultdict(list)
for noiseLevel in noiseLevels:
# Try to infer the objects
for (objectName, features) in objects.iteritems():
exp.sendReset()
inferredL2 = exp.objectL2Representations[objectName]
sensorPositionsIterator = greedySensorPositions(numColumns, len(features))
# Touch each location at least numInitialTouches times, and then touch it
# once more, testing it. For each traversal, touch each point on the object
# ~once. Not once per sensor -- just once. So we translate the "number of
# traversals" into a "number of touches" according to the number of sensors.
numTouchesPerTraversal = len(features) / float(numColumns)
numInitialTouches = int(math.ceil(numInitialTraversals * numTouchesPerTraversal))
numTestTouches = len(features)
for touch in xrange(numInitialTouches + numTestTouches):
sensorPositions = next(sensorPositionsIterator)
sensation = dict(((column, (locationSDRsByColumn[column][sensorPositions[column]], featureSDRsByColumn[column][features[sensorPositions[column]]])) for column in xrange(1, numColumns)))
# Add noise to the first column.
featureSDR = featureSDRsByColumn[0][features[sensorPositions[0]]]
if noisyFeature:
featureSDR = noisy(featureSDR, noiseLevel, NUM_L4_COLUMNS) # depends on [control=['if'], data=[]]
locationSDR = locationSDRsByColumn[0][sensorPositions[0]]
if noisyLocation:
locationSDR = noisy(locationSDR, noiseLevel, 1024) # depends on [control=['if'], data=[]]
sensation[0] = (locationSDR, featureSDR)
exp.infer([sensation] * TIMESTEPS_PER_SENSATION, reset=False, objectName=objectName)
if touch >= numInitialTouches:
activeCells = exp.getL2Representations()[0]
correctCells = inferredL2[0]
results[noiseLevel].append((len(activeCells & correctCells), len(activeCells - correctCells))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['touch']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['noiseLevel']]
return results |
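A note on the `defaultdict(featureSDR)` idiom in `doExperiment` above: the zero-argument lambda acts as a default factory, so each feature or location index receives a stable random SDR the first time it is looked up. A minimal Python 3 sketch of just that idiom — the 40-cell sparsity is taken from the snippet, the 2048 value for `NUM_L4_COLUMNS` is an assumption, and `xrange` becomes `range`:

import random
from collections import defaultdict

NUM_L4_COLUMNS = 2048  # assumed value; the snippet only references the name

# The factory takes no arguments, so every missing key maps to a fresh
# random 40-cell SDR, and repeated lookups return the same cached set.
featureSDR = lambda: set(random.sample(range(NUM_L4_COLUMNS), 40))
featureSDRs = defaultdict(featureSDR)

first = featureSDRs[3]   # sampled on first access
again = featureSDRs[3]   # cached thereafter
assert first is again and len(first) == 40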
def detect(self, volume_system, vstype='detect'):
"""Gather information about lvolumes, gathering their label, size and raw path"""
volume_group = volume_system.parent.info.get('volume_group')
result = _util.check_output_(["lvm", "lvdisplay", volume_group])
cur_v = None
for l in result.splitlines():
if "--- Logical volume ---" in l:
cur_v = volume_system._make_subvolume(
index=self._format_index(volume_system, len(volume_system)),
flag='alloc'
)
cur_v.info['fsdescription'] = 'Logical Volume'
if "LV Name" in l:
cur_v.info['label'] = l.replace("LV Name", "").strip()
if "LV Size" in l:
size, unit = l.replace("LV Size", "").strip().split(" ", 1)
cur_v.size = int(float(size.replace(',', '.')) * {'KiB': 1024, 'MiB': 1024 ** 2,
'GiB': 1024 ** 3, 'TiB': 1024 ** 4}.get(unit, 1))
if "LV Path" in l:
cur_v._paths['lv'] = l.replace("LV Path", "").strip()
cur_v.offset = 0
logger.info("{0} volumes found".format(len(volume_system)))
volume_system.volume_source = 'multi'
return volume_system.volumes | def function[detect, parameter[self, volume_system, vstype]]:
    constant[Gather information about logical volumes (lvolumes), collecting their label, size and raw path.]
variable[volume_group] assign[=] call[name[volume_system].parent.info.get, parameter[constant[volume_group]]]
variable[result] assign[=] call[name[_util].check_output_, parameter[list[[<ast.Constant object at 0x7da1b040be20>, <ast.Constant object at 0x7da1b040ac50>, <ast.Name object at 0x7da1b0408400>]]]]
variable[cur_v] assign[=] constant[None]
for taget[name[l]] in starred[call[name[result].splitlines, parameter[]]] begin[:]
if compare[constant[--- Logical volume ---] in name[l]] begin[:]
variable[cur_v] assign[=] call[name[volume_system]._make_subvolume, parameter[]]
call[name[cur_v].info][constant[fsdescription]] assign[=] constant[Logical Volume]
if compare[constant[LV Name] in name[l]] begin[:]
call[name[cur_v].info][constant[label]] assign[=] call[call[name[l].replace, parameter[constant[LV Name], constant[]]].strip, parameter[]]
if compare[constant[LV Size] in name[l]] begin[:]
<ast.Tuple object at 0x7da1b0446890> assign[=] call[call[call[name[l].replace, parameter[constant[LV Size], constant[]]].strip, parameter[]].split, parameter[constant[ ], constant[1]]]
name[cur_v].size assign[=] call[name[int], parameter[binary_operation[call[name[float], parameter[call[name[size].replace, parameter[constant[,], constant[.]]]]] * call[dictionary[[<ast.Constant object at 0x7da1b0446410>, <ast.Constant object at 0x7da1b0446b60>, <ast.Constant object at 0x7da1b0447250>, <ast.Constant object at 0x7da1b0444e50>], [<ast.Constant object at 0x7da1b0445c90>, <ast.BinOp object at 0x7da1b0445f30>, <ast.BinOp object at 0x7da1b0445630>, <ast.BinOp object at 0x7da1b0446bc0>]].get, parameter[name[unit], constant[1]]]]]]
if compare[constant[LV Path] in name[l]] begin[:]
call[name[cur_v]._paths][constant[lv]] assign[=] call[call[name[l].replace, parameter[constant[LV Path], constant[]]].strip, parameter[]]
name[cur_v].offset assign[=] constant[0]
call[name[logger].info, parameter[call[constant[{0} volumes found].format, parameter[call[name[len], parameter[name[volume_system]]]]]]]
name[volume_system].volume_source assign[=] constant[multi]
return[name[volume_system].volumes] | keyword[def] identifier[detect] ( identifier[self] , identifier[volume_system] , identifier[vstype] = literal[string] ):
literal[string]
identifier[volume_group] = identifier[volume_system] . identifier[parent] . identifier[info] . identifier[get] ( literal[string] )
identifier[result] = identifier[_util] . identifier[check_output_] ([ literal[string] , literal[string] , identifier[volume_group] ])
identifier[cur_v] = keyword[None]
keyword[for] identifier[l] keyword[in] identifier[result] . identifier[splitlines] ():
keyword[if] literal[string] keyword[in] identifier[l] :
identifier[cur_v] = identifier[volume_system] . identifier[_make_subvolume] (
identifier[index] = identifier[self] . identifier[_format_index] ( identifier[volume_system] , identifier[len] ( identifier[volume_system] )),
identifier[flag] = literal[string]
)
identifier[cur_v] . identifier[info] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[in] identifier[l] :
identifier[cur_v] . identifier[info] [ literal[string] ]= identifier[l] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] ()
keyword[if] literal[string] keyword[in] identifier[l] :
identifier[size] , identifier[unit] = identifier[l] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] (). identifier[split] ( literal[string] , literal[int] )
identifier[cur_v] . identifier[size] = identifier[int] ( identifier[float] ( identifier[size] . identifier[replace] ( literal[string] , literal[string] ))*{ literal[string] : literal[int] , literal[string] : literal[int] ** literal[int] ,
literal[string] : literal[int] ** literal[int] , literal[string] : literal[int] ** literal[int] }. identifier[get] ( identifier[unit] , literal[int] ))
keyword[if] literal[string] keyword[in] identifier[l] :
identifier[cur_v] . identifier[_paths] [ literal[string] ]= identifier[l] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] ()
identifier[cur_v] . identifier[offset] = literal[int]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[volume_system] )))
identifier[volume_system] . identifier[volume_source] = literal[string]
keyword[return] identifier[volume_system] . identifier[volumes] | def detect(self, volume_system, vstype='detect'):
"""Gather information about lvolumes, gathering their label, size and raw path"""
volume_group = volume_system.parent.info.get('volume_group')
result = _util.check_output_(['lvm', 'lvdisplay', volume_group])
cur_v = None
for l in result.splitlines():
if '--- Logical volume ---' in l:
cur_v = volume_system._make_subvolume(index=self._format_index(volume_system, len(volume_system)), flag='alloc')
cur_v.info['fsdescription'] = 'Logical Volume' # depends on [control=['if'], data=[]]
if 'LV Name' in l:
cur_v.info['label'] = l.replace('LV Name', '').strip() # depends on [control=['if'], data=['l']]
if 'LV Size' in l:
(size, unit) = l.replace('LV Size', '').strip().split(' ', 1)
cur_v.size = int(float(size.replace(',', '.')) * {'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3, 'TiB': 1024 ** 4}.get(unit, 1)) # depends on [control=['if'], data=['l']]
if 'LV Path' in l:
cur_v._paths['lv'] = l.replace('LV Path', '').strip()
cur_v.offset = 0 # depends on [control=['if'], data=['l']] # depends on [control=['for'], data=['l']]
logger.info('{0} volumes found'.format(len(volume_system)))
volume_system.volume_source = 'multi'
return volume_system.volumes |
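The size handling in `detect` packs a unit-to-bytes conversion into a single dict lookup. A standalone sketch of that step, assuming a typical `lvdisplay` line such as `LV Size    4,00 GiB` (a locale decimal comma is normalized to a dot before conversion):

UNITS = {'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3, 'TiB': 1024 ** 4}

def parse_lv_size(line):
    # "LV Size                4,00 GiB" -> 4294967296
    size, unit = line.replace("LV Size", "").strip().split(" ", 1)
    return int(float(size.replace(',', '.')) * UNITS.get(unit, 1))

assert parse_lv_size("LV Size                4,00 GiB") == 4 * 1024 ** 3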
def format_ring_double_bond(mol):
"""Set double bonds around the ring.
"""
mol.require("Topology")
mol.require("ScaleAndCenter")
for r in sorted(mol.rings, key=len, reverse=True):
vertices = [mol.atom(n).coords for n in r]
try:
if geometry.is_clockwise(vertices):
cpath = iterator.consecutive(itertools.cycle(r), 2)
else:
cpath = iterator.consecutive(itertools.cycle(reversed(r)), 2)
except ValueError:
continue
for _ in r:
u, v = next(cpath)
b = mol.bond(u, v)
if b.order == 2:
b.type = int((u > v) == b.is_lower_first) | def function[format_ring_double_bond, parameter[mol]]:
constant[Set double bonds around the ring.
]
call[name[mol].require, parameter[constant[Topology]]]
call[name[mol].require, parameter[constant[ScaleAndCenter]]]
for taget[name[r]] in starred[call[name[sorted], parameter[name[mol].rings]]] begin[:]
variable[vertices] assign[=] <ast.ListComp object at 0x7da18f09f8e0>
<ast.Try object at 0x7da18f09e740>
for taget[name[_]] in starred[name[r]] begin[:]
<ast.Tuple object at 0x7da1b24e1b10> assign[=] call[name[next], parameter[name[cpath]]]
variable[b] assign[=] call[name[mol].bond, parameter[name[u], name[v]]]
if compare[name[b].order equal[==] constant[2]] begin[:]
name[b].type assign[=] call[name[int], parameter[compare[compare[name[u] greater[>] name[v]] equal[==] name[b].is_lower_first]]] | keyword[def] identifier[format_ring_double_bond] ( identifier[mol] ):
literal[string]
identifier[mol] . identifier[require] ( literal[string] )
identifier[mol] . identifier[require] ( literal[string] )
keyword[for] identifier[r] keyword[in] identifier[sorted] ( identifier[mol] . identifier[rings] , identifier[key] = identifier[len] , identifier[reverse] = keyword[True] ):
identifier[vertices] =[ identifier[mol] . identifier[atom] ( identifier[n] ). identifier[coords] keyword[for] identifier[n] keyword[in] identifier[r] ]
keyword[try] :
keyword[if] identifier[geometry] . identifier[is_clockwise] ( identifier[vertices] ):
identifier[cpath] = identifier[iterator] . identifier[consecutive] ( identifier[itertools] . identifier[cycle] ( identifier[r] ), literal[int] )
keyword[else] :
identifier[cpath] = identifier[iterator] . identifier[consecutive] ( identifier[itertools] . identifier[cycle] ( identifier[reversed] ( identifier[r] )), literal[int] )
keyword[except] identifier[ValueError] :
keyword[continue]
keyword[for] identifier[_] keyword[in] identifier[r] :
identifier[u] , identifier[v] = identifier[next] ( identifier[cpath] )
identifier[b] = identifier[mol] . identifier[bond] ( identifier[u] , identifier[v] )
keyword[if] identifier[b] . identifier[order] == literal[int] :
identifier[b] . identifier[type] = identifier[int] (( identifier[u] > identifier[v] )== identifier[b] . identifier[is_lower_first] ) | def format_ring_double_bond(mol):
"""Set double bonds around the ring.
"""
mol.require('Topology')
mol.require('ScaleAndCenter')
for r in sorted(mol.rings, key=len, reverse=True):
vertices = [mol.atom(n).coords for n in r]
try:
if geometry.is_clockwise(vertices):
cpath = iterator.consecutive(itertools.cycle(r), 2) # depends on [control=['if'], data=[]]
else:
cpath = iterator.consecutive(itertools.cycle(reversed(r)), 2) # depends on [control=['try'], data=[]]
except ValueError:
continue # depends on [control=['except'], data=[]]
for _ in r:
(u, v) = next(cpath)
b = mol.bond(u, v)
if b.order == 2:
b.type = int((u > v) == b.is_lower_first) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['r']] |
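The `iterator.consecutive` helper used in `format_ring_double_bond` is not shown; assuming it yields overlapping n-tuples, the bond pairing amounts to walking the ring as an endless cycle and taking sliding pairs, so the last pair wraps back to the first atom. A sketch of that assumed behavior:

import itertools

def consecutive(iterable, n):
    # Sliding window of width n over an iterable (assumed semantics).
    iters = itertools.tee(iterable, n)
    for i, it in enumerate(iters):
        for _ in range(i):
            next(it)
    return zip(*iters)

ring = [0, 1, 2, 3]
cpath = consecutive(itertools.cycle(ring), 2)
# One pair per ring atom closes the ring: (3, 0) wraps around.
assert [next(cpath) for _ in ring] == [(0, 1), (1, 2), (2, 3), (3, 0)]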
def format_mtime(mtime):
"""
Format the date associated with a file to be displayed in directory listing.
"""
now = datetime.now()
dt = datetime.fromtimestamp(mtime)
return '%s %2d %5s' % (
dt.strftime('%b'), dt.day,
dt.year if dt.year != now.year else dt.strftime('%H:%M')) | def function[format_mtime, parameter[mtime]]:
constant[
Format the date associated with a file to be displayed in directory listing.
]
variable[now] assign[=] call[name[datetime].now, parameter[]]
variable[dt] assign[=] call[name[datetime].fromtimestamp, parameter[name[mtime]]]
return[binary_operation[constant[%s %2d %5s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da204960850>, <ast.Attribute object at 0x7da2049609a0>, <ast.IfExp object at 0x7da2049638b0>]]]] | keyword[def] identifier[format_mtime] ( identifier[mtime] ):
literal[string]
identifier[now] = identifier[datetime] . identifier[now] ()
identifier[dt] = identifier[datetime] . identifier[fromtimestamp] ( identifier[mtime] )
keyword[return] literal[string] %(
identifier[dt] . identifier[strftime] ( literal[string] ), identifier[dt] . identifier[day] ,
identifier[dt] . identifier[year] keyword[if] identifier[dt] . identifier[year] != identifier[now] . identifier[year] keyword[else] identifier[dt] . identifier[strftime] ( literal[string] )) | def format_mtime(mtime):
"""
Format the date associated with a file to be displayed in directory listing.
"""
now = datetime.now()
dt = datetime.fromtimestamp(mtime)
return '%s %2d %5s' % (dt.strftime('%b'), dt.day, dt.year if dt.year != now.year else dt.strftime('%H:%M')) |
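A usage sketch for `format_mtime`, assuming the helper above is in scope: it mimics `ls -l`, printing a clock time for files modified in the current year and the year otherwise.

import os

mtime = os.stat(__file__).st_mtime
print(format_mtime(mtime))
# A file touched today prints e.g. "Mar  5 14:30"; one last modified in
# an earlier year prints e.g. "Mar  5  2020" instead of the time.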
def get_all_classify():
"""获取全部菜谱分类"""
url = "https://www.xinshipu.com/%E8%8F%9C%E8%B0%B1%E5%A4%A7%E5%85%A8.html"
response = requests.get(url, headers=get_header())
html = BeautifulSoup(response.text, "lxml")
all_a = html.find("div", {'class': "detail-cate-list clearfix mt20"}).find_all('a')
classify = dict()
for a in all_a:
if a.has_attr('rel') and not a.has_attr('class'):
class_url = urljoin(HOME_URL, a['href'])
classify[a.text] = class_url
return classify | def function[get_all_classify, parameter[]]:
    constant[Get all recipe categories.]
variable[url] assign[=] constant[https://www.xinshipu.com/%E8%8F%9C%E8%B0%B1%E5%A4%A7%E5%85%A8.html]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
variable[html] assign[=] call[name[BeautifulSoup], parameter[name[response].text, constant[lxml]]]
variable[all_a] assign[=] call[call[name[html].find, parameter[constant[div], dictionary[[<ast.Constant object at 0x7da1b25d4d90>], [<ast.Constant object at 0x7da1b25d4eb0>]]]].find_all, parameter[constant[a]]]
variable[classify] assign[=] call[name[dict], parameter[]]
for taget[name[a]] in starred[name[all_a]] begin[:]
if <ast.BoolOp object at 0x7da2054a4e20> begin[:]
variable[class_url] assign[=] call[name[urljoin], parameter[name[HOME_URL], call[name[a]][constant[href]]]]
call[name[classify]][name[a].text] assign[=] name[class_url]
return[name[classify]] | keyword[def] identifier[get_all_classify] ():
literal[string]
identifier[url] = literal[string]
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[get_header] ())
identifier[html] = identifier[BeautifulSoup] ( identifier[response] . identifier[text] , literal[string] )
identifier[all_a] = identifier[html] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find_all] ( literal[string] )
identifier[classify] = identifier[dict] ()
keyword[for] identifier[a] keyword[in] identifier[all_a] :
keyword[if] identifier[a] . identifier[has_attr] ( literal[string] ) keyword[and] keyword[not] identifier[a] . identifier[has_attr] ( literal[string] ):
identifier[class_url] = identifier[urljoin] ( identifier[HOME_URL] , identifier[a] [ literal[string] ])
identifier[classify] [ identifier[a] . identifier[text] ]= identifier[class_url]
keyword[return] identifier[classify] | def get_all_classify():
"""获取全部菜谱分类"""
url = 'https://www.xinshipu.com/%E8%8F%9C%E8%B0%B1%E5%A4%A7%E5%85%A8.html'
response = requests.get(url, headers=get_header())
html = BeautifulSoup(response.text, 'lxml')
all_a = html.find('div', {'class': 'detail-cate-list clearfix mt20'}).find_all('a')
classify = dict()
for a in all_a:
if a.has_attr('rel') and (not a.has_attr('class')):
class_url = urljoin(HOME_URL, a['href'])
classify[a.text] = class_url # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
return classify |
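An offline sketch of the extraction step in `get_all_classify`, run against a canned HTML fragment instead of the live site; `HOME_URL` is a module global not shown above, so its value here is an assumption:

from urllib.parse import urljoin
from bs4 import BeautifulSoup

HOME_URL = "https://www.xinshipu.com"  # assumed value of the module global
fragment = ('<div class="detail-cate-list clearfix mt20">'
            '<a rel="nofollow" href="/chi/jiachangcai.html">home cooking</a>'
            '<a class="more" href="/all.html">more</a></div>')
html = BeautifulSoup(fragment, "lxml")
# Keep only anchors with a rel attribute and no class, as the code above does.
classify = {a.text: urljoin(HOME_URL, a['href'])
            for a in html.find("div", {"class": "detail-cate-list clearfix mt20"}).find_all("a")
            if a.has_attr('rel') and not a.has_attr('class')}
print(classify)  # {'home cooking': 'https://www.xinshipu.com/chi/jiachangcai.html'}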
def quit(self):
"""
This could be called from another thread, so let's do this via alarm
"""
def q(*args):
raise urwid.ExitMainLoop()
self.worker.shutdown(wait=False)
self.ui_worker.shutdown(wait=False)
self.loop.set_alarm_in(0, q) | def function[quit, parameter[self]]:
constant[
This could be called from another thread, so let's do this via alarm
]
def function[q, parameter[]]:
<ast.Raise object at 0x7da1b0723e50>
call[name[self].worker.shutdown, parameter[]]
call[name[self].ui_worker.shutdown, parameter[]]
call[name[self].loop.set_alarm_in, parameter[constant[0], name[q]]] | keyword[def] identifier[quit] ( identifier[self] ):
literal[string]
keyword[def] identifier[q] (* identifier[args] ):
keyword[raise] identifier[urwid] . identifier[ExitMainLoop] ()
identifier[self] . identifier[worker] . identifier[shutdown] ( identifier[wait] = keyword[False] )
identifier[self] . identifier[ui_worker] . identifier[shutdown] ( identifier[wait] = keyword[False] )
identifier[self] . identifier[loop] . identifier[set_alarm_in] ( literal[int] , identifier[q] ) | def quit(self):
"""
This could be called from another thread, so let's do this via alarm
"""
def q(*args):
raise urwid.ExitMainLoop()
self.worker.shutdown(wait=False)
self.ui_worker.shutdown(wait=False)
self.loop.set_alarm_in(0, q) |
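The alarm indirection in `quit` is the standard way to stop urwid from a worker thread: `ExitMainLoop` must be raised on the loop's own thread, so the method schedules a zero-delay alarm instead of raising directly. A minimal sketch of the pattern in isolation:

import urwid

def request_quit(loop):
    """Safe to call from any thread: the loop raises on its own thread."""
    def q(*args):
        raise urwid.ExitMainLoop()
    loop.set_alarm_in(0, q)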
def fetch_by_ids(TableName,iso_id_list,numin,numax,ParameterGroups=[],Parameters=[]):
"""
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
iso_id_list: list of isotopologue id's (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameter iso_id_list
contains list of "global" isotopologue Ids (see help on ISO_ID).
Note: this function is required if user wants to download
multiple species into single table.
---
EXAMPLE OF USAGE:
fetch_by_ids('water',[1,2,3,4],4000,4100)
---
"""
if type(iso_id_list) not in set([list,tuple]):
iso_id_list = [iso_id_list]
queryHITRAN(TableName,iso_id_list,numin,numax,
pargroups=ParameterGroups,params=Parameters)
iso_names = [ISO_ID[i][ISO_ID_INDEX['iso_name']] for i in iso_id_list]
Comment = 'Contains lines for '+','.join(iso_names)
Comment += ('\n in %.3f-%.3f wavenumber range' % (numin,numax))
comment(TableName,Comment) | def function[fetch_by_ids, parameter[TableName, iso_id_list, numin, numax, ParameterGroups, Parameters]]:
constant[
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
iso_id_list: list of isotopologue id's (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameter iso_id_list
contains list of "global" isotopologue Ids (see help on ISO_ID).
Note: this function is required if user wants to download
multiple species into single table.
---
EXAMPLE OF USAGE:
fetch_by_ids('water',[1,2,3,4],4000,4100)
---
]
if compare[call[name[type], parameter[name[iso_id_list]]] <ast.NotIn object at 0x7da2590d7190> call[name[set], parameter[list[[<ast.Name object at 0x7da2043473a0>, <ast.Name object at 0x7da204345030>]]]]] begin[:]
variable[iso_id_list] assign[=] list[[<ast.Name object at 0x7da204346650>]]
call[name[queryHITRAN], parameter[name[TableName], name[iso_id_list], name[numin], name[numax]]]
variable[iso_names] assign[=] <ast.ListComp object at 0x7da204346920>
variable[Comment] assign[=] binary_operation[constant[Contains lines for ] + call[constant[,].join, parameter[name[iso_names]]]]
<ast.AugAssign object at 0x7da2043457b0>
call[name[comment], parameter[name[TableName], name[Comment]]] | keyword[def] identifier[fetch_by_ids] ( identifier[TableName] , identifier[iso_id_list] , identifier[numin] , identifier[numax] , identifier[ParameterGroups] =[], identifier[Parameters] =[]):
literal[string]
keyword[if] identifier[type] ( identifier[iso_id_list] ) keyword[not] keyword[in] identifier[set] ([ identifier[list] , identifier[tuple] ]):
identifier[iso_id_list] =[ identifier[iso_id_list] ]
identifier[queryHITRAN] ( identifier[TableName] , identifier[iso_id_list] , identifier[numin] , identifier[numax] ,
identifier[pargroups] = identifier[ParameterGroups] , identifier[params] = identifier[Parameters] )
identifier[iso_names] =[ identifier[ISO_ID] [ identifier[i] ][ identifier[ISO_ID_INDEX] [ literal[string] ]] keyword[for] identifier[i] keyword[in] identifier[iso_id_list] ]
identifier[Comment] = literal[string] + literal[string] . identifier[join] ( identifier[iso_names] )
identifier[Comment] +=( literal[string] %( identifier[numin] , identifier[numax] ))
identifier[comment] ( identifier[TableName] , identifier[Comment] ) | def fetch_by_ids(TableName, iso_id_list, numin, numax, ParameterGroups=[], Parameters=[]):
"""
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
iso_id_list: list of isotopologue id's (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameter iso_id_list
contains list of "global" isotopologue Ids (see help on ISO_ID).
Note: this function is required if user wants to download
multiple species into single table.
---
EXAMPLE OF USAGE:
fetch_by_ids('water',[1,2,3,4],4000,4100)
---
"""
if type(iso_id_list) not in set([list, tuple]):
iso_id_list = [iso_id_list] # depends on [control=['if'], data=[]]
queryHITRAN(TableName, iso_id_list, numin, numax, pargroups=ParameterGroups, params=Parameters)
iso_names = [ISO_ID[i][ISO_ID_INDEX['iso_name']] for i in iso_id_list]
Comment = 'Contains lines for ' + ','.join(iso_names)
Comment += '\n in %.3f-%.3f wavenumber range' % (numin, numax)
comment(TableName, Comment) |
def build_profile_variant(variant):
"""Returns a ProfileVariant object
Args:
variant (cyvcf2.Variant)
Returns:
variant (models.ProfileVariant)
"""
chrom = variant.CHROM
if chrom.startswith(('chr', 'CHR', 'Chr')):
chrom = chrom[3:]
pos = int(variant.POS)
variant_id = get_variant_id(variant)
ref = variant.REF
alt = variant.ALT[0]
maf = get_maf(variant)
profile_variant = ProfileVariant(
variant_id=variant_id,
chrom=chrom,
pos=pos,
ref=ref,
alt=alt,
maf=maf,
id_column = variant.ID
)
return profile_variant | def function[build_profile_variant, parameter[variant]]:
constant[Returns a ProfileVariant object
Args:
variant (cyvcf2.Variant)
Returns:
variant (models.ProfileVariant)
]
variable[chrom] assign[=] name[variant].CHROM
if call[name[chrom].startswith, parameter[tuple[[<ast.Constant object at 0x7da1b1b0d870>, <ast.Constant object at 0x7da1b1b0f280>, <ast.Constant object at 0x7da1b1b0f1f0>]]]] begin[:]
variable[chrom] assign[=] call[name[chrom]][<ast.Slice object at 0x7da2044c13f0>]
variable[pos] assign[=] call[name[int], parameter[name[variant].POS]]
variable[variant_id] assign[=] call[name[get_variant_id], parameter[name[variant]]]
variable[ref] assign[=] name[variant].REF
variable[alt] assign[=] call[name[variant].ALT][constant[0]]
variable[maf] assign[=] call[name[get_maf], parameter[name[variant]]]
variable[profile_variant] assign[=] call[name[ProfileVariant], parameter[]]
return[name[profile_variant]] | keyword[def] identifier[build_profile_variant] ( identifier[variant] ):
literal[string]
identifier[chrom] = identifier[variant] . identifier[CHROM]
keyword[if] identifier[chrom] . identifier[startswith] (( literal[string] , literal[string] , literal[string] )):
identifier[chrom] = identifier[chrom] [ literal[int] :]
identifier[pos] = identifier[int] ( identifier[variant] . identifier[POS] )
identifier[variant_id] = identifier[get_variant_id] ( identifier[variant] )
identifier[ref] = identifier[variant] . identifier[REF]
identifier[alt] = identifier[variant] . identifier[ALT] [ literal[int] ]
identifier[maf] = identifier[get_maf] ( identifier[variant] )
identifier[profile_variant] = identifier[ProfileVariant] (
identifier[variant_id] = identifier[variant_id] ,
identifier[chrom] = identifier[chrom] ,
identifier[pos] = identifier[pos] ,
identifier[ref] = identifier[ref] ,
identifier[alt] = identifier[alt] ,
identifier[maf] = identifier[maf] ,
identifier[id_column] = identifier[variant] . identifier[ID]
)
keyword[return] identifier[profile_variant] | def build_profile_variant(variant):
"""Returns a ProfileVariant object
Args:
variant (cyvcf2.Variant)
Returns:
variant (models.ProfileVariant)
"""
chrom = variant.CHROM
if chrom.startswith(('chr', 'CHR', 'Chr')):
chrom = chrom[3:] # depends on [control=['if'], data=[]]
pos = int(variant.POS)
variant_id = get_variant_id(variant)
ref = variant.REF
alt = variant.ALT[0]
maf = get_maf(variant)
profile_variant = ProfileVariant(variant_id=variant_id, chrom=chrom, pos=pos, ref=ref, alt=alt, maf=maf, id_column=variant.ID)
return profile_variant |
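A hypothetical usage sketch for `build_profile_variant`; the VCF path is a placeholder, and `cyvcf2` is assumed to be the reader, matching the docstring's `cyvcf2.Variant` argument type:

from cyvcf2 import VCF

for record in VCF("profile_snps.vcf"):  # placeholder path
    variant = build_profile_variant(record)
    print(variant.variant_id, variant.chrom, variant.pos, variant.maf)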
def issur_melacha_in_effect(self):
"""At the given time, return whether issur melacha is in effect."""
# TODO: Rewrite this in terms of candle_lighting/havdalah properties.
weekday = self.date.weekday()
tomorrow = self.date + dt.timedelta(days=1)
tomorrow_holiday_type = HDate(
gdate=tomorrow, diaspora=self.location.diaspora).holiday_type
today_holiday_type = HDate(
gdate=self.date, diaspora=self.location.diaspora).holiday_type
if weekday == 4 or tomorrow_holiday_type == HolidayTypes.YOM_TOV:
if self.time > (self.zmanim["sunset"] -
dt.timedelta(minutes=self.candle_lighting_offset)):
return True
if weekday == 5 or today_holiday_type == HolidayTypes.YOM_TOV:
if self.time < self.zmanim["three_stars"]:
return True
return False | def function[issur_melacha_in_effect, parameter[self]]:
constant[At the given time, return whether issur melacha is in effect.]
variable[weekday] assign[=] call[name[self].date.weekday, parameter[]]
variable[tomorrow] assign[=] binary_operation[name[self].date + call[name[dt].timedelta, parameter[]]]
variable[tomorrow_holiday_type] assign[=] call[name[HDate], parameter[]].holiday_type
variable[today_holiday_type] assign[=] call[name[HDate], parameter[]].holiday_type
if <ast.BoolOp object at 0x7da20e954e50> begin[:]
if compare[name[self].time greater[>] binary_operation[call[name[self].zmanim][constant[sunset]] - call[name[dt].timedelta, parameter[]]]] begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da20e9564d0> begin[:]
if compare[name[self].time less[<] call[name[self].zmanim][constant[three_stars]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[issur_melacha_in_effect] ( identifier[self] ):
literal[string]
identifier[weekday] = identifier[self] . identifier[date] . identifier[weekday] ()
identifier[tomorrow] = identifier[self] . identifier[date] + identifier[dt] . identifier[timedelta] ( identifier[days] = literal[int] )
identifier[tomorrow_holiday_type] = identifier[HDate] (
identifier[gdate] = identifier[tomorrow] , identifier[diaspora] = identifier[self] . identifier[location] . identifier[diaspora] ). identifier[holiday_type]
identifier[today_holiday_type] = identifier[HDate] (
identifier[gdate] = identifier[self] . identifier[date] , identifier[diaspora] = identifier[self] . identifier[location] . identifier[diaspora] ). identifier[holiday_type]
keyword[if] identifier[weekday] == literal[int] keyword[or] identifier[tomorrow_holiday_type] == identifier[HolidayTypes] . identifier[YOM_TOV] :
keyword[if] identifier[self] . identifier[time] >( identifier[self] . identifier[zmanim] [ literal[string] ]-
identifier[dt] . identifier[timedelta] ( identifier[minutes] = identifier[self] . identifier[candle_lighting_offset] )):
keyword[return] keyword[True]
keyword[if] identifier[weekday] == literal[int] keyword[or] identifier[today_holiday_type] == identifier[HolidayTypes] . identifier[YOM_TOV] :
keyword[if] identifier[self] . identifier[time] < identifier[self] . identifier[zmanim] [ literal[string] ]:
keyword[return] keyword[True]
keyword[return] keyword[False] | def issur_melacha_in_effect(self):
"""At the given time, return whether issur melacha is in effect."""
# TODO: Rewrite this in terms of candle_lighting/havdalah properties.
weekday = self.date.weekday()
tomorrow = self.date + dt.timedelta(days=1)
tomorrow_holiday_type = HDate(gdate=tomorrow, diaspora=self.location.diaspora).holiday_type
today_holiday_type = HDate(gdate=self.date, diaspora=self.location.diaspora).holiday_type
if weekday == 4 or tomorrow_holiday_type == HolidayTypes.YOM_TOV:
if self.time > self.zmanim['sunset'] - dt.timedelta(minutes=self.candle_lighting_offset):
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if weekday == 5 or today_holiday_type == HolidayTypes.YOM_TOV:
if self.time < self.zmanim['three_stars']:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return False |
def iterkeys(self):
"""
Enumerate the keys found at any scope for the current plugin.
rtype: Generator[str]
"""
visited_keys = set()
try:
for key in self.idb.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key)
except (PermissionError, EnvironmentError):
pass
try:
for key in self.directory.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key)
except (PermissionError, EnvironmentError):
pass
try:
for key in self.user.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key)
except (PermissionError, EnvironmentError):
pass
try:
for key in self.system.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key)
except (PermissionError, EnvironmentError):
pass | def function[iterkeys, parameter[self]]:
constant[
Enumerate the keys found at any scope for the current plugin.
rtype: Generator[str]
]
variable[visited_keys] assign[=] call[name[set], parameter[]]
<ast.Try object at 0x7da1b026c910>
<ast.Try object at 0x7da1b026e860>
<ast.Try object at 0x7da1b026c2b0>
<ast.Try object at 0x7da1b026f790> | keyword[def] identifier[iterkeys] ( identifier[self] ):
literal[string]
identifier[visited_keys] = identifier[set] ()
keyword[try] :
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[idb] . identifier[iterkeys] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[visited_keys] :
keyword[yield] identifier[key]
identifier[visited_keys] . identifier[add] ( identifier[key] )
keyword[except] ( identifier[PermissionError] , identifier[EnvironmentError] ):
keyword[pass]
keyword[try] :
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[directory] . identifier[iterkeys] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[visited_keys] :
keyword[yield] identifier[key]
identifier[visited_keys] . identifier[add] ( identifier[key] )
keyword[except] ( identifier[PermissionError] , identifier[EnvironmentError] ):
keyword[pass]
keyword[try] :
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[user] . identifier[iterkeys] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[visited_keys] :
keyword[yield] identifier[key]
identifier[visited_keys] . identifier[add] ( identifier[key] )
keyword[except] ( identifier[PermissionError] , identifier[EnvironmentError] ):
keyword[pass]
keyword[try] :
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[system] . identifier[iterkeys] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[visited_keys] :
keyword[yield] identifier[key]
identifier[visited_keys] . identifier[add] ( identifier[key] )
keyword[except] ( identifier[PermissionError] , identifier[EnvironmentError] ):
keyword[pass] | def iterkeys(self):
"""
Enumerate the keys found at any scope for the current plugin.
rtype: Generator[str]
"""
visited_keys = set()
try:
for key in self.idb.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key) # depends on [control=['if'], data=['key', 'visited_keys']] # depends on [control=['for'], data=['key']] # depends on [control=['try'], data=[]]
except (PermissionError, EnvironmentError):
pass # depends on [control=['except'], data=[]]
try:
for key in self.directory.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key) # depends on [control=['if'], data=['key', 'visited_keys']] # depends on [control=['for'], data=['key']] # depends on [control=['try'], data=[]]
except (PermissionError, EnvironmentError):
pass # depends on [control=['except'], data=[]]
try:
for key in self.user.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key) # depends on [control=['if'], data=['key', 'visited_keys']] # depends on [control=['for'], data=['key']] # depends on [control=['try'], data=[]]
except (PermissionError, EnvironmentError):
pass # depends on [control=['except'], data=[]]
try:
for key in self.system.iterkeys():
if key not in visited_keys:
yield key
visited_keys.add(key) # depends on [control=['if'], data=['key', 'visited_keys']] # depends on [control=['for'], data=['key']] # depends on [control=['try'], data=[]]
except (PermissionError, EnvironmentError):
pass # depends on [control=['except'], data=[]] |
def parse_cif_structure(self):
"""Parse a `StructureData` from the cleaned `CifData` returned by the `CifSelectCalculation`."""
from aiida_codtools.workflows.functions.primitive_structure_from_cif import primitive_structure_from_cif
if self.ctx.cif.has_unknown_species:
self.ctx.exit_code = self.exit_codes.ERROR_CIF_HAS_UNKNOWN_SPECIES
self.report(self.ctx.exit_code.message)
return
if self.ctx.cif.has_undefined_atomic_sites:
self.ctx.exit_code = self.exit_codes.ERROR_CIF_HAS_UNDEFINED_ATOMIC_SITES
self.report(self.ctx.exit_code.message)
return
if self.ctx.cif.has_attached_hydrogens:
self.ctx.exit_code = self.exit_codes.ERROR_CIF_HAS_ATTACHED_HYDROGENS
self.report(self.ctx.exit_code.message)
return
parse_inputs = {
'cif': self.ctx.cif,
'parse_engine': self.inputs.parse_engine,
'site_tolerance': self.inputs.site_tolerance,
'symprec': self.inputs.symprec,
}
try:
structure, node = primitive_structure_from_cif.run_get_node(**parse_inputs)
except Exception: # pylint: disable=broad-except
self.ctx.exit_code = self.exit_codes.ERROR_CIF_STRUCTURE_PARSING_FAILED
self.report(self.ctx.exit_code.message)
return
if node.is_failed:
self.ctx.exit_code = self.exit_codes(node.exit_status) # pylint: disable=too-many-function-args
self.report(self.ctx.exit_code.message)
else:
self.ctx.structure = structure | def function[parse_cif_structure, parameter[self]]:
constant[Parse a `StructureData` from the cleaned `CifData` returned by the `CifSelectCalculation`.]
from relative_module[aiida_codtools.workflows.functions.primitive_structure_from_cif] import module[primitive_structure_from_cif]
if name[self].ctx.cif.has_unknown_species begin[:]
name[self].ctx.exit_code assign[=] name[self].exit_codes.ERROR_CIF_HAS_UNKNOWN_SPECIES
call[name[self].report, parameter[name[self].ctx.exit_code.message]]
return[None]
if name[self].ctx.cif.has_undefined_atomic_sites begin[:]
name[self].ctx.exit_code assign[=] name[self].exit_codes.ERROR_CIF_HAS_UNDEFINED_ATOMIC_SITES
call[name[self].report, parameter[name[self].ctx.exit_code.message]]
return[None]
if name[self].ctx.cif.has_attached_hydrogens begin[:]
name[self].ctx.exit_code assign[=] name[self].exit_codes.ERROR_CIF_HAS_ATTACHED_HYDROGENS
call[name[self].report, parameter[name[self].ctx.exit_code.message]]
return[None]
variable[parse_inputs] assign[=] dictionary[[<ast.Constant object at 0x7da18fe90c10>, <ast.Constant object at 0x7da18fe91ea0>, <ast.Constant object at 0x7da18fe91ed0>, <ast.Constant object at 0x7da18fe92230>], [<ast.Attribute object at 0x7da18fe90370>, <ast.Attribute object at 0x7da18fe93b50>, <ast.Attribute object at 0x7da18fe904c0>, <ast.Attribute object at 0x7da18fe92800>]]
<ast.Try object at 0x7da18fe91180>
if name[node].is_failed begin[:]
name[self].ctx.exit_code assign[=] call[name[self].exit_codes, parameter[name[node].exit_status]]
call[name[self].report, parameter[name[self].ctx.exit_code.message]] | keyword[def] identifier[parse_cif_structure] ( identifier[self] ):
literal[string]
keyword[from] identifier[aiida_codtools] . identifier[workflows] . identifier[functions] . identifier[primitive_structure_from_cif] keyword[import] identifier[primitive_structure_from_cif]
keyword[if] identifier[self] . identifier[ctx] . identifier[cif] . identifier[has_unknown_species] :
identifier[self] . identifier[ctx] . identifier[exit_code] = identifier[self] . identifier[exit_codes] . identifier[ERROR_CIF_HAS_UNKNOWN_SPECIES]
identifier[self] . identifier[report] ( identifier[self] . identifier[ctx] . identifier[exit_code] . identifier[message] )
keyword[return]
keyword[if] identifier[self] . identifier[ctx] . identifier[cif] . identifier[has_undefined_atomic_sites] :
identifier[self] . identifier[ctx] . identifier[exit_code] = identifier[self] . identifier[exit_codes] . identifier[ERROR_CIF_HAS_UNDEFINED_ATOMIC_SITES]
identifier[self] . identifier[report] ( identifier[self] . identifier[ctx] . identifier[exit_code] . identifier[message] )
keyword[return]
keyword[if] identifier[self] . identifier[ctx] . identifier[cif] . identifier[has_attached_hydrogens] :
identifier[self] . identifier[ctx] . identifier[exit_code] = identifier[self] . identifier[exit_codes] . identifier[ERROR_CIF_HAS_ATTACHED_HYDROGENS]
identifier[self] . identifier[report] ( identifier[self] . identifier[ctx] . identifier[exit_code] . identifier[message] )
keyword[return]
identifier[parse_inputs] ={
literal[string] : identifier[self] . identifier[ctx] . identifier[cif] ,
literal[string] : identifier[self] . identifier[inputs] . identifier[parse_engine] ,
literal[string] : identifier[self] . identifier[inputs] . identifier[site_tolerance] ,
literal[string] : identifier[self] . identifier[inputs] . identifier[symprec] ,
}
keyword[try] :
identifier[structure] , identifier[node] = identifier[primitive_structure_from_cif] . identifier[run_get_node] (** identifier[parse_inputs] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[ctx] . identifier[exit_code] = identifier[self] . identifier[exit_codes] . identifier[ERROR_CIF_STRUCTURE_PARSING_FAILED]
identifier[self] . identifier[report] ( identifier[self] . identifier[ctx] . identifier[exit_code] . identifier[message] )
keyword[return]
keyword[if] identifier[node] . identifier[is_failed] :
identifier[self] . identifier[ctx] . identifier[exit_code] = identifier[self] . identifier[exit_codes] ( identifier[node] . identifier[exit_status] )
identifier[self] . identifier[report] ( identifier[self] . identifier[ctx] . identifier[exit_code] . identifier[message] )
keyword[else] :
identifier[self] . identifier[ctx] . identifier[structure] = identifier[structure] | def parse_cif_structure(self):
"""Parse a `StructureData` from the cleaned `CifData` returned by the `CifSelectCalculation`."""
from aiida_codtools.workflows.functions.primitive_structure_from_cif import primitive_structure_from_cif
if self.ctx.cif.has_unknown_species:
self.ctx.exit_code = self.exit_codes.ERROR_CIF_HAS_UNKNOWN_SPECIES
self.report(self.ctx.exit_code.message)
return # depends on [control=['if'], data=[]]
if self.ctx.cif.has_undefined_atomic_sites:
self.ctx.exit_code = self.exit_codes.ERROR_CIF_HAS_UNDEFINED_ATOMIC_SITES
self.report(self.ctx.exit_code.message)
return # depends on [control=['if'], data=[]]
if self.ctx.cif.has_attached_hydrogens:
self.ctx.exit_code = self.exit_codes.ERROR_CIF_HAS_ATTACHED_HYDROGENS
self.report(self.ctx.exit_code.message)
return # depends on [control=['if'], data=[]]
parse_inputs = {'cif': self.ctx.cif, 'parse_engine': self.inputs.parse_engine, 'site_tolerance': self.inputs.site_tolerance, 'symprec': self.inputs.symprec}
try:
(structure, node) = primitive_structure_from_cif.run_get_node(**parse_inputs) # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=broad-except
self.ctx.exit_code = self.exit_codes.ERROR_CIF_STRUCTURE_PARSING_FAILED
self.report(self.ctx.exit_code.message)
return # depends on [control=['except'], data=[]]
if node.is_failed:
self.ctx.exit_code = self.exit_codes(node.exit_status) # pylint: disable=too-many-function-args
self.report(self.ctx.exit_code.message) # depends on [control=['if'], data=[]]
else:
self.ctx.structure = structure |
def create_context(self, state_hash, base_contexts, inputs, outputs):
"""Create a ExecutionContext to run a transaction against.
Args:
state_hash: (str): Merkle root to base state on.
base_contexts (list of str): Context ids of contexts that will
have their state applied to make this context.
inputs (list of str): Addresses that can be read from.
outputs (list of str): Addresses that can be written to.
Returns:
context_id (str): the unique context_id of the session
"""
for address in inputs:
if not self.namespace_is_valid(address):
raise CreateContextException(
"Address or namespace {} listed in inputs is not "
"valid".format(address))
for address in outputs:
if not self.namespace_is_valid(address):
raise CreateContextException(
"Address or namespace {} listed in outputs is not "
"valid".format(address))
addresses_to_find = [add for add in inputs if len(add) == 70]
address_values, reads = self._find_address_values_in_chain(
base_contexts=base_contexts,
addresses_to_find=addresses_to_find)
context = ExecutionContext(
state_hash=state_hash,
read_list=inputs,
write_list=outputs,
base_context_ids=base_contexts)
contexts_asked_not_found = [cid for cid in base_contexts
if cid not in self._contexts]
if contexts_asked_not_found:
raise KeyError(
"Basing a new context off of context ids {} "
"that are not in context manager".format(
contexts_asked_not_found))
context.create_initial(address_values)
self._contexts[context.session_id] = context
if reads:
context.create_prefetch(reads)
self._address_queue.put_nowait(
(context.session_id, state_hash, reads))
return context.session_id | def function[create_context, parameter[self, state_hash, base_contexts, inputs, outputs]]:
constant[Create a ExecutionContext to run a transaction against.
Args:
state_hash: (str): Merkle root to base state on.
base_contexts (list of str): Context ids of contexts that will
have their state applied to make this context.
inputs (list of str): Addresses that can be read from.
outputs (list of str): Addresses that can be written to.
Returns:
context_id (str): the unique context_id of the session
]
for taget[name[address]] in starred[name[inputs]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e5630> begin[:]
<ast.Raise object at 0x7da20c6e7400>
for taget[name[address]] in starred[name[outputs]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e5120> begin[:]
<ast.Raise object at 0x7da20c6e46a0>
variable[addresses_to_find] assign[=] <ast.ListComp object at 0x7da20c6e60e0>
<ast.Tuple object at 0x7da18ede6b60> assign[=] call[name[self]._find_address_values_in_chain, parameter[]]
variable[context] assign[=] call[name[ExecutionContext], parameter[]]
variable[contexts_asked_not_found] assign[=] <ast.ListComp object at 0x7da237d35c30>
if name[contexts_asked_not_found] begin[:]
<ast.Raise object at 0x7da20c6e6740>
call[name[context].create_initial, parameter[name[address_values]]]
call[name[self]._contexts][name[context].session_id] assign[=] name[context]
if name[reads] begin[:]
call[name[context].create_prefetch, parameter[name[reads]]]
call[name[self]._address_queue.put_nowait, parameter[tuple[[<ast.Attribute object at 0x7da20c6e6aa0>, <ast.Name object at 0x7da20c6e7220>, <ast.Name object at 0x7da20c6e70a0>]]]]
return[name[context].session_id] | keyword[def] identifier[create_context] ( identifier[self] , identifier[state_hash] , identifier[base_contexts] , identifier[inputs] , identifier[outputs] ):
literal[string]
keyword[for] identifier[address] keyword[in] identifier[inputs] :
keyword[if] keyword[not] identifier[self] . identifier[namespace_is_valid] ( identifier[address] ):
keyword[raise] identifier[CreateContextException] (
literal[string]
literal[string] . identifier[format] ( identifier[address] ))
keyword[for] identifier[address] keyword[in] identifier[outputs] :
keyword[if] keyword[not] identifier[self] . identifier[namespace_is_valid] ( identifier[address] ):
keyword[raise] identifier[CreateContextException] (
literal[string]
literal[string] . identifier[format] ( identifier[address] ))
identifier[addresses_to_find] =[ identifier[add] keyword[for] identifier[add] keyword[in] identifier[inputs] keyword[if] identifier[len] ( identifier[add] )== literal[int] ]
identifier[address_values] , identifier[reads] = identifier[self] . identifier[_find_address_values_in_chain] (
identifier[base_contexts] = identifier[base_contexts] ,
identifier[addresses_to_find] = identifier[addresses_to_find] )
identifier[context] = identifier[ExecutionContext] (
identifier[state_hash] = identifier[state_hash] ,
identifier[read_list] = identifier[inputs] ,
identifier[write_list] = identifier[outputs] ,
identifier[base_context_ids] = identifier[base_contexts] )
identifier[contexts_asked_not_found] =[ identifier[cid] keyword[for] identifier[cid] keyword[in] identifier[base_contexts]
keyword[if] identifier[cid] keyword[not] keyword[in] identifier[self] . identifier[_contexts] ]
keyword[if] identifier[contexts_asked_not_found] :
keyword[raise] identifier[KeyError] (
literal[string]
literal[string] . identifier[format] (
identifier[contexts_asked_not_found] ))
identifier[context] . identifier[create_initial] ( identifier[address_values] )
identifier[self] . identifier[_contexts] [ identifier[context] . identifier[session_id] ]= identifier[context]
keyword[if] identifier[reads] :
identifier[context] . identifier[create_prefetch] ( identifier[reads] )
identifier[self] . identifier[_address_queue] . identifier[put_nowait] (
( identifier[context] . identifier[session_id] , identifier[state_hash] , identifier[reads] ))
keyword[return] identifier[context] . identifier[session_id] | def create_context(self, state_hash, base_contexts, inputs, outputs):
"""Create a ExecutionContext to run a transaction against.
Args:
state_hash: (str): Merkle root to base state on.
base_contexts (list of str): Context ids of contexts that will
have their state applied to make this context.
inputs (list of str): Addresses that can be read from.
outputs (list of str): Addresses that can be written to.
Returns:
context_id (str): the unique context_id of the session
"""
for address in inputs:
if not self.namespace_is_valid(address):
raise CreateContextException('Address or namespace {} listed in inputs is not valid'.format(address)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['address']]
for address in outputs:
if not self.namespace_is_valid(address):
raise CreateContextException('Address or namespace {} listed in outputs is not valid'.format(address)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['address']]
addresses_to_find = [add for add in inputs if len(add) == 70]
(address_values, reads) = self._find_address_values_in_chain(base_contexts=base_contexts, addresses_to_find=addresses_to_find)
context = ExecutionContext(state_hash=state_hash, read_list=inputs, write_list=outputs, base_context_ids=base_contexts)
contexts_asked_not_found = [cid for cid in base_contexts if cid not in self._contexts]
if contexts_asked_not_found:
raise KeyError('Basing a new context off of context ids {} that are not in context manager'.format(contexts_asked_not_found)) # depends on [control=['if'], data=[]]
context.create_initial(address_values)
self._contexts[context.session_id] = context
if reads:
context.create_prefetch(reads)
self._address_queue.put_nowait((context.session_id, state_hash, reads)) # depends on [control=['if'], data=[]]
return context.session_id |
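A hypothetical call sketch for `create_context`; the manager instance, Merkle root, and addresses are placeholders. It illustrates the distinction the code draws: only full 70-character input addresses are gathered for prefetching, while shorter entries act as namespace declarations:

full_address = '1cf126' + '0' * 64  # 70 chars: value will be prefetched
namespace = '1cf126'                # prefix only: grants access, no fetch

context_id = context_manager.create_context(
    state_hash=merkle_root,         # placeholder Merkle root
    base_contexts=[],               # no prior contexts to inherit state from
    inputs=[full_address, namespace],
    outputs=[namespace],
)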
def extend(self, iterable):
"""Extend the list by appending all the items in the given list."""
return super(Collection, self).extend(
self._ensure_iterable_is_valid(iterable)) | def function[extend, parameter[self, iterable]]:
constant[Extend the list by appending all the items in the given list.]
return[call[call[name[super], parameter[name[Collection], name[self]]].extend, parameter[call[name[self]._ensure_iterable_is_valid, parameter[name[iterable]]]]]] | keyword[def] identifier[extend] ( identifier[self] , identifier[iterable] ):
literal[string]
keyword[return] identifier[super] ( identifier[Collection] , identifier[self] ). identifier[extend] (
identifier[self] . identifier[_ensure_iterable_is_valid] ( identifier[iterable] )) | def extend(self, iterable):
"""Extend the list by appending all the items in the given list."""
return super(Collection, self).extend(self._ensure_iterable_is_valid(iterable)) |
def assertFileSizeEqual(self, filename, size, msg=None):
'''Fail if ``filename`` does not have the given ``size`` as
determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertEqual(fsize, size, msg=msg) | def function[assertFileSizeEqual, parameter[self, filename, size, msg]]:
constant[Fail if ``filename`` does not have the given ``size`` as
determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
]
variable[fsize] assign[=] call[name[self]._get_file_size, parameter[name[filename]]]
call[name[self].assertEqual, parameter[name[fsize], name[size]]] | keyword[def] identifier[assertFileSizeEqual] ( identifier[self] , identifier[filename] , identifier[size] , identifier[msg] = keyword[None] ):
literal[string]
identifier[fsize] = identifier[self] . identifier[_get_file_size] ( identifier[filename] )
identifier[self] . identifier[assertEqual] ( identifier[fsize] , identifier[size] , identifier[msg] = identifier[msg] ) | def assertFileSizeEqual(self, filename, size, msg=None):
"""Fail if ``filename`` does not have the given ``size`` as
determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
"""
fsize = self._get_file_size(filename)
self.assertEqual(fsize, size, msg=msg) |
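A usage sketch, assuming this assertion lives on a mixin class (named `FileMixins` here, per the `marbles.mixins` reference in the docstring) combined with a standard `TestCase`:

import unittest

class ConfigFileTest(FileMixins, unittest.TestCase):  # mixin name assumed
    def test_placeholder_is_empty(self):
        self.assertFileSizeEqual('empty.cfg', 0, msg='expected an empty placeholder')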
def dbg(message, *args):
""" Looks at the stack, to see if a debug message should be printed. """
if debug_function and enable_notice:
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
if not (mod.__name__ in ignored_modules):
i = ' ' * _debug_indent
debug_function(NOTICE, i + 'dbg: ' + message % args) | def function[dbg, parameter[message]]:
    constant[ Looks at the stack to see if a debug message should be printed. ]
if <ast.BoolOp object at 0x7da18fe90820> begin[:]
variable[frm] assign[=] call[call[name[inspect].stack, parameter[]]][constant[1]]
variable[mod] assign[=] call[name[inspect].getmodule, parameter[call[name[frm]][constant[0]]]]
if <ast.UnaryOp object at 0x7da18fe93e50> begin[:]
variable[i] assign[=] binary_operation[constant[ ] * name[_debug_indent]]
call[name[debug_function], parameter[name[NOTICE], binary_operation[binary_operation[name[i] + constant[dbg: ]] + binary_operation[name[message] <ast.Mod object at 0x7da2590d6920> name[args]]]]] | keyword[def] identifier[dbg] ( identifier[message] ,* identifier[args] ):
literal[string]
keyword[if] identifier[debug_function] keyword[and] identifier[enable_notice] :
identifier[frm] = identifier[inspect] . identifier[stack] ()[ literal[int] ]
identifier[mod] = identifier[inspect] . identifier[getmodule] ( identifier[frm] [ literal[int] ])
keyword[if] keyword[not] ( identifier[mod] . identifier[__name__] keyword[in] identifier[ignored_modules] ):
identifier[i] = literal[string] * identifier[_debug_indent]
identifier[debug_function] ( identifier[NOTICE] , identifier[i] + literal[string] + identifier[message] % identifier[args] ) | def dbg(message, *args):
""" Looks at the stack, to see if a debug message should be printed. """
if debug_function and enable_notice:
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
if not mod.__name__ in ignored_modules:
i = ' ' * _debug_indent
debug_function(NOTICE, i + 'dbg: ' + message % args) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
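The gate in `dbg` relies on frame 1 of `inspect.stack()` being the immediate caller, whose module is then checked against `ignored_modules`. A self-contained sketch of just that lookup (note the shipped code would raise if `getmodule` returned None; the sketch guards for it):

import inspect

def caller_module_name():
    # Frame 0 is this function; frame 1 is whoever called it.
    frm = inspect.stack()[1]
    mod = inspect.getmodule(frm[0])
    return mod.__name__ if mod else '<unknown>'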
def set_state(self, state):
"""
Set the state of this device to on or off.
"""
self.basicevent.SetBinaryState(BinaryState=int(state))
self._state = int(state) | def function[set_state, parameter[self, state]]:
constant[
Set the state of this device to on or off.
]
call[name[self].basicevent.SetBinaryState, parameter[]]
name[self]._state assign[=] call[name[int], parameter[name[state]]] | keyword[def] identifier[set_state] ( identifier[self] , identifier[state] ):
literal[string]
identifier[self] . identifier[basicevent] . identifier[SetBinaryState] ( identifier[BinaryState] = identifier[int] ( identifier[state] ))
identifier[self] . identifier[_state] = identifier[int] ( identifier[state] ) | def set_state(self, state):
"""
Set the state of this device to on or off.
"""
self.basicevent.SetBinaryState(BinaryState=int(state))
self._state = int(state) |
def remover(self, id_tipo_acesso):
"""Removes access type by its identifier.
:param id_tipo_acesso: Access type identifier.
:return: None
:raise TipoAcessoError: Access type associated with equipment, cannot be removed.
:raise InvalidParameterError: Protocol value is invalid or none.
:raise TipoAcessoNaoExisteError: Access type doesn't exist.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_tipo_acesso):
raise InvalidParameterError(
u'Access type id is invalid or was not informed.')
url = 'tipoacesso/' + str(id_tipo_acesso) + '/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml) | def function[remover, parameter[self, id_tipo_acesso]]:
constant[Removes access type by its identifier.
:param id_tipo_acesso: Access type identifier.
:return: None
:raise TipoAcessoError: Access type associated with equipment, cannot be removed.
:raise InvalidParameterError: Protocol value is invalid or none.
:raise TipoAcessoNaoExisteError: Access type doesn't exist.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
]
if <ast.UnaryOp object at 0x7da1b2344f40> begin[:]
<ast.Raise object at 0x7da1b2346c80>
variable[url] assign[=] binary_operation[binary_operation[constant[tipoacesso/] + call[name[str], parameter[name[id_tipo_acesso]]]] + constant[/]]
<ast.Tuple object at 0x7da1b2344940> assign[=] call[name[self].submit, parameter[constant[None], constant[DELETE], name[url]]]
return[call[name[self].response, parameter[name[code], name[xml]]]] | keyword[def] identifier[remover] ( identifier[self] , identifier[id_tipo_acesso] ):
literal[string]
keyword[if] keyword[not] identifier[is_valid_int_param] ( identifier[id_tipo_acesso] ):
keyword[raise] identifier[InvalidParameterError] (
literal[string] )
identifier[url] = literal[string] + identifier[str] ( identifier[id_tipo_acesso] )+ literal[string]
identifier[code] , identifier[xml] = identifier[self] . identifier[submit] ( keyword[None] , literal[string] , identifier[url] )
keyword[return] identifier[self] . identifier[response] ( identifier[code] , identifier[xml] ) | def remover(self, id_tipo_acesso):
"""Removes access type by its identifier.
:param id_tipo_acesso: Access type identifier.
:return: None
:raise TipoAcessoError: Access type associated with equipment, cannot be removed.
:raise InvalidParameterError: Protocol value is invalid or none.
:raise TipoAcessoNaoExisteError: Access type doesn't exist.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_tipo_acesso):
raise InvalidParameterError(u'Access type id is invalid or was not informed.') # depends on [control=['if'], data=[]]
url = 'tipoacesso/' + str(id_tipo_acesso) + '/'
(code, xml) = self.submit(None, 'DELETE', url)
return self.response(code, xml) |
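`is_valid_int_param` is referenced but not defined in this excerpt; a plausible stand-in, under the assumption that "valid" means coercible to a non-negative integer:

def is_valid_int_param(param):
    # assumption: None and non-numeric strings count as invalid
    if param is None:
        return False
    try:
        return int(param) >= 0
    except (TypeError, ValueError):
        return False

# is_valid_int_param('42') -> True ; is_valid_int_param('4x') -> False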
def create(self, identity, role_sid=values.unset, attributes=values.unset,
friendly_name=values.unset):
"""
Create a new UserInstance
:param unicode identity: The `identity` value that identifies the new resource's User
:param unicode role_sid: The SID of the Role assigned to this user
:param unicode attributes: A valid JSON string that contains application-specific data
:param unicode friendly_name: A string to describe the new resource
:returns: Newly created UserInstance
:rtype: twilio.rest.chat.v2.service.user.UserInstance
"""
data = values.of({
'Identity': identity,
'RoleSid': role_sid,
'Attributes': attributes,
'FriendlyName': friendly_name,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return UserInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | def function[create, parameter[self, identity, role_sid, attributes, friendly_name]]:
constant[
Create a new UserInstance
:param unicode identity: The `identity` value that identifies the new resource's User
:param unicode role_sid: The SID of the Role assigned to this user
:param unicode attributes: A valid JSON string that contains application-specific data
:param unicode friendly_name: A string to describe the new resource
:returns: Newly created UserInstance
:rtype: twilio.rest.chat.v2.service.user.UserInstance
]
variable[data] assign[=] call[name[values].of, parameter[dictionary[[<ast.Constant object at 0x7da1b1eed150>, <ast.Constant object at 0x7da1b1eee0e0>, <ast.Constant object at 0x7da1b1eec940>, <ast.Constant object at 0x7da1b1eeeec0>], [<ast.Name object at 0x7da1b1eec610>, <ast.Name object at 0x7da1b1eec280>, <ast.Name object at 0x7da1b1eed6f0>, <ast.Name object at 0x7da1b1eecd60>]]]]
variable[payload] assign[=] call[name[self]._version.create, parameter[constant[POST], name[self]._uri]]
return[call[name[UserInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[identity] , identifier[role_sid] = identifier[values] . identifier[unset] , identifier[attributes] = identifier[values] . identifier[unset] ,
identifier[friendly_name] = identifier[values] . identifier[unset] ):
literal[string]
identifier[data] = identifier[values] . identifier[of] ({
literal[string] : identifier[identity] ,
literal[string] : identifier[role_sid] ,
literal[string] : identifier[attributes] ,
literal[string] : identifier[friendly_name] ,
})
identifier[payload] = identifier[self] . identifier[_version] . identifier[create] (
literal[string] ,
identifier[self] . identifier[_uri] ,
identifier[data] = identifier[data] ,
)
keyword[return] identifier[UserInstance] ( identifier[self] . identifier[_version] , identifier[payload] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ],) | def create(self, identity, role_sid=values.unset, attributes=values.unset, friendly_name=values.unset):
"""
Create a new UserInstance
:param unicode identity: The `identity` value that identifies the new resource's User
:param unicode role_sid: The SID of the Role assigned to this user
:param unicode attributes: A valid JSON string that contains application-specific data
:param unicode friendly_name: A string to describe the new resource
:returns: Newly created UserInstance
:rtype: twilio.rest.chat.v2.service.user.UserInstance
"""
data = values.of({'Identity': identity, 'RoleSid': role_sid, 'Attributes': attributes, 'FriendlyName': friendly_name})
payload = self._version.create('POST', self._uri, data=data)
return UserInstance(self._version, payload, service_sid=self._solution['service_sid']) |
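A usage sketch showing how this list resource is typically reached from the twilio-python client; the SIDs below are placeholders, and the exact fluent path is an assumption based on the Chat v2 REST layout.

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')  # placeholders
user = client.chat.v2.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
               .users.create(identity='alice', friendly_name='Alice')
print(user.sid)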
def attach_service(cls, service):
""" Allows you to attach one TCP and one HTTP service
deprecated:: 2.1.73 use http and tcp specific methods
:param service: A trellio TCP or HTTP service that needs to be hosted
"""
if isinstance(service, HTTPService):
cls._http_service = service
elif isinstance(service, TCPService):
cls._tcp_service = service
else:
cls._logger.error('Invalid argument attached as service')
cls._set_bus(service) | def function[attach_service, parameter[cls, service]]:
constant[ Allows you to attach one TCP and one HTTP service
deprecated:: 2.1.73 use http and tcp specific methods
:param service: A trellio TCP or HTTP service that needs to be hosted
]
if call[name[isinstance], parameter[name[service], name[HTTPService]]] begin[:]
name[cls]._http_service assign[=] name[service]
call[name[cls]._set_bus, parameter[name[service]]] | keyword[def] identifier[attach_service] ( identifier[cls] , identifier[service] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[service] , identifier[HTTPService] ):
identifier[cls] . identifier[_http_service] = identifier[service]
keyword[elif] identifier[isinstance] ( identifier[service] , identifier[TCPService] ):
identifier[cls] . identifier[_tcp_service] = identifier[service]
keyword[else] :
identifier[cls] . identifier[_logger] . identifier[error] ( literal[string] )
identifier[cls] . identifier[_set_bus] ( identifier[service] ) | def attach_service(cls, service):
""" Allows you to attach one TCP and one HTTP service
deprecated:: 2.1.73 use http and tcp specific methods
:param service: A trellio TCP or HTTP service that needs to be hosted
"""
if isinstance(service, HTTPService):
cls._http_service = service # depends on [control=['if'], data=[]]
elif isinstance(service, TCPService):
cls._tcp_service = service # depends on [control=['if'], data=[]]
else:
cls._logger.error('Invalid argument attached as service')
cls._set_bus(service) |
def encrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that encrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
encrypt = self._encrypt
try:
prev_cipher_L, prev_cipher_R = u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
plain_L, plain_R = u4_2_unpack(data[0:8])
prev_cipher_L, prev_cipher_R = encrypt(
plain_L ^ prev_cipher_L,
plain_R ^ prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
cipher_block = u4_2_pack(prev_cipher_L, prev_cipher_R)
for plain_L, plain_R in self._u4_2_iter_unpack(data[8:last_block_stop_i]):
yield cipher_block
prev_cipher_L, prev_cipher_R = encrypt(
plain_L ^ prev_cipher_L,
plain_R ^ prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
cipher_block = u4_2_pack(prev_cipher_L, prev_cipher_R)
P_L, P_R = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
yield u4_2_pack(
*encrypt(
prev_cipher_L ^ P_L,
prev_cipher_R ^ P_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
)
yield cipher_block[:extra_bytes] | def function[encrypt_cbc_cts, parameter[self, data, init_vector]]:
constant[
Return an iterator that encrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
]
variable[data_len] assign[=] call[name[len], parameter[name[data]]]
if compare[name[data_len] less_or_equal[<=] constant[8]] begin[:]
<ast.Raise object at 0x7da18f09ece0>
<ast.Tuple object at 0x7da18f09f790> assign[=] name[self].S
variable[P] assign[=] name[self].P
variable[u4_1_pack] assign[=] name[self]._u4_1_pack
variable[u1_4_unpack] assign[=] name[self]._u1_4_unpack
variable[u4_2_pack] assign[=] name[self]._u4_2_pack
variable[u4_2_unpack] assign[=] name[self]._u4_2_unpack
variable[encrypt] assign[=] name[self]._encrypt
<ast.Try object at 0x7da18f09c3d0>
variable[extra_bytes] assign[=] binary_operation[name[data_len] <ast.Mod object at 0x7da2590d6920> constant[8]]
variable[last_block_stop_i] assign[=] binary_operation[name[data_len] - name[extra_bytes]]
<ast.Tuple object at 0x7da20e9b3b20> assign[=] call[name[u4_2_unpack], parameter[call[name[data]][<ast.Slice object at 0x7da20e9b21d0>]]]
<ast.Tuple object at 0x7da20e9b3760> assign[=] call[name[encrypt], parameter[binary_operation[name[plain_L] <ast.BitXor object at 0x7da2590d6b00> name[prev_cipher_L]], binary_operation[name[plain_R] <ast.BitXor object at 0x7da2590d6b00> name[prev_cipher_R]], name[P], name[S1], name[S2], name[S3], name[S4], name[u4_1_pack], name[u1_4_unpack]]]
variable[cipher_block] assign[=] call[name[u4_2_pack], parameter[name[prev_cipher_L], name[prev_cipher_R]]]
for taget[tuple[[<ast.Name object at 0x7da20e9b0880>, <ast.Name object at 0x7da20e9b26e0>]]] in starred[call[name[self]._u4_2_iter_unpack, parameter[call[name[data]][<ast.Slice object at 0x7da20e9b3d00>]]]] begin[:]
<ast.Yield object at 0x7da20e9b30a0>
<ast.Tuple object at 0x7da20e9b2710> assign[=] call[name[encrypt], parameter[binary_operation[name[plain_L] <ast.BitXor object at 0x7da2590d6b00> name[prev_cipher_L]], binary_operation[name[plain_R] <ast.BitXor object at 0x7da2590d6b00> name[prev_cipher_R]], name[P], name[S1], name[S2], name[S3], name[S4], name[u4_1_pack], name[u1_4_unpack]]]
variable[cipher_block] assign[=] call[name[u4_2_pack], parameter[name[prev_cipher_L], name[prev_cipher_R]]]
<ast.Tuple object at 0x7da20e9b1d50> assign[=] call[name[u4_2_unpack], parameter[binary_operation[call[name[data]][<ast.Slice object at 0x7da20e9b33d0>] + call[name[bytes], parameter[binary_operation[constant[8] - name[extra_bytes]]]]]]]
<ast.Yield object at 0x7da20e9b12d0>
<ast.Yield object at 0x7da20e9b2ad0> | keyword[def] identifier[encrypt_cbc_cts] ( identifier[self] , identifier[data] , identifier[init_vector] ):
literal[string]
identifier[data_len] = identifier[len] ( identifier[data] )
keyword[if] identifier[data_len] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] = identifier[self] . identifier[S]
identifier[P] = identifier[self] . identifier[P]
identifier[u4_1_pack] = identifier[self] . identifier[_u4_1_pack]
identifier[u1_4_unpack] = identifier[self] . identifier[_u1_4_unpack]
identifier[u4_2_pack] = identifier[self] . identifier[_u4_2_pack]
identifier[u4_2_unpack] = identifier[self] . identifier[_u4_2_unpack]
identifier[encrypt] = identifier[self] . identifier[_encrypt]
keyword[try] :
identifier[prev_cipher_L] , identifier[prev_cipher_R] = identifier[u4_2_unpack] ( identifier[init_vector] )
keyword[except] identifier[struct_error] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[extra_bytes] = identifier[data_len] % literal[int]
identifier[last_block_stop_i] = identifier[data_len] - identifier[extra_bytes]
identifier[plain_L] , identifier[plain_R] = identifier[u4_2_unpack] ( identifier[data] [ literal[int] : literal[int] ])
identifier[prev_cipher_L] , identifier[prev_cipher_R] = identifier[encrypt] (
identifier[plain_L] ^ identifier[prev_cipher_L] ,
identifier[plain_R] ^ identifier[prev_cipher_R] ,
identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] ,
identifier[u4_1_pack] , identifier[u1_4_unpack]
)
identifier[cipher_block] = identifier[u4_2_pack] ( identifier[prev_cipher_L] , identifier[prev_cipher_R] )
keyword[for] identifier[plain_L] , identifier[plain_R] keyword[in] identifier[self] . identifier[_u4_2_iter_unpack] ( identifier[data] [ literal[int] : identifier[last_block_stop_i] ]):
keyword[yield] identifier[cipher_block]
identifier[prev_cipher_L] , identifier[prev_cipher_R] = identifier[encrypt] (
identifier[plain_L] ^ identifier[prev_cipher_L] ,
identifier[plain_R] ^ identifier[prev_cipher_R] ,
identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] ,
identifier[u4_1_pack] , identifier[u1_4_unpack]
)
identifier[cipher_block] = identifier[u4_2_pack] ( identifier[prev_cipher_L] , identifier[prev_cipher_R] )
identifier[P_L] , identifier[P_R] = identifier[u4_2_unpack] ( identifier[data] [ identifier[last_block_stop_i] :]+ identifier[bytes] ( literal[int] - identifier[extra_bytes] ))
keyword[yield] identifier[u4_2_pack] (
* identifier[encrypt] (
identifier[prev_cipher_L] ^ identifier[P_L] ,
identifier[prev_cipher_R] ^ identifier[P_R] ,
identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] ,
identifier[u4_1_pack] , identifier[u1_4_unpack]
)
)
keyword[yield] identifier[cipher_block] [: identifier[extra_bytes] ] | def encrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that encrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError('data is not greater than 8 bytes in length') # depends on [control=['if'], data=[]]
(S1, S2, S3, S4) = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
encrypt = self._encrypt
try:
(prev_cipher_L, prev_cipher_R) = u4_2_unpack(init_vector) # depends on [control=['try'], data=[]]
except struct_error:
raise ValueError('initialization vector is not 8 bytes in length') # depends on [control=['except'], data=[]]
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
(plain_L, plain_R) = u4_2_unpack(data[0:8])
(prev_cipher_L, prev_cipher_R) = encrypt(plain_L ^ prev_cipher_L, plain_R ^ prev_cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
cipher_block = u4_2_pack(prev_cipher_L, prev_cipher_R)
for (plain_L, plain_R) in self._u4_2_iter_unpack(data[8:last_block_stop_i]):
yield cipher_block
(prev_cipher_L, prev_cipher_R) = encrypt(plain_L ^ prev_cipher_L, plain_R ^ prev_cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
cipher_block = u4_2_pack(prev_cipher_L, prev_cipher_R) # depends on [control=['for'], data=[]]
(P_L, P_R) = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
yield u4_2_pack(*encrypt(prev_cipher_L ^ P_L, prev_cipher_R ^ P_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack))
yield cipher_block[:extra_bytes] |
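A usage sketch; the cipher constructor and key handling are assumptions, since only this method is shown. What the method itself guarantees is that ciphertext stealing keeps the ciphertext exactly as long as the plaintext.

import os

key = b'an example key'          # hypothetical: key setup is not shown above
cipher = Cipher(key)             # hypothetical constructor for the class above
iv = os.urandom(8)               # the block size is 8 bytes
plaintext = b'any payload longer than eight bytes'

ciphertext = b''.join(cipher.encrypt_cbc_cts(plaintext, iv))
assert len(ciphertext) == len(plaintext)   # the CTS property: no padding growth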
from typing import Union

def format_env(key, value: Union[None, bytes, str]) -> str:
"""
Formats envs from {key:value} to ['key=value']
"""
if value is None:
return key
if isinstance(value, bytes):
value = value.decode("utf-8")
return "{key}={value}".format(key=key, value=value) | def function[format_env, parameter[key, value]]:
constant[
Formats envs from {key:value} to ['key=value']
]
if compare[name[value] is constant[None]] begin[:]
return[name[key]]
if call[name[isinstance], parameter[name[value], name[bytes]]] begin[:]
variable[value] assign[=] call[name[value].decode, parameter[constant[utf-8]]]
return[call[constant[{key}={value}].format, parameter[]]] | keyword[def] identifier[format_env] ( identifier[key] , identifier[value] : identifier[Union] [ keyword[None] , identifier[bytes] , identifier[str] ])-> identifier[str] :
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] identifier[key]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[bytes] ):
identifier[value] = identifier[value] . identifier[decode] ( literal[string] )
keyword[return] literal[string] . identifier[format] ( identifier[key] = identifier[key] , identifier[value] = identifier[value] ) | def format_env(key, value: Union[None, bytes, str]) -> str:
"""
Formats envs from {key:value} to ['key=value']
"""
if value is None:
return key # depends on [control=['if'], data=[]]
if isinstance(value, bytes):
value = value.decode('utf-8') # depends on [control=['if'], data=[]]
return '{key}={value}'.format(key=key, value=value) |
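A quick demonstration of the three branches (str, bytes, and None, which yields a bare key):

env = {'PATH': '/usr/bin', 'TOKEN': b's3cr3t', 'FLAG_ONLY': None}
formatted = [format_env(k, v) for k, v in env.items()]
# -> ['PATH=/usr/bin', 'TOKEN=s3cr3t', 'FLAG_ONLY']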
def thermal_source(self):
"""Apply emissivity to an existing beam to produce a thermal
source spectrum (without optical counterpart).
    Thermal source spectrum is calculated as follows:
#. Create a blackbody spectrum in PHOTLAM per square arcsec
with `temperature`.
#. Multiply the blackbody with `beam_fill_factor` and ``self``.
Returns
-------
sp : `~synphot.spectrum.SourceSpectrum`
Thermal source spectrum.
"""
sp = (SourceSpectrum(BlackBody1D, temperature=self.temperature) *
units.SR_PER_ARCSEC2 * self.beam_fill_factor * self)
sp.meta['temperature'] = self.temperature
sp.meta['beam_fill_factor'] = self.beam_fill_factor
return sp | def function[thermal_source, parameter[self]]:
constant[Apply emissivity to an existing beam to produce a thermal
source spectrum (without optical counterpart).
    Thermal source spectrum is calculated as follows:
#. Create a blackbody spectrum in PHOTLAM per square arcsec
with `temperature`.
#. Multiply the blackbody with `beam_fill_factor` and ``self``.
Returns
-------
sp : `~synphot.spectrum.SourceSpectrum`
Thermal source spectrum.
]
variable[sp] assign[=] binary_operation[binary_operation[binary_operation[call[name[SourceSpectrum], parameter[name[BlackBody1D]]] * name[units].SR_PER_ARCSEC2] * name[self].beam_fill_factor] * name[self]]
call[name[sp].meta][constant[temperature]] assign[=] name[self].temperature
call[name[sp].meta][constant[beam_fill_factor]] assign[=] name[self].beam_fill_factor
return[name[sp]] | keyword[def] identifier[thermal_source] ( identifier[self] ):
literal[string]
identifier[sp] =( identifier[SourceSpectrum] ( identifier[BlackBody1D] , identifier[temperature] = identifier[self] . identifier[temperature] )*
identifier[units] . identifier[SR_PER_ARCSEC2] * identifier[self] . identifier[beam_fill_factor] * identifier[self] )
identifier[sp] . identifier[meta] [ literal[string] ]= identifier[self] . identifier[temperature]
identifier[sp] . identifier[meta] [ literal[string] ]= identifier[self] . identifier[beam_fill_factor]
keyword[return] identifier[sp] | def thermal_source(self):
"""Apply emissivity to an existing beam to produce a thermal
source spectrum (without optical counterpart).
    Thermal source spectrum is calculated as follows:
#. Create a blackbody spectrum in PHOTLAM per square arcsec
with `temperature`.
#. Multiply the blackbody with `beam_fill_factor` and ``self``.
Returns
-------
sp : `~synphot.spectrum.SourceSpectrum`
Thermal source spectrum.
"""
sp = SourceSpectrum(BlackBody1D, temperature=self.temperature) * units.SR_PER_ARCSEC2 * self.beam_fill_factor * self
sp.meta['temperature'] = self.temperature
sp.meta['beam_fill_factor'] = self.beam_fill_factor
return sp |
def block_code(self):
inputs = self._get_all_input_values()
outputs = {}
"""
self.f = self.user_function(**inputs)
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
"""
if self.first_time:
self.f = self.user_function(**inputs)
outputs = self.f.next()
self.first_time = False
else:
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
if outputs:
for key in outputs.keys():
self.set_output_data(key, outputs[key])
if 'previous_outputs' in self.output_channels.keys():
self.output_channels['previous_outputs'].set_value(Data(self.time, copy.deepcopy(outputs))) | def function[block_code, parameter[self]]:
variable[inputs] assign[=] call[name[self]._get_all_input_values, parameter[]]
variable[outputs] assign[=] dictionary[[], []]
constant[
self.f = self.user_function(**inputs)
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
]
if name[self].first_time begin[:]
name[self].f assign[=] call[name[self].user_function, parameter[]]
variable[outputs] assign[=] call[name[self].f.next, parameter[]]
name[self].first_time assign[=] constant[False]
if name[outputs] begin[:]
for taget[name[key]] in starred[call[name[outputs].keys, parameter[]]] begin[:]
call[name[self].set_output_data, parameter[name[key], call[name[outputs]][name[key]]]]
if compare[constant[previous_outputs] in call[name[self].output_channels.keys, parameter[]]] begin[:]
call[call[name[self].output_channels][constant[previous_outputs]].set_value, parameter[call[name[Data], parameter[name[self].time, call[name[copy].deepcopy, parameter[name[outputs]]]]]]] | keyword[def] identifier[block_code] ( identifier[self] ):
identifier[inputs] = identifier[self] . identifier[_get_all_input_values] ()
identifier[outputs] ={}
literal[string]
keyword[if] identifier[self] . identifier[first_time] :
identifier[self] . identifier[f] = identifier[self] . identifier[user_function] (** identifier[inputs] )
identifier[outputs] = identifier[self] . identifier[f] . identifier[next] ()
identifier[self] . identifier[first_time] = keyword[False]
keyword[else] :
keyword[try] :
identifier[outputs] = identifier[self] . identifier[f] . identifier[send] ( identifier[inputs] )
keyword[except] identifier[StopIteration] :
identifier[self] . identifier[terminate] ()
keyword[if] identifier[outputs] :
keyword[for] identifier[key] keyword[in] identifier[outputs] . identifier[keys] ():
identifier[self] . identifier[set_output_data] ( identifier[key] , identifier[outputs] [ identifier[key] ])
keyword[if] literal[string] keyword[in] identifier[self] . identifier[output_channels] . identifier[keys] ():
identifier[self] . identifier[output_channels] [ literal[string] ]. identifier[set_value] ( identifier[Data] ( identifier[self] . identifier[time] , identifier[copy] . identifier[deepcopy] ( identifier[outputs] ))) | def block_code(self):
inputs = self._get_all_input_values()
outputs = {}
'\n self.f = self.user_function(**inputs)\n try:\n outputs = self.f.send(inputs)\n except StopIteration:\n self.terminate()\n '
if self.first_time:
self.f = self.user_function(**inputs)
outputs = self.f.next()
self.first_time = False # depends on [control=['if'], data=[]]
else:
try:
outputs = self.f.send(inputs) # depends on [control=['try'], data=[]]
except StopIteration:
self.terminate() # depends on [control=['except'], data=[]]
if outputs:
for key in outputs.keys():
self.set_output_data(key, outputs[key]) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
if 'previous_outputs' in self.output_channels.keys():
self.output_channels['previous_outputs'].set_value(Data(self.time, copy.deepcopy(outputs))) # depends on [control=['if'], data=[]] |
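`block_code` drives `self.user_function` as a generator: it is primed once with the first inputs bound as keyword arguments, then fed a fresh inputs dict through `send()` each cycle until StopIteration terminates the block. A hedged sketch of a user function matching that protocol (all names illustrative):

def doubler(x=None):
    outputs = {'y': (x or 0) * 2}    # initial inputs arrive as keyword args
    for _ in range(10):              # stop after ten cycles -> StopIteration
        inputs = yield outputs       # yields outputs, receives the next send()
        outputs = {'y': inputs['x'] * 2}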
def flatten_multi_dim(sequence):
"""Flatten a multi-dimensional array-like to a single dimensional sequence
(as a generator).
"""
for x in sequence:
if (isinstance(x, collections.Iterable)
and not isinstance(x, six.string_types)):
for y in flatten_multi_dim(x):
yield y
else:
yield x | def function[flatten_multi_dim, parameter[sequence]]:
constant[Flatten a multi-dimensional array-like to a single dimensional sequence
(as a generator).
]
for taget[name[x]] in starred[name[sequence]] begin[:]
if <ast.BoolOp object at 0x7da1b0f70250> begin[:]
for taget[name[y]] in starred[call[name[flatten_multi_dim], parameter[name[x]]]] begin[:]
<ast.Yield object at 0x7da1b0f710f0> | keyword[def] identifier[flatten_multi_dim] ( identifier[sequence] ):
literal[string]
keyword[for] identifier[x] keyword[in] identifier[sequence] :
keyword[if] ( identifier[isinstance] ( identifier[x] , identifier[collections] . identifier[Iterable] )
keyword[and] keyword[not] identifier[isinstance] ( identifier[x] , identifier[six] . identifier[string_types] )):
keyword[for] identifier[y] keyword[in] identifier[flatten_multi_dim] ( identifier[x] ):
keyword[yield] identifier[y]
keyword[else] :
keyword[yield] identifier[x] | def flatten_multi_dim(sequence):
"""Flatten a multi-dimensional array-like to a single dimensional sequence
(as a generator).
"""
for x in sequence:
if isinstance(x, collections.Iterable) and (not isinstance(x, six.string_types)):
for y in flatten_multi_dim(x):
yield y # depends on [control=['for'], data=['y']] # depends on [control=['if'], data=[]]
else:
yield x # depends on [control=['for'], data=['x']] |
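The function needs `collections` and `six` in scope (note that on Python 3.10+ the ABC moved to `collections.abc`, so the attribute lookup above would fail there). Example:

import collections
import six

nested = [1, [2, [3, 'four']], (5, 6)]
print(list(flatten_multi_dim(nested)))   # [1, 2, 3, 'four', 5, 6]
# strings are treated as atoms rather than split into characters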
def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, "__len__"):
return self.one_vertical_total_stress(z)
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value))
return np.array(sigma_v_effs) | def function[get_v_total_stress_at_depth, parameter[self, z]]:
constant[
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
]
if <ast.UnaryOp object at 0x7da20e9b3be0> begin[:]
return[call[name[self].one_vertical_total_stress, parameter[name[z]]]] | keyword[def] identifier[get_v_total_stress_at_depth] ( identifier[self] , identifier[z] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[z] , literal[string] ):
keyword[return] identifier[self] . identifier[one_vertical_total_stress] ( identifier[z] )
keyword[else] :
identifier[sigma_v_effs] =[]
keyword[for] identifier[value] keyword[in] identifier[z] :
identifier[sigma_v_effs] . identifier[append] ( identifier[self] . identifier[one_vertical_total_stress] ( identifier[value] ))
keyword[return] identifier[np] . identifier[array] ( identifier[sigma_v_effs] ) | def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, '__len__'):
return self.one_vertical_total_stress(z) # depends on [control=['if'], data=[]]
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value)) # depends on [control=['for'], data=['value']]
return np.array(sigma_v_effs) |
def connectToBroker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
try:
yield self.protocol.connect("TwistedMQTT-subs", keepalive=60)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=BROKER, excp=e)
else:
log.info("Connected and subscribed to {broker}", broker=BROKER) | def function[connectToBroker, parameter[self, protocol]]:
constant[
Connect to MQTT broker
]
name[self].protocol assign[=] name[protocol]
name[self].protocol.onPublish assign[=] name[self].onPublish
name[self].protocol.onDisconnection assign[=] name[self].onDisconnection
call[name[self].protocol.setWindowSize, parameter[constant[3]]]
<ast.Try object at 0x7da1b025d600> | keyword[def] identifier[connectToBroker] ( identifier[self] , identifier[protocol] ):
literal[string]
identifier[self] . identifier[protocol] = identifier[protocol]
identifier[self] . identifier[protocol] . identifier[onPublish] = identifier[self] . identifier[onPublish]
identifier[self] . identifier[protocol] . identifier[onDisconnection] = identifier[self] . identifier[onDisconnection]
identifier[self] . identifier[protocol] . identifier[setWindowSize] ( literal[int] )
keyword[try] :
keyword[yield] identifier[self] . identifier[protocol] . identifier[connect] ( literal[string] , identifier[keepalive] = literal[int] )
keyword[yield] identifier[self] . identifier[subscribe] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( literal[string] ,
identifier[broker] = identifier[BROKER] , identifier[excp] = identifier[e] )
keyword[else] :
identifier[log] . identifier[info] ( literal[string] , identifier[broker] = identifier[BROKER] ) | def connectToBroker(self, protocol):
"""
Connect to MQTT broker
"""
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
try:
yield self.protocol.connect('TwistedMQTT-subs', keepalive=60)
yield self.subscribe() # depends on [control=['try'], data=[]]
except Exception as e:
log.error('Connecting to {broker} raised {excp!s}', broker=BROKER, excp=e) # depends on [control=['except'], data=['e']]
else:
log.info('Connected and subscribed to {broker}', broker=BROKER) |
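The bare yields on Deferreds imply this method runs under Twisted's `inlineCallbacks`; a hedged sketch of the decorator wiring (the surrounding class and its `subscribe` helper are assumed):

from twisted.internet import defer

class MQTTSubscriberSketch(object):        # hypothetical container class
    @defer.inlineCallbacks
    def connectToBroker(self, protocol):
        self.protocol = protocol
        yield self.protocol.connect('TwistedMQTT-subs', keepalive=60)
        yield self.subscribe()

    def subscribe(self):
        return self.protocol.subscribe('foo/bar', 2)   # assumed topic and QoS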
def _generate_bokeh_file(file_name):
"""
-----
Brief
-----
Auxiliary function responsible for the creation of a directory where Bokeh figures will be
stored.
The "active" output file for Bokeh will also be updated for the new one.
-----------
Description
-----------
To ensure that Bokeh plots are correctly observed in the HTML version of the Notebooks, it is
necessary to embed the plots inside Iframes.
Taking this into consideration, the source file of the plot is mandatory to use an Iframe, and
this function ensures the generation of a Bokeh file for each plot, storing it in an adequate
place.
----------
Parameters
----------
file_name : str
Name given to the file.
Returns
-------
out : str
String containing the file name.
"""
# Creation of our output file instance.
if file_name is None:
file_name = "plot_" + time_package.strftime("%Y_%m_%d_%H_%M_%S.html")
else:
file_name += ".html"
if not os.path.exists("generated_plots"):
os.makedirs("generated_plots")
output_file(os.getcwd().replace("\\", "/") + "/generated_plots/" + file_name)
return file_name | def function[_generate_bokeh_file, parameter[file_name]]:
constant[
-----
Brief
-----
Auxiliary function responsible for the creation of a directory where Bokeh figures will be
stored.
The "active" output file for Bokeh will also be updated for the new one.
-----------
Description
-----------
To ensure that Bokeh plots are correctly observed in the HTML version of the Notebooks, it is
necessary to embed the plots inside Iframes.
    Since an Iframe requires the plot's source file, this function generates a
    Bokeh file for each plot and stores it in an appropriate location.
----------
Parameters
----------
file_name : str
Name given to the file.
Returns
-------
out : str
String containing the file name.
]
if compare[name[file_name] is constant[None]] begin[:]
variable[file_name] assign[=] binary_operation[constant[plot_] + call[name[time_package].strftime, parameter[constant[%Y_%m_%d_%H_%M_%S.html]]]]
if <ast.UnaryOp object at 0x7da18f812710> begin[:]
call[name[os].makedirs, parameter[constant[generated_plots]]]
call[name[output_file], parameter[binary_operation[binary_operation[call[call[name[os].getcwd, parameter[]].replace, parameter[constant[\], constant[/]]] + constant[/generated_plots/]] + name[file_name]]]]
return[name[file_name]] | keyword[def] identifier[_generate_bokeh_file] ( identifier[file_name] ):
literal[string]
keyword[if] identifier[file_name] keyword[is] keyword[None] :
identifier[file_name] = literal[string] + identifier[time_package] . identifier[strftime] ( literal[string] )
keyword[else] :
identifier[file_name] += literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ):
identifier[os] . identifier[makedirs] ( literal[string] )
identifier[output_file] ( identifier[os] . identifier[getcwd] (). identifier[replace] ( literal[string] , literal[string] )+ literal[string] + identifier[file_name] )
keyword[return] identifier[file_name] | def _generate_bokeh_file(file_name):
"""
-----
Brief
-----
Auxiliary function responsible for the creation of a directory where Bokeh figures will be
stored.
The "active" output file for Bokeh will also be updated for the new one.
-----------
Description
-----------
To ensure that Bokeh plots are correctly observed in the HTML version of the Notebooks, it is
necessary to embed the plots inside Iframes.
    Since an Iframe requires the plot's source file, this function generates a
    Bokeh file for each plot and stores it in an appropriate location.
----------
Parameters
----------
file_name : str
Name given to the file.
Returns
-------
out : str
String containing the file name.
"""
# Creation of our output file instance.
if file_name is None:
file_name = 'plot_' + time_package.strftime('%Y_%m_%d_%H_%M_%S.html') # depends on [control=['if'], data=['file_name']]
else:
file_name += '.html'
if not os.path.exists('generated_plots'):
os.makedirs('generated_plots') # depends on [control=['if'], data=[]]
output_file(os.getcwd().replace('\\', '/') + '/generated_plots/' + file_name)
return file_name |
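Calling the helper with no name produces a timestamped file under ./generated_plots and makes it Bokeh's active output; the timestamp below is illustrative.

fname = _generate_bokeh_file(None)
# e.g. 'plot_2024_05_01_10_30_00.html'; the plot saved next can then be
# embedded with <iframe src="generated_plots/plot_2024_05_01_10_30_00.html">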
def emitRemoved( self ):
"""
        Emits the removed signal, provided the dispatcher's signals are not currently blocked.
:return <bool> emitted
"""
# check the signals blocked
if ( self.signalsBlocked() ):
return False
# emit the signal
self.dispatch.removed.emit()
return True | def function[emitRemoved, parameter[self]]:
constant[
Emits the removed signal, provided the dispatcher's signals are not currently blocked.
:return <bool> emitted
]
if call[name[self].signalsBlocked, parameter[]] begin[:]
return[constant[False]]
call[name[self].dispatch.removed.emit, parameter[]]
return[constant[True]] | keyword[def] identifier[emitRemoved] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[signalsBlocked] ()):
keyword[return] keyword[False]
identifier[self] . identifier[dispatch] . identifier[removed] . identifier[emit] ()
keyword[return] keyword[True] | def emitRemoved(self):
"""
Emits the removed signal, provided the dispatcher's signals are not currently blocked.
:return <bool> emitted
"""
# check the signals blocked
if self.signalsBlocked():
return False # depends on [control=['if'], data=[]]
# emit the signal
self.dispatch.removed.emit()
return True |
def plotall(xargs):
"""
%prog plotall input.bed
Plot the matchings between the reconstructed pseudomolecules and the maps.
This command will plot each reconstructed object (non-singleton).
"""
p = OptionParser(plotall.__doc__)
add_allmaps_plot_options(p)
opts, args, iopts = p.set_image_options(xargs, figsize="10x6")
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
agp = AGP(agpfile)
objects = [ob for ob, lines in agp.iter_object()]
for seqid in natsorted(objects):
plot(xargs + [seqid]) | def function[plotall, parameter[xargs]]:
constant[
%prog plotall input.bed
Plot the matchings between the reconstructed pseudomolecules and the maps.
This command will plot each reconstructed object (non-singleton).
]
variable[p] assign[=] call[name[OptionParser], parameter[name[plotall].__doc__]]
call[name[add_allmaps_plot_options], parameter[name[p]]]
<ast.Tuple object at 0x7da20c990f40> assign[=] call[name[p].set_image_options, parameter[name[xargs]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20c990280>]]
<ast.Tuple object at 0x7da20c991510> assign[=] name[args]
variable[pf] assign[=] call[call[name[inputbed].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[agpfile] assign[=] binary_operation[name[pf] + constant[.chr.agp]]
variable[agp] assign[=] call[name[AGP], parameter[name[agpfile]]]
variable[objects] assign[=] <ast.ListComp object at 0x7da20c9900d0>
for taget[name[seqid]] in starred[call[name[natsorted], parameter[name[objects]]]] begin[:]
call[name[plot], parameter[binary_operation[name[xargs] + list[[<ast.Name object at 0x7da20c991f90>]]]]] | keyword[def] identifier[plotall] ( identifier[xargs] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[plotall] . identifier[__doc__] )
identifier[add_allmaps_plot_options] ( identifier[p] )
identifier[opts] , identifier[args] , identifier[iopts] = identifier[p] . identifier[set_image_options] ( identifier[xargs] , identifier[figsize] = literal[string] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[inputbed] ,= identifier[args]
identifier[pf] = identifier[inputbed] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[agpfile] = identifier[pf] + literal[string]
identifier[agp] = identifier[AGP] ( identifier[agpfile] )
identifier[objects] =[ identifier[ob] keyword[for] identifier[ob] , identifier[lines] keyword[in] identifier[agp] . identifier[iter_object] ()]
keyword[for] identifier[seqid] keyword[in] identifier[natsorted] ( identifier[objects] ):
identifier[plot] ( identifier[xargs] +[ identifier[seqid] ]) | def plotall(xargs):
"""
%prog plotall input.bed
Plot the matchings between the reconstructed pseudomolecules and the maps.
This command will plot each reconstructed object (non-singleton).
"""
p = OptionParser(plotall.__doc__)
add_allmaps_plot_options(p)
(opts, args, iopts) = p.set_image_options(xargs, figsize='10x6')
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(inputbed,) = args
pf = inputbed.rsplit('.', 1)[0]
agpfile = pf + '.chr.agp'
agp = AGP(agpfile)
objects = [ob for (ob, lines) in agp.iter_object()]
for seqid in natsorted(objects):
plot(xargs + [seqid]) # depends on [control=['for'], data=['seqid']] |
def read_inquiry_scan_activity(sock):
"""returns the current inquiry scan interval and window,
or -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# read_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQ_ACTIVITY)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE)
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# first read the current inquiry mode.
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQ_ACTIVITY )
pkt = sock.recv(255)
status,interval,window = struct.unpack("!xxxxxxBHH", pkt)
interval = bluez.btohs(interval)
interval = (interval >> 8) | ( (interval & 0xFF) << 8 )
window = (window >> 8) | ( (window & 0xFF) << 8 )
    # restore old filter
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
    if status != 0:
        return -1
    return interval, window | def function[read_inquiry_scan_activity, parameter[sock]]:
constant[returns the current inquiry scan interval and window,
or -1 on failure]
variable[old_filter] assign[=] call[name[sock].getsockopt, parameter[name[bluez].SOL_HCI, name[bluez].HCI_FILTER, constant[14]]]
variable[flt] assign[=] call[name[bluez].hci_filter_new, parameter[]]
variable[opcode] assign[=] call[name[bluez].cmd_opcode_pack, parameter[name[bluez].OGF_HOST_CTL, name[bluez].OCF_READ_INQ_ACTIVITY]]
call[name[bluez].hci_filter_set_ptype, parameter[name[flt], name[bluez].HCI_EVENT_PKT]]
call[name[bluez].hci_filter_set_event, parameter[name[flt], name[bluez].EVT_CMD_COMPLETE]]
call[name[bluez].hci_filter_set_opcode, parameter[name[flt], name[opcode]]]
call[name[sock].setsockopt, parameter[name[bluez].SOL_HCI, name[bluez].HCI_FILTER, name[flt]]]
call[name[bluez].hci_send_cmd, parameter[name[sock], name[bluez].OGF_HOST_CTL, name[bluez].OCF_READ_INQ_ACTIVITY]]
variable[pkt] assign[=] call[name[sock].recv, parameter[constant[255]]]
<ast.Tuple object at 0x7da1b163bd00> assign[=] call[name[struct].unpack, parameter[constant[!xxxxxxBHH], name[pkt]]]
variable[interval] assign[=] call[name[bluez].btohs, parameter[name[interval]]]
variable[interval] assign[=] binary_operation[binary_operation[name[interval] <ast.RShift object at 0x7da2590d6a40> constant[8]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[binary_operation[name[interval] <ast.BitAnd object at 0x7da2590d6b60> constant[255]] <ast.LShift object at 0x7da2590d69e0> constant[8]]]
variable[window] assign[=] binary_operation[binary_operation[name[window] <ast.RShift object at 0x7da2590d6a40> constant[8]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[binary_operation[name[window] <ast.BitAnd object at 0x7da2590d6b60> constant[255]] <ast.LShift object at 0x7da2590d69e0> constant[8]]]
if compare[name[status] not_equal[!=] constant[0]] begin[:]
variable[mode] assign[=] <ast.UnaryOp object at 0x7da1b163a5f0>
call[name[sock].setsockopt, parameter[name[bluez].SOL_HCI, name[bluez].HCI_FILTER, name[old_filter]]]
return[tuple[[<ast.Name object at 0x7da212db4ee0>, <ast.Name object at 0x7da1b18aa4d0>]]] | keyword[def] identifier[read_inquiry_scan_activity] ( identifier[sock] ):
literal[string]
identifier[old_filter] = identifier[sock] . identifier[getsockopt] ( identifier[bluez] . identifier[SOL_HCI] , identifier[bluez] . identifier[HCI_FILTER] , literal[int] )
identifier[flt] = identifier[bluez] . identifier[hci_filter_new] ()
identifier[opcode] = identifier[bluez] . identifier[cmd_opcode_pack] ( identifier[bluez] . identifier[OGF_HOST_CTL] ,
identifier[bluez] . identifier[OCF_READ_INQ_ACTIVITY] )
identifier[bluez] . identifier[hci_filter_set_ptype] ( identifier[flt] , identifier[bluez] . identifier[HCI_EVENT_PKT] )
identifier[bluez] . identifier[hci_filter_set_event] ( identifier[flt] , identifier[bluez] . identifier[EVT_CMD_COMPLETE] );
identifier[bluez] . identifier[hci_filter_set_opcode] ( identifier[flt] , identifier[opcode] )
identifier[sock] . identifier[setsockopt] ( identifier[bluez] . identifier[SOL_HCI] , identifier[bluez] . identifier[HCI_FILTER] , identifier[flt] )
identifier[bluez] . identifier[hci_send_cmd] ( identifier[sock] , identifier[bluez] . identifier[OGF_HOST_CTL] ,
identifier[bluez] . identifier[OCF_READ_INQ_ACTIVITY] )
identifier[pkt] = identifier[sock] . identifier[recv] ( literal[int] )
identifier[status] , identifier[interval] , identifier[window] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[pkt] )
identifier[interval] = identifier[bluez] . identifier[btohs] ( identifier[interval] )
identifier[interval] =( identifier[interval] >> literal[int] )|(( identifier[interval] & literal[int] )<< literal[int] )
identifier[window] =( identifier[window] >> literal[int] )|(( identifier[window] & literal[int] )<< literal[int] )
keyword[if] identifier[status] != literal[int] : identifier[mode] =- literal[int]
identifier[sock] . identifier[setsockopt] ( identifier[bluez] . identifier[SOL_HCI] , identifier[bluez] . identifier[HCI_FILTER] , identifier[old_filter] )
keyword[return] identifier[interval] , identifier[window] | def read_inquiry_scan_activity(sock):
"""returns the current inquiry scan interval and window,
or -1 on failure"""
# save current filter
old_filter = sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# read_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL, bluez.OCF_READ_INQ_ACTIVITY)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE)
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, flt)
# first read the current inquiry mode.
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL, bluez.OCF_READ_INQ_ACTIVITY)
pkt = sock.recv(255)
(status, interval, window) = struct.unpack('!xxxxxxBHH', pkt)
interval = bluez.btohs(interval)
interval = interval >> 8 | (interval & 255) << 8
window = window >> 8 | (window & 255) << 8
if status != 0:
mode = -1 # depends on [control=['if'], data=[]]
# restore old filter
sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, old_filter)
return (interval, window) |
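The `(x >> 8) | ((x & 0xFF) << 8)` expression swaps the two bytes of a 16-bit value, converting the wire byte order of the interval/window fields; a quick check:

x = 0x1234
swapped = (x >> 8) | ((x & 0xFF) << 8)
assert swapped == 0x3412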
def adjust_for_scratch(self):
"""
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
"""
if self.user_params.scratch.value:
remove_plugins = [
("prebuild_plugins", "koji_parent"),
("postbuild_plugins", "compress"), # required only to make an archive for Koji
("postbuild_plugins", "pulp_pull"), # required only to make an archive for Koji
("postbuild_plugins", "compare_components"),
("postbuild_plugins", "import_image"),
("exit_plugins", "koji_promote"),
("exit_plugins", "koji_tag_build"),
("exit_plugins", "import_image"),
("prebuild_plugins", "check_and_set_rebuild"),
("prebuild_plugins", "stop_autorebuild_if_disabled")
]
if not self.has_tag_suffixes_placeholder():
remove_plugins.append(("postbuild_plugins", "tag_from_config"))
for when, which in remove_plugins:
self.pt.remove_plugin(when, which, 'removed from scratch build request') | def function[adjust_for_scratch, parameter[self]]:
constant[
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
]
if name[self].user_params.scratch.value begin[:]
variable[remove_plugins] assign[=] list[[<ast.Tuple object at 0x7da1b0fd9000>, <ast.Tuple object at 0x7da1b0fd86a0>, <ast.Tuple object at 0x7da1b0fdbb50>, <ast.Tuple object at 0x7da1b0fdbd00>, <ast.Tuple object at 0x7da1b0fdb0a0>, <ast.Tuple object at 0x7da1b0fda9e0>, <ast.Tuple object at 0x7da1b0fd98d0>, <ast.Tuple object at 0x7da1b0fd8220>, <ast.Tuple object at 0x7da1b0fd8a00>, <ast.Tuple object at 0x7da1b0fda5c0>]]
if <ast.UnaryOp object at 0x7da1b0fd8610> begin[:]
call[name[remove_plugins].append, parameter[tuple[[<ast.Constant object at 0x7da1b0fdab90>, <ast.Constant object at 0x7da1b0fd90c0>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0fdae60>, <ast.Name object at 0x7da1b0fdb7f0>]]] in starred[name[remove_plugins]] begin[:]
call[name[self].pt.remove_plugin, parameter[name[when], name[which], constant[removed from scratch build request]]] | keyword[def] identifier[adjust_for_scratch] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[user_params] . identifier[scratch] . identifier[value] :
identifier[remove_plugins] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] )
]
keyword[if] keyword[not] identifier[self] . identifier[has_tag_suffixes_placeholder] ():
identifier[remove_plugins] . identifier[append] (( literal[string] , literal[string] ))
keyword[for] identifier[when] , identifier[which] keyword[in] identifier[remove_plugins] :
identifier[self] . identifier[pt] . identifier[remove_plugin] ( identifier[when] , identifier[which] , literal[string] ) | def adjust_for_scratch(self):
"""
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
"""
if self.user_params.scratch.value: # required only to make an archive for Koji
# required only to make an archive for Koji
remove_plugins = [('prebuild_plugins', 'koji_parent'), ('postbuild_plugins', 'compress'), ('postbuild_plugins', 'pulp_pull'), ('postbuild_plugins', 'compare_components'), ('postbuild_plugins', 'import_image'), ('exit_plugins', 'koji_promote'), ('exit_plugins', 'koji_tag_build'), ('exit_plugins', 'import_image'), ('prebuild_plugins', 'check_and_set_rebuild'), ('prebuild_plugins', 'stop_autorebuild_if_disabled')]
if not self.has_tag_suffixes_placeholder():
remove_plugins.append(('postbuild_plugins', 'tag_from_config')) # depends on [control=['if'], data=[]]
for (when, which) in remove_plugins:
self.pt.remove_plugin(when, which, 'removed from scratch build request') # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def handle_na(self, data):
"""
Remove rows with NaN values
geoms that infer extra information from missing values
should override this method. For example
:class:`~plotnine.geoms.geom_path`.
Parameters
----------
data : dataframe
Data
Returns
-------
out : dataframe
Data without the NaNs.
Notes
-----
        Shows a warning if any rows are removed and the
`na_rm` parameter is False. It only takes into account
the columns of the required aesthetics.
"""
return remove_missing(data,
self.params['na_rm'],
list(self.REQUIRED_AES | self.NON_MISSING_AES),
self.__class__.__name__) | def function[handle_na, parameter[self, data]]:
constant[
Remove rows with NaN values
geoms that infer extra information from missing values
should override this method. For example
:class:`~plotnine.geoms.geom_path`.
Parameters
----------
data : dataframe
Data
Returns
-------
out : dataframe
Data without the NaNs.
Notes
-----
    Shows a warning if any rows are removed and the
`na_rm` parameter is False. It only takes into account
the columns of the required aesthetics.
]
return[call[name[remove_missing], parameter[name[data], call[name[self].params][constant[na_rm]], call[name[list], parameter[binary_operation[name[self].REQUIRED_AES <ast.BitOr object at 0x7da2590d6aa0> name[self].NON_MISSING_AES]]], name[self].__class__.__name__]]] | keyword[def] identifier[handle_na] ( identifier[self] , identifier[data] ):
literal[string]
keyword[return] identifier[remove_missing] ( identifier[data] ,
identifier[self] . identifier[params] [ literal[string] ],
identifier[list] ( identifier[self] . identifier[REQUIRED_AES] | identifier[self] . identifier[NON_MISSING_AES] ),
identifier[self] . identifier[__class__] . identifier[__name__] ) | def handle_na(self, data):
"""
Remove rows with NaN values
geoms that infer extra information from missing values
should override this method. For example
:class:`~plotnine.geoms.geom_path`.
Parameters
----------
data : dataframe
Data
Returns
-------
out : dataframe
Data without the NaNs.
Notes
-----
    Shows a warning if any rows are removed and the
`na_rm` parameter is False. It only takes into account
the columns of the required aesthetics.
"""
return remove_missing(data, self.params['na_rm'], list(self.REQUIRED_AES | self.NON_MISSING_AES), self.__class__.__name__) |
def debug(self, text):
""" Ajout d'un message de log de type DEBUG """
self.logger.debug("{}{}".format(self.message_prefix, text)) | def function[debug, parameter[self, text]]:
    constant[ Add a DEBUG-type log message. ]
call[name[self].logger.debug, parameter[call[constant[{}{}].format, parameter[name[self].message_prefix, name[text]]]]] | keyword[def] identifier[debug] ( identifier[self] , identifier[text] ):
literal[string]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[message_prefix] , identifier[text] )) | def debug(self, text):
""" Ajout d'un message de log de type DEBUG """
self.logger.debug('{}{}'.format(self.message_prefix, text)) |
def json_data(self):
"""Return json description of a question."""
return {
"number": self.number,
"type": self.type,
"participant_id": self.participant_id,
"question": self.question,
"response": self.response,
} | def function[json_data, parameter[self]]:
constant[Return json description of a question.]
return[dictionary[[<ast.Constant object at 0x7da1b03dafe0>, <ast.Constant object at 0x7da1b03db520>, <ast.Constant object at 0x7da1b03dba90>, <ast.Constant object at 0x7da1b03dbeb0>, <ast.Constant object at 0x7da1b03db9d0>], [<ast.Attribute object at 0x7da1b03db010>, <ast.Attribute object at 0x7da18ede6350>, <ast.Attribute object at 0x7da18ede4940>, <ast.Attribute object at 0x7da18ede6890>, <ast.Attribute object at 0x7da18ede4970>]]] | keyword[def] identifier[json_data] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[number] ,
literal[string] : identifier[self] . identifier[type] ,
literal[string] : identifier[self] . identifier[participant_id] ,
literal[string] : identifier[self] . identifier[question] ,
literal[string] : identifier[self] . identifier[response] ,
} | def json_data(self):
"""Return json description of a question."""
return {'number': self.number, 'type': self.type, 'participant_id': self.participant_id, 'question': self.question, 'response': self.response} |
def generate_contact_list(config, args):
"""TODO: Docstring for generate_contact_list.
:param config: the config object to use
:type config: config.Config
:param args: the command line arguments
:type args: argparse.Namespace
:returns: the contacts for further processing (TODO)
:rtype: list(TODO)
"""
# fill contact list
vcard_list = []
if "uid" in args and args.uid:
# If an uid was given we use it to find the contact.
logging.debug("args.uid=%s", args.uid)
# set search terms to the empty query to prevent errors in
# phone and email actions
args.search_terms = ".*"
vcard_list = get_contacts(args.addressbook, args.uid, method="uid")
# We require that the uid given can uniquely identify a contact.
if not vcard_list:
sys.exit("Found no contact for {}uid {}".format(
"source " if args.action == "merge" else "", args.uid))
elif len(vcard_list) != 1:
print("Found multiple contacts for {}uid {}".format(
"source " if args.action == "merge" else "", args.uid))
for vcard in vcard_list:
print(" {}: {}".format(vcard, vcard.get_uid()))
sys.exit(1)
else:
# No uid was given so we try to use the search terms to select a
# contact.
if "source_search_terms" in args:
# exception for merge command
if args.source_search_terms:
args.search_terms = args.source_search_terms
else:
args.search_terms = ".*"
elif "search_terms" in args:
if args.search_terms:
args.search_terms = args.search_terms
else:
args.search_terms = ".*"
else:
# If no search terms were given on the command line we match
# everything with the empty search pattern.
args.search_terms = ".*"
logging.debug("args.search_terms=%s", args.search_terms)
vcard_list = get_contact_list_by_user_selection(
args.addressbook, args.search_terms,
args.strict_search if "strict_search" in args else False)
return vcard_list | def function[generate_contact_list, parameter[config, args]]:
constant[TODO: Docstring for generate_contact_list.
:param config: the config object to use
:type config: config.Config
:param args: the command line arguments
:type args: argparse.Namespace
:returns: the contacts for further processing (TODO)
:rtype: list(TODO)
]
variable[vcard_list] assign[=] list[[]]
if <ast.BoolOp object at 0x7da2041d9570> begin[:]
call[name[logging].debug, parameter[constant[args.uid=%s], name[args].uid]]
name[args].search_terms assign[=] constant[.*]
variable[vcard_list] assign[=] call[name[get_contacts], parameter[name[args].addressbook, name[args].uid]]
if <ast.UnaryOp object at 0x7da1b0552d40> begin[:]
call[name[sys].exit, parameter[call[constant[Found no contact for {}uid {}].format, parameter[<ast.IfExp object at 0x7da1b0553190>, name[args].uid]]]]
return[name[vcard_list]] | keyword[def] identifier[generate_contact_list] ( identifier[config] , identifier[args] ):
literal[string]
identifier[vcard_list] =[]
keyword[if] literal[string] keyword[in] identifier[args] keyword[and] identifier[args] . identifier[uid] :
identifier[logging] . identifier[debug] ( literal[string] , identifier[args] . identifier[uid] )
identifier[args] . identifier[search_terms] = literal[string]
identifier[vcard_list] = identifier[get_contacts] ( identifier[args] . identifier[addressbook] , identifier[args] . identifier[uid] , identifier[method] = literal[string] )
keyword[if] keyword[not] identifier[vcard_list] :
identifier[sys] . identifier[exit] ( literal[string] . identifier[format] (
literal[string] keyword[if] identifier[args] . identifier[action] == literal[string] keyword[else] literal[string] , identifier[args] . identifier[uid] ))
keyword[elif] identifier[len] ( identifier[vcard_list] )!= literal[int] :
identifier[print] ( literal[string] . identifier[format] (
literal[string] keyword[if] identifier[args] . identifier[action] == literal[string] keyword[else] literal[string] , identifier[args] . identifier[uid] ))
keyword[for] identifier[vcard] keyword[in] identifier[vcard_list] :
identifier[print] ( literal[string] . identifier[format] ( identifier[vcard] , identifier[vcard] . identifier[get_uid] ()))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[args] :
keyword[if] identifier[args] . identifier[source_search_terms] :
identifier[args] . identifier[search_terms] = identifier[args] . identifier[source_search_terms]
keyword[else] :
identifier[args] . identifier[search_terms] = literal[string]
keyword[elif] literal[string] keyword[in] identifier[args] :
keyword[if] identifier[args] . identifier[search_terms] :
identifier[args] . identifier[search_terms] = identifier[args] . identifier[search_terms]
keyword[else] :
identifier[args] . identifier[search_terms] = literal[string]
keyword[else] :
identifier[args] . identifier[search_terms] = literal[string]
identifier[logging] . identifier[debug] ( literal[string] , identifier[args] . identifier[search_terms] )
identifier[vcard_list] = identifier[get_contact_list_by_user_selection] (
identifier[args] . identifier[addressbook] , identifier[args] . identifier[search_terms] ,
identifier[args] . identifier[strict_search] keyword[if] literal[string] keyword[in] identifier[args] keyword[else] keyword[False] )
keyword[return] identifier[vcard_list] | def generate_contact_list(config, args):
"""TODO: Docstring for generate_contact_list.
:param config: the config object to use
:type config: config.Config
:param args: the command line arguments
:type args: argparse.Namespace
:returns: the contacts for further processing (TODO)
:rtype: list(TODO)
"""
# fill contact list
vcard_list = []
if 'uid' in args and args.uid:
# If an uid was given we use it to find the contact.
logging.debug('args.uid=%s', args.uid)
# set search terms to the empty query to prevent errors in
# phone and email actions
args.search_terms = '.*'
vcard_list = get_contacts(args.addressbook, args.uid, method='uid')
# We require that the uid given can uniquely identify a contact.
if not vcard_list:
sys.exit('Found no contact for {}uid {}'.format('source ' if args.action == 'merge' else '', args.uid)) # depends on [control=['if'], data=[]]
elif len(vcard_list) != 1:
print('Found multiple contacts for {}uid {}'.format('source ' if args.action == 'merge' else '', args.uid))
for vcard in vcard_list:
print(' {}: {}'.format(vcard, vcard.get_uid())) # depends on [control=['for'], data=['vcard']]
sys.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# No uid was given so we try to use the search terms to select a
# contact.
if 'source_search_terms' in args:
# exception for merge command
if args.source_search_terms:
args.search_terms = args.source_search_terms # depends on [control=['if'], data=[]]
else:
args.search_terms = '.*' # depends on [control=['if'], data=['args']]
elif 'search_terms' in args:
if args.search_terms:
args.search_terms = args.search_terms # depends on [control=['if'], data=[]]
else:
args.search_terms = '.*' # depends on [control=['if'], data=['args']]
else:
# If no search terms were given on the command line we match
# everything with the empty search pattern.
args.search_terms = '.*'
logging.debug('args.search_terms=%s', args.search_terms)
vcard_list = get_contact_list_by_user_selection(args.addressbook, args.search_terms, args.strict_search if 'strict_search' in args else False)
return vcard_list |
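A hedged invocation sketch for the function above, assuming khard-style config and addressbook objects created elsewhere; every attribute value here is hypothetical.
import argparse

# `config` and `addressbook` are assumed to be provided by the caller.
args = argparse.Namespace(
    uid=None,              # no uid, so selection falls through to search terms
    action="list",
    search_terms="smith",  # matched against the address book
    addressbook=addressbook,
    strict_search=False,
)
contacts = generate_contact_list(config, args)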
def get_rrsets_by_type_owner(self, zone_name, rtype, owner_name, q=None, **kwargs):
"""Returns the list of RRSets in the specified zone of the specified type.
Arguments:
zone_name -- The name of the zone.
rtype -- The type of the RRSets. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
Keyword Arguments:
q -- The search parameters, in a dict. Valid keys are:
ttl - must match the TTL for the rrset
value - substring match of the first BIND field value
sort -- The sort column used to order the list. Valid values for the sort field are:
TTL
TYPE
reverse -- Whether the list is ascending(False) or descending(True)
offset -- The position in the list for the first returned element(0 based)
limit -- The maximum number of rows to be returned.
"""
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
params = build_params(q, kwargs)
return self.rest_api_connection.get(uri, params) | def function[get_rrsets_by_type_owner, parameter[self, zone_name, rtype, owner_name, q]]:
constant[Returns the list of RRSets in the specified zone of the specified type.
Arguments:
zone_name -- The name of the zone.
rtype -- The type of the RRSets. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
Keyword Arguments:
q -- The search parameters, in a dict. Valid keys are:
ttl - must match the TTL for the rrset
value - substring match of the first BIND field value
sort -- The sort column used to order the list. Valid values for the sort field are:
TTL
TYPE
reverse -- Whether the list is ascending(False) or descending(True)
offset -- The position in the list for the first returned element(0 based)
limit -- The maximum number of rows to be returned.
]
variable[uri] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[/v1/zones/] + name[zone_name]] + constant[/rrsets/]] + name[rtype]] + constant[/]] + name[owner_name]]
variable[params] assign[=] call[name[build_params], parameter[name[q], name[kwargs]]]
return[call[name[self].rest_api_connection.get, parameter[name[uri], name[params]]]] | keyword[def] identifier[get_rrsets_by_type_owner] ( identifier[self] , identifier[zone_name] , identifier[rtype] , identifier[owner_name] , identifier[q] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[uri] = literal[string] + identifier[zone_name] + literal[string] + identifier[rtype] + literal[string] + identifier[owner_name]
identifier[params] = identifier[build_params] ( identifier[q] , identifier[kwargs] )
keyword[return] identifier[self] . identifier[rest_api_connection] . identifier[get] ( identifier[uri] , identifier[params] ) | def get_rrsets_by_type_owner(self, zone_name, rtype, owner_name, q=None, **kwargs):
"""Returns the list of RRSets in the specified zone of the specified type.
Arguments:
zone_name -- The name of the zone.
rtype -- The type of the RRSets. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
Keyword Arguments:
q -- The search parameters, in a dict. Valid keys are:
ttl - must match the TTL for the rrset
value - substring match of the first BIND field value
sort -- The sort column used to order the list. Valid values for the sort field are:
TTL
TYPE
reverse -- Whether the list is ascending(False) or descending(True)
offset -- The position in the list for the first returned element(0 based)
limit -- The maximum number of rows to be returned.
"""
uri = '/v1/zones/' + zone_name + '/rrsets/' + rtype + '/' + owner_name
params = build_params(q, kwargs)
return self.rest_api_connection.get(uri, params) |
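A usage sketch matching the documented parameters, assuming `client` is an authenticated instance of the class defining this method; zone and owner names are made up.
rrsets = client.get_rrsets_by_type_owner(
    "example.com.",      # zone_name
    "A",                 # rtype: well-known name or numeric string
    "www",               # relative owner name (no trailing dot)
    q={"ttl": 300},      # search filter: TTL must match
    sort="TTL",          # valid sort columns: TTL, TYPE
    reverse=True,        # descending order
    limit=50,            # return at most 50 rows
)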
def is_boolean(value):
"""
Check if the value represents a boolean.
>>> vtor = Validator()
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
"""
if isinstance(value, string_types):
try:
return bool_dict[value.lower()]
except KeyError:
raise VdtTypeError(value)
# we do an equality test rather than an identity test
# this ensures Python 2.2 compatibility
# and allows 0 and 1 to represent True and False
if value == False:
return False
elif value == True:
return True
else:
raise VdtTypeError(value) | def function[is_boolean, parameter[value]]:
constant[
Check if the value represents a boolean.
>>> vtor = Validator()
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
]
if call[name[isinstance], parameter[name[value], name[string_types]]] begin[:]
<ast.Try object at 0x7da1b0e82590>
if compare[name[value] equal[==] constant[False]] begin[:]
return[constant[False]] | keyword[def] identifier[is_boolean] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[string_types] ):
keyword[try] :
keyword[return] identifier[bool_dict] [ identifier[value] . identifier[lower] ()]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[VdtTypeError] ( identifier[value] )
keyword[if] identifier[value] == keyword[False] :
keyword[return] keyword[False]
keyword[elif] identifier[value] == keyword[True] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[VdtTypeError] ( identifier[value] ) | def is_boolean(value):
"""
Check if the value represents a boolean.
>>> vtor = Validator()
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
"""
if isinstance(value, string_types):
try:
return bool_dict[value.lower()] # depends on [control=['try'], data=[]]
except KeyError:
raise VdtTypeError(value) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# we do an equality test rather than an identity test
# this ensures Python 2.2 compatibility
# and allows 0 and 1 to represent True and False
if value == False:
return False # depends on [control=['if'], data=[]]
elif value == True:
return True # depends on [control=['if'], data=[]]
else:
raise VdtTypeError(value) |
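is_boolean depends on a module-level bool_dict; below is a self-contained sketch of a table consistent with the doctests above — the real validate module defines its own mapping, so treat these entries as an assumption.
bool_dict = {
    "true": True, "yes": True, "on": True, "1": True,
    "false": False, "no": False, "off": False, "0": False,
}
# Lookups are effectively case-insensitive because is_boolean lowercases first:
assert bool_dict["nO".lower()] is False
assert bool_dict["YES".lower()] is True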
def _get_main_and_json(directory):
"""Retrieve the main CWL and sample JSON files from a bcbio generated directory.
"""
directory = os.path.normpath(os.path.abspath(directory))
checker_main = os.path.normpath(os.path.join(directory, os.path.pardir, "checker-workflow-wrapping-tool.cwl"))
if checker_main and os.path.exists(checker_main):
main_cwl = [checker_main]
else:
main_cwl = glob.glob(os.path.join(directory, "main-*.cwl"))
main_cwl = [x for x in main_cwl if not x.find("-pack") >= 0]
assert len(main_cwl) == 1, "Did not find main CWL in %s" % directory
main_json = glob.glob(os.path.join(directory, "main-*-samples.json"))
assert len(main_json) == 1, "Did not find main json in %s" % directory
project_name = os.path.basename(directory).split("-workflow")[0]
return main_cwl[0], main_json[0], project_name | def function[_get_main_and_json, parameter[directory]]:
constant[Retrieve the main CWL and sample JSON files from a bcbio generated directory.
]
variable[directory] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.abspath, parameter[name[directory]]]]]
variable[checker_main] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[directory], name[os].path.pardir, constant[checker-workflow-wrapping-tool.cwl]]]]]
if <ast.BoolOp object at 0x7da2041d9510> begin[:]
variable[main_cwl] assign[=] list[[<ast.Name object at 0x7da2041d9540>]]
variable[main_json] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[directory], constant[main-*-samples.json]]]]]
assert[compare[call[name[len], parameter[name[main_json]]] equal[==] constant[1]]]
variable[project_name] assign[=] call[call[call[name[os].path.basename, parameter[name[directory]]].split, parameter[constant[-workflow]]]][constant[0]]
return[tuple[[<ast.Subscript object at 0x7da2041db610>, <ast.Subscript object at 0x7da2041db670>, <ast.Name object at 0x7da2041d9330>]]] | keyword[def] identifier[_get_main_and_json] ( identifier[directory] ):
literal[string]
identifier[directory] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[directory] ))
identifier[checker_main] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[os] . identifier[path] . identifier[pardir] , literal[string] ))
keyword[if] identifier[checker_main] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[checker_main] ):
identifier[main_cwl] =[ identifier[checker_main] ]
keyword[else] :
identifier[main_cwl] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] ))
identifier[main_cwl] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[main_cwl] keyword[if] keyword[not] identifier[x] . identifier[find] ( literal[string] )>= literal[int] ]
keyword[assert] identifier[len] ( identifier[main_cwl] )== literal[int] , literal[string] % identifier[directory]
identifier[main_json] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] ))
keyword[assert] identifier[len] ( identifier[main_json] )== literal[int] , literal[string] % identifier[directory]
identifier[project_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[directory] ). identifier[split] ( literal[string] )[ literal[int] ]
keyword[return] identifier[main_cwl] [ literal[int] ], identifier[main_json] [ literal[int] ], identifier[project_name] | def _get_main_and_json(directory):
"""Retrieve the main CWL and sample JSON files from a bcbio generated directory.
"""
directory = os.path.normpath(os.path.abspath(directory))
checker_main = os.path.normpath(os.path.join(directory, os.path.pardir, 'checker-workflow-wrapping-tool.cwl'))
if checker_main and os.path.exists(checker_main):
main_cwl = [checker_main] # depends on [control=['if'], data=[]]
else:
main_cwl = glob.glob(os.path.join(directory, 'main-*.cwl'))
main_cwl = [x for x in main_cwl if not x.find('-pack') >= 0]
assert len(main_cwl) == 1, 'Did not find main CWL in %s' % directory
main_json = glob.glob(os.path.join(directory, 'main-*-samples.json'))
assert len(main_json) == 1, 'Did not find main json in %s' % directory
project_name = os.path.basename(directory).split('-workflow')[0]
return (main_cwl[0], main_json[0], project_name) |
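The globs above imply a directory layout like the following; the names are hypothetical and only illustrate which files each pattern is expected to match.
# myproject-workflow/
#     main-myproject.cwl           matched by main-*.cwl (no "-pack" in name)
#     main-myproject-samples.json  matched by main-*-samples.json
# ../checker-workflow-wrapping-tool.cwl  used instead of main-*.cwl if present
import os
directory = "/data/myproject-workflow"
print(os.path.basename(directory).split("-workflow")[0])  # -> myproject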
def yield_name2value(self, idx1=None, idx2=None) \
-> Iterator[Tuple[str, str]]:
"""Sequentially return name-value-pairs describing the current state
of the target variables.
The names are automatically generated and contain both the name of
the |Device| of the respective |Variable| object and the target
description:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy.core.itemtools import SetItem
>>> item = GetItem('hland_v1', 'states.lz')
>>> item.collect_variables(pub.selections)
>>> hp.elements.land_dill.model.sequences.states.lz = 100.0
>>> for name, value in item.yield_name2value():
... print(name, value)
land_dill_states_lz 100.0
land_lahn_1_states_lz 8.18711
land_lahn_2_states_lz 10.14007
land_lahn_3_states_lz 7.52648
>>> item = GetItem('hland_v1', 'states.sm')
>>> item.collect_variables(pub.selections)
>>> hp.elements.land_dill.model.sequences.states.sm = 2.0
>>> for name, value in item.yield_name2value():
... print(name, value) # doctest: +ELLIPSIS
land_dill_states_sm [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, \
2.0, 2.0, 2.0, 2.0]
land_lahn_1_states_sm [99.27505, ..., 142.84148]
...
When querying time series, one can restrict the span of interest
by passing index values:
>>> item = GetItem('nodes', 'sim.series')
>>> item.collect_variables(pub.selections)
>>> hp.nodes.dill.sequences.sim.series = 1.0, 2.0, 3.0, 4.0
>>> for name, value in item.yield_name2value():
... print(name, value) # doctest: +ELLIPSIS
dill_sim_series [1.0, 2.0, 3.0, 4.0]
lahn_1_sim_series [nan, ...
...
>>> for name, value in item.yield_name2value(2, 3):
... print(name, value) # doctest: +ELLIPSIS
dill_sim_series [3.0]
lahn_1_sim_series [nan]
...
"""
for device, name in self._device2name.items():
target = self.device2target[device]
if self.targetspecs.series:
values = target.series[idx1:idx2]
else:
values = target.values
if self.ndim == 0:
values = objecttools.repr_(float(values))
else:
values = objecttools.repr_list(values.tolist())
yield name, values | def function[yield_name2value, parameter[self, idx1, idx2]]:
constant[Sequentially return name-value-pairs describing the current state
of the target variables.
The names are automatically generated and contain both the name of
the |Device| of the respective |Variable| object and the target
description:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy.core.itemtools import SetItem
>>> item = GetItem('hland_v1', 'states.lz')
>>> item.collect_variables(pub.selections)
>>> hp.elements.land_dill.model.sequences.states.lz = 100.0
>>> for name, value in item.yield_name2value():
... print(name, value)
land_dill_states_lz 100.0
land_lahn_1_states_lz 8.18711
land_lahn_2_states_lz 10.14007
land_lahn_3_states_lz 7.52648
>>> item = GetItem('hland_v1', 'states.sm')
>>> item.collect_variables(pub.selections)
>>> hp.elements.land_dill.model.sequences.states.sm = 2.0
>>> for name, value in item.yield_name2value():
... print(name, value) # doctest: +ELLIPSIS
land_dill_states_sm [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
land_lahn_1_states_sm [99.27505, ..., 142.84148]
...
When querying time series, one can restrict the span of interest
by passing index values:
>>> item = GetItem('nodes', 'sim.series')
>>> item.collect_variables(pub.selections)
>>> hp.nodes.dill.sequences.sim.series = 1.0, 2.0, 3.0, 4.0
>>> for name, value in item.yield_name2value():
... print(name, value) # doctest: +ELLIPSIS
dill_sim_series [1.0, 2.0, 3.0, 4.0]
lahn_1_sim_series [nan, ...
...
>>> for name, value in item.yield_name2value(2, 3):
... print(name, value) # doctest: +ELLIPSIS
dill_sim_series [3.0]
lahn_1_sim_series [nan]
...
]
for taget[tuple[[<ast.Name object at 0x7da18dc99a80>, <ast.Name object at 0x7da18dc9b220>]]] in starred[call[name[self]._device2name.items, parameter[]]] begin[:]
variable[target] assign[=] call[name[self].device2target][name[device]]
if name[self].targetspecs.series begin[:]
variable[values] assign[=] call[name[target].series][<ast.Slice object at 0x7da18dc9a110>]
if compare[name[self].ndim equal[==] constant[0]] begin[:]
variable[values] assign[=] call[name[objecttools].repr_, parameter[call[name[float], parameter[name[values]]]]]
<ast.Yield object at 0x7da18bcc9de0> | keyword[def] identifier[yield_name2value] ( identifier[self] , identifier[idx1] = keyword[None] , identifier[idx2] = keyword[None] )-> identifier[Iterator] [ identifier[Tuple] [ identifier[str] , identifier[str] ]]:
literal[string]
keyword[for] identifier[device] , identifier[name] keyword[in] identifier[self] . identifier[_device2name] . identifier[items] ():
identifier[target] = identifier[self] . identifier[device2target] [ identifier[device] ]
keyword[if] identifier[self] . identifier[targetspecs] . identifier[series] :
identifier[values] = identifier[target] . identifier[series] [ identifier[idx1] : identifier[idx2] ]
keyword[else] :
identifier[values] = identifier[target] . identifier[values]
keyword[if] identifier[self] . identifier[ndim] == literal[int] :
identifier[values] = identifier[objecttools] . identifier[repr_] ( identifier[float] ( identifier[values] ))
keyword[else] :
identifier[values] = identifier[objecttools] . identifier[repr_list] ( identifier[values] . identifier[tolist] ())
keyword[yield] identifier[name] , identifier[values] | def yield_name2value(self, idx1=None, idx2=None) -> Iterator[Tuple[str, str]]:
"""Sequentially return name-value-pairs describing the current state
of the target variables.
The names are automatically generated and contain both the name of
the |Device| of the respective |Variable| object and the target
description:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy.core.itemtools import SetItem
>>> item = GetItem('hland_v1', 'states.lz')
>>> item.collect_variables(pub.selections)
>>> hp.elements.land_dill.model.sequences.states.lz = 100.0
>>> for name, value in item.yield_name2value():
... print(name, value)
land_dill_states_lz 100.0
land_lahn_1_states_lz 8.18711
land_lahn_2_states_lz 10.14007
land_lahn_3_states_lz 7.52648
>>> item = GetItem('hland_v1', 'states.sm')
>>> item.collect_variables(pub.selections)
>>> hp.elements.land_dill.model.sequences.states.sm = 2.0
>>> for name, value in item.yield_name2value():
... print(name, value) # doctest: +ELLIPSIS
land_dill_states_sm [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
land_lahn_1_states_sm [99.27505, ..., 142.84148]
...
When querying time series, one can restrict the span of interest
by passing index values:
>>> item = GetItem('nodes', 'sim.series')
>>> item.collect_variables(pub.selections)
>>> hp.nodes.dill.sequences.sim.series = 1.0, 2.0, 3.0, 4.0
>>> for name, value in item.yield_name2value():
... print(name, value) # doctest: +ELLIPSIS
dill_sim_series [1.0, 2.0, 3.0, 4.0]
lahn_1_sim_series [nan, ...
...
>>> for name, value in item.yield_name2value(2, 3):
... print(name, value) # doctest: +ELLIPSIS
dill_sim_series [3.0]
lahn_1_sim_series [nan]
...
"""
for (device, name) in self._device2name.items():
target = self.device2target[device]
if self.targetspecs.series:
values = target.series[idx1:idx2] # depends on [control=['if'], data=[]]
else:
values = target.values
if self.ndim == 0:
values = objecttools.repr_(float(values)) # depends on [control=['if'], data=[]]
else:
values = objecttools.repr_list(values.tolist())
yield (name, values) # depends on [control=['for'], data=[]] |
def _get_original_coverage(data, itype="target"):
"""Back compatible: get existing coverage files if they exist
"""
work_dir = os.path.join(_sv_workdir(data), "raw")
work_bam = dd.get_work_bam(data) or dd.get_align_bam(data)
out = []
base, _ = _bam_to_outbase(work_bam, work_dir, data)
target_cnn = "%s.targetcoverage.cnn" % base
anti_cnn = "%s.antitargetcoverage.cnn" % base
if os.path.exists(target_cnn) and os.path.exists(anti_cnn):
out.append({"bam": work_bam, "file": target_cnn, "cnntype": "target",
"itype": itype, "sample": dd.get_sample_name(data)})
out.append({"bam": work_bam, "file": anti_cnn, "cnntype": "antitarget",
"itype": itype, "sample": dd.get_sample_name(data)})
return out | def function[_get_original_coverage, parameter[data, itype]]:
constant[Back compatible: get existing coverage files if they exist
]
variable[work_dir] assign[=] call[name[os].path.join, parameter[call[name[_sv_workdir], parameter[name[data]]], constant[raw]]]
variable[work_bam] assign[=] <ast.BoolOp object at 0x7da1b1984970>
variable[out] assign[=] list[[]]
<ast.Tuple object at 0x7da1b1984040> assign[=] call[name[_bam_to_outbase], parameter[name[work_bam], name[work_dir], name[data]]]
variable[target_cnn] assign[=] binary_operation[constant[%s.targetcoverage.cnn] <ast.Mod object at 0x7da2590d6920> name[base]]
variable[anti_cnn] assign[=] binary_operation[constant[%s.antitargetcoverage.cnn] <ast.Mod object at 0x7da2590d6920> name[base]]
if <ast.BoolOp object at 0x7da1b1987e80> begin[:]
call[name[out].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1984d60>, <ast.Constant object at 0x7da1b1986ec0>, <ast.Constant object at 0x7da1b1987100>, <ast.Constant object at 0x7da1b1987a30>, <ast.Constant object at 0x7da1b1984fd0>], [<ast.Name object at 0x7da1b1984bb0>, <ast.Name object at 0x7da1b19858d0>, <ast.Constant object at 0x7da1b1986650>, <ast.Name object at 0x7da1b1986710>, <ast.Call object at 0x7da1b1984940>]]]]
call[name[out].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1984070>, <ast.Constant object at 0x7da1b19859f0>, <ast.Constant object at 0x7da1b1987790>, <ast.Constant object at 0x7da1b1986d40>, <ast.Constant object at 0x7da1b1984dc0>], [<ast.Name object at 0x7da1b1985cc0>, <ast.Name object at 0x7da1b1987160>, <ast.Constant object at 0x7da1b19862c0>, <ast.Name object at 0x7da1b1984b20>, <ast.Call object at 0x7da1b1985f60>]]]]
return[name[out]] | keyword[def] identifier[_get_original_coverage] ( identifier[data] , identifier[itype] = literal[string] ):
literal[string]
identifier[work_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[_sv_workdir] ( identifier[data] ), literal[string] )
identifier[work_bam] = identifier[dd] . identifier[get_work_bam] ( identifier[data] ) keyword[or] identifier[dd] . identifier[get_align_bam] ( identifier[data] )
identifier[out] =[]
identifier[base] , identifier[_] = identifier[_bam_to_outbase] ( identifier[work_bam] , identifier[work_dir] , identifier[data] )
identifier[target_cnn] = literal[string] % identifier[base]
identifier[anti_cnn] = literal[string] % identifier[base]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[target_cnn] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[anti_cnn] ):
identifier[out] . identifier[append] ({ literal[string] : identifier[work_bam] , literal[string] : identifier[target_cnn] , literal[string] : literal[string] ,
literal[string] : identifier[itype] , literal[string] : identifier[dd] . identifier[get_sample_name] ( identifier[data] )})
identifier[out] . identifier[append] ({ literal[string] : identifier[work_bam] , literal[string] : identifier[anti_cnn] , literal[string] : literal[string] ,
literal[string] : identifier[itype] , literal[string] : identifier[dd] . identifier[get_sample_name] ( identifier[data] )})
keyword[return] identifier[out] | def _get_original_coverage(data, itype='target'):
"""Back compatible: get existing coverage files if they exist
"""
work_dir = os.path.join(_sv_workdir(data), 'raw')
work_bam = dd.get_work_bam(data) or dd.get_align_bam(data)
out = []
(base, _) = _bam_to_outbase(work_bam, work_dir, data)
target_cnn = '%s.targetcoverage.cnn' % base
anti_cnn = '%s.antitargetcoverage.cnn' % base
if os.path.exists(target_cnn) and os.path.exists(anti_cnn):
out.append({'bam': work_bam, 'file': target_cnn, 'cnntype': 'target', 'itype': itype, 'sample': dd.get_sample_name(data)})
out.append({'bam': work_bam, 'file': anti_cnn, 'cnntype': 'antitarget', 'itype': itype, 'sample': dd.get_sample_name(data)}) # depends on [control=['if'], data=[]]
return out |
def tick(self, filename):
"""Try to connect and display messages in queue."""
if self.connection_attempts < 10:
# Trick to connect ASAP when
# plugin is started without
# user interaction (CursorMove)
self.setup(True, False)
self.connection_attempts += 1
self.unqueue_and_display(filename) | def function[tick, parameter[self, filename]]:
constant[Try to connect and display messages in queue.]
if compare[name[self].connection_attempts less[<] constant[10]] begin[:]
call[name[self].setup, parameter[constant[True], constant[False]]]
<ast.AugAssign object at 0x7da18fe93d90>
call[name[self].unqueue_and_display, parameter[name[filename]]] | keyword[def] identifier[tick] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[if] identifier[self] . identifier[connection_attempts] < literal[int] :
identifier[self] . identifier[setup] ( keyword[True] , keyword[False] )
identifier[self] . identifier[connection_attempts] += literal[int]
identifier[self] . identifier[unqueue_and_display] ( identifier[filename] ) | def tick(self, filename):
"""Try to connect and display messages in queue."""
if self.connection_attempts < 10:
# Trick to connect ASAP when
# plugin is started without
# user interaction (CursorMove)
self.setup(True, False)
self.connection_attempts += 1 # depends on [control=['if'], data=[]]
self.unqueue_and_display(filename) |
def ft2file(self, **kwargs):
""" return the name of the input ft2 file list
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['data_time'] = kwargs.get(
'data_time', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ft2file_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | def function[ft2file, parameter[self]]:
constant[ return the name of the input ft2 file list
]
variable[kwargs_copy] assign[=] call[name[self].base_dict.copy, parameter[]]
call[name[kwargs_copy].update, parameter[]]
call[name[kwargs_copy]][constant[data_time]] assign[=] call[name[kwargs].get, parameter[constant[data_time], call[name[self].dataset, parameter[]]]]
call[name[self]._replace_none, parameter[name[kwargs_copy]]]
variable[localpath] assign[=] call[name[NameFactory].ft2file_format.format, parameter[]]
if call[name[kwargs].get, parameter[constant[fullpath], constant[False]]] begin[:]
return[call[name[self].fullpath, parameter[]]]
return[name[localpath]] | keyword[def] identifier[ft2file] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs_copy] = identifier[self] . identifier[base_dict] . identifier[copy] ()
identifier[kwargs_copy] . identifier[update] (** identifier[kwargs] )
identifier[kwargs_copy] [ literal[string] ]= identifier[kwargs] . identifier[get] (
literal[string] , identifier[self] . identifier[dataset] (** identifier[kwargs] ))
identifier[self] . identifier[_replace_none] ( identifier[kwargs_copy] )
identifier[localpath] = identifier[NameFactory] . identifier[ft2file_format] . identifier[format] (** identifier[kwargs_copy] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
keyword[return] identifier[self] . identifier[fullpath] ( identifier[localpath] = identifier[localpath] )
keyword[return] identifier[localpath] | def ft2file(self, **kwargs):
""" return the name of the input ft2 file list
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['data_time'] = kwargs.get('data_time', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ft2file_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath) # depends on [control=['if'], data=[]]
return localpath |
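ft2file ultimately calls str.format on a class-level template; a self-contained sketch with a hypothetical template string (the real NameFactory.ft2file_format differs).
ft2file_format = "{basedir}/ft2_{data_time}.lst"  # hypothetical template
kwargs_copy = {"basedir": "/data", "data_time": "zmax100"}
print(ft2file_format.format(**kwargs_copy))  # -> /data/ft2_zmax100.lst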
def send_activation_email(self, user, profile, password, site):
"""
Custom send email method to supplied the activation link and
new generated password.
"""
ctx_dict = { 'password': password,
'site': site,
'activation_key': profile.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS}
subject = render_to_string(
'registration/email/emails/password_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/email/emails/password.txt',
ctx_dict)
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except:
pass | def function[send_activation_email, parameter[self, user, profile, password, site]]:
constant[
Custom send email method to supplied the activation link and
new generated password.
]
variable[ctx_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b2866470>, <ast.Constant object at 0x7da1b2866260>, <ast.Constant object at 0x7da1b28662c0>, <ast.Constant object at 0x7da1b2866230>], [<ast.Name object at 0x7da1b2866110>, <ast.Name object at 0x7da1b2866080>, <ast.Attribute object at 0x7da1b2866680>, <ast.Attribute object at 0x7da1b2867c70>]]
variable[subject] assign[=] call[name[render_to_string], parameter[constant[registration/email/emails/password_subject.txt], name[ctx_dict]]]
variable[subject] assign[=] call[constant[].join, parameter[call[name[subject].splitlines, parameter[]]]]
variable[message] assign[=] call[name[render_to_string], parameter[constant[registration/email/emails/password.txt], name[ctx_dict]]]
<ast.Try object at 0x7da1b28643d0> | keyword[def] identifier[send_activation_email] ( identifier[self] , identifier[user] , identifier[profile] , identifier[password] , identifier[site] ):
literal[string]
identifier[ctx_dict] ={ literal[string] : identifier[password] ,
literal[string] : identifier[site] ,
literal[string] : identifier[profile] . identifier[activation_key] ,
literal[string] : identifier[settings] . identifier[ACCOUNT_ACTIVATION_DAYS] }
identifier[subject] = identifier[render_to_string] (
literal[string] ,
identifier[ctx_dict] )
identifier[subject] = literal[string] . identifier[join] ( identifier[subject] . identifier[splitlines] ())
identifier[message] = identifier[render_to_string] ( literal[string] ,
identifier[ctx_dict] )
keyword[try] :
identifier[user] . identifier[email_user] ( identifier[subject] , identifier[message] , identifier[settings] . identifier[DEFAULT_FROM_EMAIL] )
keyword[except] :
keyword[pass] | def send_activation_email(self, user, profile, password, site):
"""
Custom send email method to supplied the activation link and
new generated password.
"""
ctx_dict = {'password': password, 'site': site, 'activation_key': profile.activation_key, 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS}
subject = render_to_string('registration/email/emails/password_subject.txt', ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/email/emails/password.txt', ctx_dict)
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] |
def random_sample(list_, nSample, strict=False, rng=None, seed=None):
"""
Grabs data randomly
Args:
list_ (list):
nSample (?):
strict (bool): (default = False)
rng (module): random number generator(default = numpy.random)
seed (None): (default = None)
Returns:
list: sample_list
CommandLine:
python -m utool.util_numpy --exec-random_sample
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = np.arange(10)
>>> nSample = 4
>>> strict = False
>>> rng = np.random.RandomState(0)
>>> seed = None
>>> sample_list = random_sample(list_, nSample, strict, rng, seed)
>>> result = ('sample_list = %s' % (str(sample_list),))
>>> print(result)
"""
rng = ensure_rng(seed if rng is None else rng)
if isinstance(list_, list):
list2_ = list_[:]
else:
list2_ = np.copy(list_)
if len(list2_) == 0 and not strict:
return list2_
rng.shuffle(list2_)
if nSample is None and strict is False:
return list2_
if not strict:
nSample = min(max(0, nSample), len(list2_))
sample_list = list2_[:nSample]
return sample_list | def function[random_sample, parameter[list_, nSample, strict, rng, seed]]:
constant[
Grabs data randomly
Args:
list_ (list):
nSample (?):
strict (bool): (default = False)
rng (module): random number generator(default = numpy.random)
seed (None): (default = None)
Returns:
list: sample_list
CommandLine:
python -m utool.util_numpy --exec-random_sample
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = np.arange(10)
>>> nSample = 4
>>> strict = False
>>> rng = np.random.RandomState(0)
>>> seed = None
>>> sample_list = random_sample(list_, nSample, strict, rng, seed)
>>> result = ('sample_list = %s' % (str(sample_list),))
>>> print(result)
]
variable[rng] assign[=] call[name[ensure_rng], parameter[<ast.IfExp object at 0x7da1b245f130>]]
if call[name[isinstance], parameter[name[list_], name[list]]] begin[:]
variable[list2_] assign[=] call[name[list_]][<ast.Slice object at 0x7da1b245d180>]
if <ast.BoolOp object at 0x7da1b245cf40> begin[:]
return[name[list2_]]
call[name[rng].shuffle, parameter[name[list2_]]]
if <ast.BoolOp object at 0x7da1b2490e50> begin[:]
return[name[list2_]]
if <ast.UnaryOp object at 0x7da1b2491600> begin[:]
variable[nSample] assign[=] call[name[min], parameter[call[name[max], parameter[constant[0], name[nSample]]], call[name[len], parameter[name[list2_]]]]]
variable[sample_list] assign[=] call[name[list2_]][<ast.Slice object at 0x7da1b2491240>]
return[name[sample_list]] | keyword[def] identifier[random_sample] ( identifier[list_] , identifier[nSample] , identifier[strict] = keyword[False] , identifier[rng] = keyword[None] , identifier[seed] = keyword[None] ):
literal[string]
identifier[rng] = identifier[ensure_rng] ( identifier[seed] keyword[if] identifier[rng] keyword[is] keyword[None] keyword[else] identifier[rng] )
keyword[if] identifier[isinstance] ( identifier[list_] , identifier[list] ):
identifier[list2_] = identifier[list_] [:]
keyword[else] :
identifier[list2_] = identifier[np] . identifier[copy] ( identifier[list_] )
keyword[if] identifier[len] ( identifier[list2_] )== literal[int] keyword[and] keyword[not] identifier[strict] :
keyword[return] identifier[list2_]
identifier[rng] . identifier[shuffle] ( identifier[list2_] )
keyword[if] identifier[nSample] keyword[is] keyword[None] keyword[and] identifier[strict] keyword[is] keyword[False] :
keyword[return] identifier[list2_]
keyword[if] keyword[not] identifier[strict] :
identifier[nSample] = identifier[min] ( identifier[max] ( literal[int] , identifier[nSample] ), identifier[len] ( identifier[list2_] ))
identifier[sample_list] = identifier[list2_] [: identifier[nSample] ]
keyword[return] identifier[sample_list] | def random_sample(list_, nSample, strict=False, rng=None, seed=None):
"""
Grabs data randomly
Args:
list_ (list):
nSample (?):
strict (bool): (default = False)
rng (module): random number generator(default = numpy.random)
seed (None): (default = None)
Returns:
list: sample_list
CommandLine:
python -m utool.util_numpy --exec-random_sample
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = np.arange(10)
>>> nSample = 4
>>> strict = False
>>> rng = np.random.RandomState(0)
>>> seed = None
>>> sample_list = random_sample(list_, nSample, strict, rng, seed)
>>> result = ('sample_list = %s' % (str(sample_list),))
>>> print(result)
"""
rng = ensure_rng(seed if rng is None else rng)
if isinstance(list_, list):
list2_ = list_[:] # depends on [control=['if'], data=[]]
else:
list2_ = np.copy(list_)
if len(list2_) == 0 and (not strict):
return list2_ # depends on [control=['if'], data=[]]
rng.shuffle(list2_)
if nSample is None and strict is False:
return list2_ # depends on [control=['if'], data=[]]
if not strict:
nSample = min(max(0, nSample), len(list2_)) # depends on [control=['if'], data=[]]
sample_list = list2_[:nSample]
return sample_list |
def iter_rels(self):
"""
Generate exactly one reference to each relationship in the package by
performing a depth-first traversal of the rels graph.
"""
def walk_rels(source, visited=None):
visited = [] if visited is None else visited
for rel in source.rels.values():
yield rel
if rel.is_external:
continue
part = rel.target_part
if part in visited:
continue
visited.append(part)
new_source = part
for rel in walk_rels(new_source, visited):
yield rel
for rel in walk_rels(self):
yield rel | def function[iter_rels, parameter[self]]:
constant[
Generate exactly one reference to each relationship in the package by
performing a depth-first traversal of the rels graph.
]
def function[walk_rels, parameter[source, visited]]:
variable[visited] assign[=] <ast.IfExp object at 0x7da1b1cba3e0>
for taget[name[rel]] in starred[call[name[source].rels.values, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b1cb8070>
if name[rel].is_external begin[:]
continue
variable[part] assign[=] name[rel].target_part
if compare[name[part] in name[visited]] begin[:]
continue
call[name[visited].append, parameter[name[part]]]
variable[new_source] assign[=] name[part]
for taget[name[rel]] in starred[call[name[walk_rels], parameter[name[new_source], name[visited]]]] begin[:]
<ast.Yield object at 0x7da1b1cbb6d0>
for taget[name[rel]] in starred[call[name[walk_rels], parameter[name[self]]]] begin[:]
<ast.Yield object at 0x7da1b1cba0e0> | keyword[def] identifier[iter_rels] ( identifier[self] ):
literal[string]
keyword[def] identifier[walk_rels] ( identifier[source] , identifier[visited] = keyword[None] ):
identifier[visited] =[] keyword[if] identifier[visited] keyword[is] keyword[None] keyword[else] identifier[visited]
keyword[for] identifier[rel] keyword[in] identifier[source] . identifier[rels] . identifier[values] ():
keyword[yield] identifier[rel]
keyword[if] identifier[rel] . identifier[is_external] :
keyword[continue]
identifier[part] = identifier[rel] . identifier[target_part]
keyword[if] identifier[part] keyword[in] identifier[visited] :
keyword[continue]
identifier[visited] . identifier[append] ( identifier[part] )
identifier[new_source] = identifier[part]
keyword[for] identifier[rel] keyword[in] identifier[walk_rels] ( identifier[new_source] , identifier[visited] ):
keyword[yield] identifier[rel]
keyword[for] identifier[rel] keyword[in] identifier[walk_rels] ( identifier[self] ):
keyword[yield] identifier[rel] | def iter_rels(self):
"""
Generate exactly one reference to each relationship in the package by
performing a depth-first traversal of the rels graph.
"""
def walk_rels(source, visited=None):
visited = [] if visited is None else visited
for rel in source.rels.values():
yield rel
if rel.is_external:
continue # depends on [control=['if'], data=[]]
part = rel.target_part
if part in visited:
continue # depends on [control=['if'], data=[]]
visited.append(part)
new_source = part
for rel in walk_rels(new_source, visited):
yield rel # depends on [control=['for'], data=['rel']] # depends on [control=['for'], data=['rel']]
for rel in walk_rels(self):
yield rel # depends on [control=['for'], data=['rel']] |
def ReadAllArtifacts(self, cursor=None):
"""Lists all artifacts that are stored in the database."""
cursor.execute("SELECT definition FROM artifacts")
return [_RowToArtifact(row) for row in cursor.fetchall()] | def function[ReadAllArtifacts, parameter[self, cursor]]:
constant[Lists all artifacts that are stored in the database.]
call[name[cursor].execute, parameter[constant[SELECT definition FROM artifacts]]]
return[<ast.ListComp object at 0x7da1b1b0ec80>] | keyword[def] identifier[ReadAllArtifacts] ( identifier[self] , identifier[cursor] = keyword[None] ):
literal[string]
identifier[cursor] . identifier[execute] ( literal[string] )
keyword[return] [ identifier[_RowToArtifact] ( identifier[row] ) keyword[for] identifier[row] keyword[in] identifier[cursor] . identifier[fetchall] ()] | def ReadAllArtifacts(self, cursor=None):
"""Lists all artifacts that are stored in the database."""
cursor.execute('SELECT definition FROM artifacts')
return [_RowToArtifact(row) for row in cursor.fetchall()] |
def create_or_login(resp):
"""This is called when login with OpenID succeeded and it's not
necessary to figure out if this is the user's first login or not.
This function has to redirect otherwise the user will be presented
with a terrible URL which we certainly don't want.
"""
session['openid'] = resp.identity_url
user = User.query.filter_by(openid=resp.identity_url).first()
if user is not None:
flash(u'Successfully signed in')
g.user = user
return redirect(oid.get_next_url())
return redirect(url_for('create_profile', next=oid.get_next_url(),
name=resp.fullname or resp.nickname,
email=resp.email)) | def function[create_or_login, parameter[resp]]:
constant[This is called when login with OpenID succeeded and it's not
necessary to figure out if this is the users's first login or not.
This function has to redirect otherwise the user will be presented
with a terrible URL which we certainly don't want.
]
call[name[session]][constant[openid]] assign[=] name[resp].identity_url
variable[user] assign[=] call[call[name[User].query.filter_by, parameter[]].first, parameter[]]
if compare[name[user] is_not constant[None]] begin[:]
call[name[flash], parameter[constant[Successfully signed in]]]
name[g].user assign[=] name[user]
return[call[name[redirect], parameter[call[name[oid].get_next_url, parameter[]]]]]
return[call[name[redirect], parameter[call[name[url_for], parameter[constant[create_profile]]]]]] | keyword[def] identifier[create_or_login] ( identifier[resp] ):
literal[string]
identifier[session] [ literal[string] ]= identifier[resp] . identifier[identity_url]
identifier[user] = identifier[User] . identifier[query] . identifier[filter_by] ( identifier[openid] = identifier[resp] . identifier[identity_url] ). identifier[first] ()
keyword[if] identifier[user] keyword[is] keyword[not] keyword[None] :
identifier[flash] ( literal[string] )
identifier[g] . identifier[user] = identifier[user]
keyword[return] identifier[redirect] ( identifier[oid] . identifier[get_next_url] ())
keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] , identifier[next] = identifier[oid] . identifier[get_next_url] (),
identifier[name] = identifier[resp] . identifier[fullname] keyword[or] identifier[resp] . identifier[nickname] ,
identifier[email] = identifier[resp] . identifier[email] )) | def create_or_login(resp):
"""This is called when login with OpenID succeeded and it's not
necessary to figure out if this is the user's first login or not.
This function has to redirect otherwise the user will be presented
with a terrible URL which we certainly don't want.
"""
session['openid'] = resp.identity_url
user = User.query.filter_by(openid=resp.identity_url).first()
if user is not None:
flash(u'Successfully signed in')
g.user = user
return redirect(oid.get_next_url()) # depends on [control=['if'], data=['user']]
return redirect(url_for('create_profile', next=oid.get_next_url(), name=resp.fullname or resp.nickname, email=resp.email)) |
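In Flask-OpenID a handler like this is normally registered through the extension's after_login hook; a sketch of the wiring, assuming the usual application setup.
from flask_openid import OpenID

oid = OpenID()  # typically OpenID(app) during app setup

@oid.after_login
def create_or_login(resp):
    ...  # body as defined above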
def eval_file(filename: str, ctx: compiler.CompilerContext, module: types.ModuleType):
"""Evaluate a file with the given name into a Python module AST node."""
last = None
for form in reader.read_file(filename, resolver=runtime.resolve_alias):
last = compiler.compile_and_exec_form(form, ctx, module)
return last | def function[eval_file, parameter[filename, ctx, module]]:
constant[Evaluate a file with the given name into a Python module AST node.]
variable[last] assign[=] constant[None]
for taget[name[form]] in starred[call[name[reader].read_file, parameter[name[filename]]]] begin[:]
variable[last] assign[=] call[name[compiler].compile_and_exec_form, parameter[name[form], name[ctx], name[module]]]
return[name[last]] | keyword[def] identifier[eval_file] ( identifier[filename] : identifier[str] , identifier[ctx] : identifier[compiler] . identifier[CompilerContext] , identifier[module] : identifier[types] . identifier[ModuleType] ):
literal[string]
identifier[last] = keyword[None]
keyword[for] identifier[form] keyword[in] identifier[reader] . identifier[read_file] ( identifier[filename] , identifier[resolver] = identifier[runtime] . identifier[resolve_alias] ):
identifier[last] = identifier[compiler] . identifier[compile_and_exec_form] ( identifier[form] , identifier[ctx] , identifier[module] )
keyword[return] identifier[last] | def eval_file(filename: str, ctx: compiler.CompilerContext, module: types.ModuleType):
"""Evaluate a file with the given name into a Python module AST node."""
last = None
for form in reader.read_file(filename, resolver=runtime.resolve_alias):
last = compiler.compile_and_exec_form(form, ctx, module) # depends on [control=['for'], data=['form']]
return last |
def re_line_and_indentation(base_indentation,
modifiers=(True, True)):
"""Returns a re matching newline + base_indentation.
modifiers is a tuple, (include_first, include_final).
If include_first, matches indentation at the beginning of the string.
If include_final, matches indentation at the end of the string.
Cached.
"""
cache = re_line_and_indentation.cache[modifiers]
compiled = cache.get(base_indentation, None)
if compiled is None:
[prefix, suffix] = re_line_and_indentation.tuple[modifiers]
compiled = cache[modifiers] = \
_re.compile(prefix + base_indentation + suffix)
return compiled | def function[re_line_and_indentation, parameter[base_indentation, modifiers]]:
constant[Returns a re matching newline + base_indentation.
modifiers is a tuple, (include_first, include_final).
If include_first, matches indentation at the beginning of the string.
If include_final, matches indentation at the end of the string.
Cached.
]
variable[cache] assign[=] call[name[re_line_and_indentation].cache][name[modifiers]]
variable[compiled] assign[=] call[name[cache].get, parameter[name[base_indentation], constant[None]]]
if compare[name[compiled] is constant[None]] begin[:]
<ast.List object at 0x7da18f811330> assign[=] call[name[re_line_and_indentation].tuple][name[modifiers]]
variable[compiled] assign[=] call[name[_re].compile, parameter[binary_operation[binary_operation[name[prefix] + name[base_indentation]] + name[suffix]]]]
return[name[compiled]] | keyword[def] identifier[re_line_and_indentation] ( identifier[base_indentation] ,
identifier[modifiers] =( keyword[True] , keyword[True] )):
literal[string]
identifier[cache] = identifier[re_line_and_indentation] . identifier[cache] [ identifier[modifiers] ]
identifier[compiled] = identifier[cache] . identifier[get] ( identifier[base_indentation] , keyword[None] )
keyword[if] identifier[compiled] keyword[is] keyword[None] :
[ identifier[prefix] , identifier[suffix] ]= identifier[re_line_and_indentation] . identifier[tuple] [ identifier[modifiers] ]
identifier[compiled] = identifier[cache] [ identifier[modifiers] ]= identifier[_re] . identifier[compile] ( identifier[prefix] + identifier[base_indentation] + identifier[suffix] )
keyword[return] identifier[compiled] | def re_line_and_indentation(base_indentation, modifiers=(True, True)):
"""Returns a re matching newline + base_indentation.
modifiers is a tuple, (include_first, include_final).
If include_first, matches indentation at the beginning of the string.
If include_final, matches indentation at the end of the string.
Cached.
"""
cache = re_line_and_indentation.cache[modifiers]
compiled = cache.get(base_indentation, None)
if compiled is None:
[prefix, suffix] = re_line_and_indentation.tuple[modifiers]
compiled = cache[modifiers] = _re.compile(prefix + base_indentation + suffix) # depends on [control=['if'], data=['compiled']]
return compiled |
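The function assumes its own cache and tuple attributes were initialized elsewhere; a plausible self-contained setup is sketched below — the prefix/suffix regex fragments are assumptions, not the source module's values.
import re as _re

# Hypothetical initialization; the real module's fragments may differ.
_prefix = {True: r"(?:\A|\n)", False: r"\n"}   # include_first
_suffix = {True: r"", False: r"(?!\Z)"}        # include_final
re_line_and_indentation.tuple = {
    (f, l): (_prefix[f], _suffix[l]) for f in (True, False) for l in (True, False)
}
re_line_and_indentation.cache = {m: {} for m in re_line_and_indentation.tuple}

pattern = re_line_and_indentation("    ")        # modifiers=(True, True)
print(bool(pattern.match("    indented line")))  # True: start-of-string case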
def create_table(self, table_name, model):
"""Create the model's table. Returns True if the table is being created, False otherwise.
Does not wait for the table to create, and does not validate an existing table.
Will not raise "ResourceInUseException" if the table exists or is being created.
:param str table_name: The name of the table to create for the model.
:param model: The :class:`~bloop.models.BaseModel` to create the table for.
:return: True if the table is being created, False if the table exists
:rtype: bool
"""
table = create_table_request(table_name, model)
try:
self.dynamodb_client.create_table(**table)
is_creating = True
except botocore.exceptions.ClientError as error:
handle_table_exists(error, model)
is_creating = False
return is_creating | def function[create_table, parameter[self, table_name, model]]:
constant[Create the model's table. Returns True if the table is being created, False otherwise.
Does not wait for the table to create, and does not validate an existing table.
Will not raise "ResourceInUseException" if the table exists or is being created.
:param str table_name: The name of the table to create for the model.
:param model: The :class:`~bloop.models.BaseModel` to create the table for.
:return: True if the table is being created, False if the table exists
:rtype: bool
]
variable[table] assign[=] call[name[create_table_request], parameter[name[table_name], name[model]]]
<ast.Try object at 0x7da1b0fc5c00>
return[name[is_creating]] | keyword[def] identifier[create_table] ( identifier[self] , identifier[table_name] , identifier[model] ):
literal[string]
identifier[table] = identifier[create_table_request] ( identifier[table_name] , identifier[model] )
keyword[try] :
identifier[self] . identifier[dynamodb_client] . identifier[create_table] (** identifier[table] )
identifier[is_creating] = keyword[True]
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[error] :
identifier[handle_table_exists] ( identifier[error] , identifier[model] )
identifier[is_creating] = keyword[False]
keyword[return] identifier[is_creating] | def create_table(self, table_name, model):
"""Create the model's table. Returns True if the table is being created, False otherwise.
Does not wait for the table to create, and does not validate an existing table.
Will not raise "ResourceInUseException" if the table exists or is being created.
:param str table_name: The name of the table to create for the model.
:param model: The :class:`~bloop.models.BaseModel` to create the table for.
:return: True if the table is being created, False if the table exists
:rtype: bool
"""
table = create_table_request(table_name, model)
try:
self.dynamodb_client.create_table(**table)
is_creating = True # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as error:
handle_table_exists(error, model)
is_creating = False # depends on [control=['except'], data=['error']]
return is_creating |
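A hedged illustration of the same create-if-absent idiom against a boto3 DynamoDB client; the error-code check stands in for bloop's handle_table_exists, whose body is not shown in this record:

import botocore.exceptions

def ensure_table(dynamodb_client, table_request):
    """Start table creation; True if creation was started, False if it already exists."""
    try:
        dynamodb_client.create_table(**table_request)
        return True
    except botocore.exceptions.ClientError as error:
        # "ResourceInUseException" signals the table exists or is already being created.
        if error.response["Error"]["Code"] != "ResourceInUseException":
            raise
        return False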
def _get_station_codes(self, force=False):
"""
Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True.
"""
if not force and self.station_codes is not None:
return self.station_codes
state_urls = self._get_state_urls()
# filter by bounding box against a shapefile
state_matches = None
if self.bbox:
with collection(
os.path.join(
"resources",
"ne_50m_admin_1_states_provinces_lakes_shp.shp",
),
"r",
) as c:
geom_matches = [
x["properties"] for x in c.filter(bbox=self.bbox)
]
state_matches = [
x["postal"] if x["admin"] != "Canada" else "CN"
for x in geom_matches
]
self.station_codes = []
for state_url in state_urls:
if state_matches is not None:
state_abbr = state_url.split("/")[-1].split(".")[0]
if state_abbr not in state_matches:
continue
self.station_codes.extend(self._get_stations_for_state(state_url))
if self.bbox:
# retrieve metadata for all stations to properly filter them
metadata = self._get_metadata(self.station_codes)
parsed_metadata = self.parser._parse_metadata(metadata)
def in_bbox(code):
lat = parsed_metadata[code]["latitude"]
lon = parsed_metadata[code]["longitude"]
return (
lon >= self.bbox[0]
and lon <= self.bbox[2]
and lat >= self.bbox[1]
and lat <= self.bbox[3]
)
self.station_codes = list(filter(in_bbox, self.station_codes))
return self.station_codes | def function[_get_station_codes, parameter[self, force]]:
constant[
Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True.
]
if <ast.BoolOp object at 0x7da1b24b6320> begin[:]
return[name[self].station_codes]
variable[state_urls] assign[=] call[name[self]._get_state_urls, parameter[]]
variable[state_matches] assign[=] constant[None]
if name[self].bbox begin[:]
with call[name[collection], parameter[call[name[os].path.join, parameter[constant[resources], constant[ne_50m_admin_1_states_provinces_lakes_shp.shp]]], constant[r]]] begin[:]
variable[geom_matches] assign[=] <ast.ListComp object at 0x7da1b24b6500>
variable[state_matches] assign[=] <ast.ListComp object at 0x7da1b24b6380>
name[self].station_codes assign[=] list[[]]
for taget[name[state_url]] in starred[name[state_urls]] begin[:]
if compare[name[state_matches] is_not constant[None]] begin[:]
variable[state_abbr] assign[=] call[call[call[call[name[state_url].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b24b7070>].split, parameter[constant[.]]]][constant[0]]
if compare[name[state_abbr] <ast.NotIn object at 0x7da2590d7190> name[state_matches]] begin[:]
continue
call[name[self].station_codes.extend, parameter[call[name[self]._get_stations_for_state, parameter[name[state_url]]]]]
if name[self].bbox begin[:]
variable[metadata] assign[=] call[name[self]._get_metadata, parameter[name[self].station_codes]]
variable[parsed_metadata] assign[=] call[name[self].parser._parse_metadata, parameter[name[metadata]]]
def function[in_bbox, parameter[code]]:
variable[lat] assign[=] call[call[name[parsed_metadata]][name[code]]][constant[latitude]]
variable[lon] assign[=] call[call[name[parsed_metadata]][name[code]]][constant[longitude]]
return[<ast.BoolOp object at 0x7da1b24b4280>]
name[self].station_codes assign[=] call[name[list], parameter[call[name[filter], parameter[name[in_bbox], name[self].station_codes]]]]
return[name[self].station_codes] | keyword[def] identifier[_get_station_codes] ( identifier[self] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[force] keyword[and] identifier[self] . identifier[station_codes] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[station_codes]
identifier[state_urls] = identifier[self] . identifier[_get_state_urls] ()
identifier[state_matches] = keyword[None]
keyword[if] identifier[self] . identifier[bbox] :
keyword[with] identifier[collection] (
identifier[os] . identifier[path] . identifier[join] (
literal[string] ,
literal[string] ,
),
literal[string] ,
) keyword[as] identifier[c] :
identifier[geom_matches] =[
identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[c] . identifier[filter] ( identifier[bbox] = identifier[self] . identifier[bbox] )
]
identifier[state_matches] =[
identifier[x] [ literal[string] ] keyword[if] identifier[x] [ literal[string] ]!= literal[string] keyword[else] literal[string]
keyword[for] identifier[x] keyword[in] identifier[geom_matches]
]
identifier[self] . identifier[station_codes] =[]
keyword[for] identifier[state_url] keyword[in] identifier[state_urls] :
keyword[if] identifier[state_matches] keyword[is] keyword[not] keyword[None] :
identifier[state_abbr] = identifier[state_url] . identifier[split] ( literal[string] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[state_abbr] keyword[not] keyword[in] identifier[state_matches] :
keyword[continue]
identifier[self] . identifier[station_codes] . identifier[extend] ( identifier[self] . identifier[_get_stations_for_state] ( identifier[state_url] ))
keyword[if] identifier[self] . identifier[bbox] :
identifier[metadata] = identifier[self] . identifier[_get_metadata] ( identifier[self] . identifier[station_codes] )
identifier[parsed_metadata] = identifier[self] . identifier[parser] . identifier[_parse_metadata] ( identifier[metadata] )
keyword[def] identifier[in_bbox] ( identifier[code] ):
identifier[lat] = identifier[parsed_metadata] [ identifier[code] ][ literal[string] ]
identifier[lon] = identifier[parsed_metadata] [ identifier[code] ][ literal[string] ]
keyword[return] (
identifier[lon] >= identifier[self] . identifier[bbox] [ literal[int] ]
keyword[and] identifier[lon] <= identifier[self] . identifier[bbox] [ literal[int] ]
keyword[and] identifier[lat] >= identifier[self] . identifier[bbox] [ literal[int] ]
keyword[and] identifier[lat] <= identifier[self] . identifier[bbox] [ literal[int] ]
)
identifier[self] . identifier[station_codes] = identifier[list] ( identifier[filter] ( identifier[in_bbox] , identifier[self] . identifier[station_codes] ))
keyword[return] identifier[self] . identifier[station_codes] | def _get_station_codes(self, force=False):
"""
Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True.
"""
if not force and self.station_codes is not None:
return self.station_codes # depends on [control=['if'], data=[]]
state_urls = self._get_state_urls()
# filter by bounding box against a shapefile
state_matches = None
if self.bbox:
with collection(os.path.join('resources', 'ne_50m_admin_1_states_provinces_lakes_shp.shp'), 'r') as c:
geom_matches = [x['properties'] for x in c.filter(bbox=self.bbox)]
state_matches = [x['postal'] if x['admin'] != 'Canada' else 'CN' for x in geom_matches] # depends on [control=['with'], data=['c']] # depends on [control=['if'], data=[]]
self.station_codes = []
for state_url in state_urls:
if state_matches is not None:
state_abbr = state_url.split('/')[-1].split('.')[0]
if state_abbr not in state_matches:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['state_matches']]
self.station_codes.extend(self._get_stations_for_state(state_url)) # depends on [control=['for'], data=['state_url']]
if self.bbox:
# retrieve metadata for all stations to properly filter them
metadata = self._get_metadata(self.station_codes)
parsed_metadata = self.parser._parse_metadata(metadata)
def in_bbox(code):
lat = parsed_metadata[code]['latitude']
lon = parsed_metadata[code]['longitude']
return lon >= self.bbox[0] and lon <= self.bbox[2] and (lat >= self.bbox[1]) and (lat <= self.bbox[3])
self.station_codes = list(filter(in_bbox, self.station_codes)) # depends on [control=['if'], data=[]]
return self.station_codes |
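The final filter reduces to an interval test against a (min_lon, min_lat, max_lon, max_lat) tuple; a self-contained sketch with hypothetical station coordinates:

def in_bbox(bbox, lon, lat):
    min_lon, min_lat, max_lon, max_lat = bbox
    return min_lon <= lon <= max_lon and min_lat <= lat <= max_lat

stations = {"KMIA": (-80.29, 25.79), "LFPG": (2.55, 49.01)}  # made-up metadata
florida = (-87.6, 24.5, -80.0, 31.0)
print([c for c, (lon, lat) in stations.items() if in_bbox(florida, lon, lat)])  # ['KMIA']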
def get_money_with_currency_format(self, amount):
"""
:type amount: int or float or str
Usage:
>>> currency = Currency('USD')
>>> currency.get_money_with_currency_format(13)
        '$13 USD'
        >>> currency.get_money_with_currency_format(13.99)
        '$13.99 USD'
        >>> currency.get_money_with_currency_format('13,2313,33')
        '$13,2313,33 USD'
:rtype: str
"""
return self.money_formats[
self.get_money_currency()
]['money_with_currency_format'].format(amount=amount) | def function[get_money_with_currency_format, parameter[self, amount]]:
constant[
:type amount: int or float or str
Usage:
>>> currency = Currency('USD')
>>> currency.get_money_with_currency_format(13)
        '$13 USD'
        >>> currency.get_money_with_currency_format(13.99)
        '$13.99 USD'
        >>> currency.get_money_with_currency_format('13,2313,33')
        '$13,2313,33 USD'
:rtype: str
]
return[call[call[call[name[self].money_formats][call[name[self].get_money_currency, parameter[]]]][constant[money_with_currency_format]].format, parameter[]]] | keyword[def] identifier[get_money_with_currency_format] ( identifier[self] , identifier[amount] ):
literal[string]
keyword[return] identifier[self] . identifier[money_formats] [
identifier[self] . identifier[get_money_currency] ()
][ literal[string] ]. identifier[format] ( identifier[amount] = identifier[amount] ) | def get_money_with_currency_format(self, amount):
"""
:type amount: int or float or str
Usage:
>>> currency = Currency('USD')
>>> currency.get_money_with_currency_format(13)
        '$13 USD'
        >>> currency.get_money_with_currency_format(13.99)
        '$13.99 USD'
        >>> currency.get_money_with_currency_format('13,2313,33')
        '$13,2313,33 USD'
:rtype: str
"""
return self.money_formats[self.get_money_currency()]['money_with_currency_format'].format(amount=amount) |
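The method is a two-level dictionary lookup followed by str.format; a sketch with an illustrative money_formats table (not the library's real data):

money_formats = {
    "USD": {"money_with_currency_format": "${amount} USD"},
    "EUR": {"money_with_currency_format": "{amount} EUR"},
}

def format_money(currency_code, amount):
    return money_formats[currency_code]["money_with_currency_format"].format(amount=amount)

print(format_money("USD", 13.99))  # $13.99 USD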
def lz (inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score-mean(inlist))/samplestdev(inlist)
return z | def function[lz, parameter[inlist, score]]:
constant[
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
]
variable[z] assign[=] binary_operation[binary_operation[name[score] - call[name[mean], parameter[name[inlist]]]] / call[name[samplestdev], parameter[name[inlist]]]]
return[name[z]] | keyword[def] identifier[lz] ( identifier[inlist] , identifier[score] ):
literal[string]
identifier[z] =( identifier[score] - identifier[mean] ( identifier[inlist] ))/ identifier[samplestdev] ( identifier[inlist] )
keyword[return] identifier[z] | def lz(inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score - mean(inlist)) / samplestdev(inlist)
return z |
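With the usual definitions of the helpers (mean and samplestdev live elsewhere in the library, so these implementations are assumed), the z-score works out as:

def mean(xs):
    return sum(xs) / len(xs)

def samplestdev(xs):
    m = mean(xs)
    return (sum((x - m) ** 2 for x in xs) / (len(xs) - 1)) ** 0.5

def lz(inlist, score):
    return (score - mean(inlist)) / samplestdev(inlist)

print(lz([1, 2, 3, 4, 5], 5))  # ~1.265: the score sits about 1.26 sample SDs above the mean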
def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False,
material=None, dangles='some', sodium=1.0, magnesium=0.0):
'''Compute the pair probabilities for an ordered complex of strands.
Runs the \'pairs\' command.
:param strand: Strand on which to run pairs. Strands must be either
coral.DNA or coral.RNA).
:type strand: list
:param cutoff: Only probabilities above this cutoff appear in the
output.
:type cutoff: float
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
:type magnesium: float
:returns: The probability matrix, where the (i, j)th entry
is the probability that base i is bound to base j. The matrix
is augmented (it's N+1 by N+1, where N is the number of bases
in the sequence) with an (N+1)th column containing the
probability that each base is unpaired.
:rtype: numpy.array
'''
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
magnesium, multi=False)
# Set up the input file and run the command. Note: no STDOUT
lines = [str(strand)]
self._run('pairs', cmd_args, lines)
# Read the output from file
ppairs = self._read_tempfile('pairs.ppairs')
        data = re.search(r'\n\n\d*\n(.*)', ppairs, flags=re.DOTALL).group(1)
N = len(strand)
data_lines = [line.split('\t') for line in data.split('\n') if line]
prob_matrix = self._pairs_to_np(data_lines, N)
return prob_matrix | def function[pairs, parameter[self, strand, cutoff, temp, pseudo, material, dangles, sodium, magnesium]]:
constant[Compute the pair probabilities for an ordered complex of strands.
Runs the 'pairs' command.
:param strand: Strand on which to run pairs. Strands must be either
coral.DNA or coral.RNA).
:type strand: list
:param cutoff: Only probabilities above this cutoff appear in the
output.
:type cutoff: float
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For 'none': Dangle energies are ignored.
For 'some': 'A dangle energy is incorporated for
each unpaired base flanking a duplex'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA>
:type magnesium: float
:returns: The probability matrix, where the (i, j)th entry
is the probability that base i is bound to base j. The matrix
is augmented (it's N+1 by N+1, where N is the number of bases
in the sequence) with an (N+1)th column containing the
probability that each base is unpaired.
:rtype: numpy.array
]
variable[material] assign[=] call[name[self]._set_material, parameter[name[strand], name[material]]]
variable[cmd_args] assign[=] call[name[self]._prep_cmd_args, parameter[name[temp], name[dangles], name[material], name[pseudo], name[sodium], name[magnesium]]]
variable[lines] assign[=] list[[<ast.Call object at 0x7da1b0527b80>]]
call[name[self]._run, parameter[constant[pairs], name[cmd_args], name[lines]]]
variable[ppairs] assign[=] call[name[self]._read_tempfile, parameter[constant[pairs.ppairs]]]
variable[data] assign[=] call[call[name[re].search, parameter[constant[
\d*
(.*)], name[ppairs]]].group, parameter[constant[1]]]
variable[N] assign[=] call[name[len], parameter[name[strand]]]
variable[data_lines] assign[=] <ast.ListComp object at 0x7da1b0525870>
variable[prob_matrix] assign[=] call[name[self]._pairs_to_np, parameter[name[data_lines], name[N]]]
return[name[prob_matrix]] | keyword[def] identifier[pairs] ( identifier[self] , identifier[strand] , identifier[cutoff] = literal[int] , identifier[temp] = literal[int] , identifier[pseudo] = keyword[False] ,
identifier[material] = keyword[None] , identifier[dangles] = literal[string] , identifier[sodium] = literal[int] , identifier[magnesium] = literal[int] ):
literal[string]
identifier[material] = identifier[self] . identifier[_set_material] ( identifier[strand] , identifier[material] )
identifier[cmd_args] = identifier[self] . identifier[_prep_cmd_args] ( identifier[temp] , identifier[dangles] , identifier[material] , identifier[pseudo] , identifier[sodium] ,
identifier[magnesium] , identifier[multi] = keyword[False] )
identifier[lines] =[ identifier[str] ( identifier[strand] )]
identifier[self] . identifier[_run] ( literal[string] , identifier[cmd_args] , identifier[lines] )
identifier[ppairs] = identifier[self] . identifier[_read_tempfile] ( literal[string] )
identifier[data] = identifier[re] . identifier[search] ( literal[string] , identifier[ppairs] , identifier[flags] = identifier[re] . identifier[DOTALL] ). identifier[group] ( literal[int] )
identifier[N] = identifier[len] ( identifier[strand] )
identifier[data_lines] =[ identifier[line] . identifier[split] ( literal[string] ) keyword[for] identifier[line] keyword[in] identifier[data] . identifier[split] ( literal[string] ) keyword[if] identifier[line] ]
identifier[prob_matrix] = identifier[self] . identifier[_pairs_to_np] ( identifier[data_lines] , identifier[N] )
keyword[return] identifier[prob_matrix] | def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False, material=None, dangles='some', sodium=1.0, magnesium=0.0):
"""Compute the pair probabilities for an ordered complex of strands.
Runs the 'pairs' command.
:param strand: Strand on which to run pairs. Strands must be either
coral.DNA or coral.RNA).
:type strand: list
:param cutoff: Only probabilities above this cutoff appear in the
output.
:type cutoff: float
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For 'none': Dangle energies are ignored.
For 'some': 'A dangle energy is incorporated for
each unpaired base flanking a duplex'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
:type magnesium: float
:returns: The probability matrix, where the (i, j)th entry
is the probability that base i is bound to base j. The matrix
is augmented (it's N+1 by N+1, where N is the number of bases
in the sequence) with an (N+1)th column containing the
probability that each base is unpaired.
:rtype: numpy.array
"""
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium, magnesium, multi=False)
# Set up the input file and run the command. Note: no STDOUT
lines = [str(strand)]
self._run('pairs', cmd_args, lines)
# Read the output from file
ppairs = self._read_tempfile('pairs.ppairs')
data = re.search('\n\n\\d*\n(.*)', ppairs, flags=re.DOTALL).group(1)
N = len(strand)
data_lines = [line.split('\t') for line in data.split('\n') if line]
prob_matrix = self._pairs_to_np(data_lines, N)
return prob_matrix |
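The ppairs output parsed above is a sparse list of 1-indexed (i, j, probability) rows, with j == N+1 marking the unpaired column; a plausible sketch of what a _pairs_to_np-style helper does (the real implementation is not shown here):

import numpy as np

def pairs_to_np(data_lines, N):
    """Sparse (i, j, prob) rows -> augmented (N + 1) x (N + 1) matrix."""
    mat = np.zeros((N + 1, N + 1))
    for i, j, prob in data_lines:
        mat[int(i) - 1, int(j) - 1] = float(prob)
    return mat

rows = [("1", "4", "0.90"), ("2", "3", "0.85"), ("1", "5", "0.10")]  # toy data, N = 4
print(pairs_to_np(rows, 4)[0])  # [0.  0.  0.  0.9 0.1]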
def get_session_conf_dir(self, cleanup=False):
"""
Tries to find the session configuration directory by looking in ~/.dnanexus_config/sessions/<PID>,
        where <PID> is the PID of the parent of this process, then its parent, and so on.
If none of those exist, the path for the immediate parent is given, even if it doesn't exist.
If *cleanup* is True, looks up and deletes all session configuration directories that belong to nonexistent
processes.
"""
sessions_dir = os.path.join(self._user_conf_dir, "sessions")
try:
from psutil import Process, pid_exists
if cleanup:
try:
session_dirs = os.listdir(sessions_dir)
except OSError as e:
# Silently skip cleanup and continue if we are unable to
# enumerate the session directories for any reason
# (including, most commonly, because the sessions dir
# doesn't exist)
session_dirs = []
for session_dir in session_dirs:
try:
session_pid = int(session_dir)
except ValueError:
# If dir name doesn't look like an int, leave it
# alone
continue
if not pid_exists(session_pid):
rmtree(os.path.join(sessions_dir, session_dir), ignore_errors=True)
parent_process = Process(os.getpid()).parent()
default_session_dir = os.path.join(sessions_dir, str(parent_process.pid))
while parent_process is not None and parent_process.pid != 0:
session_dir = os.path.join(sessions_dir, str(parent_process.pid))
if os.path.exists(session_dir):
return session_dir
parent_process = parent_process.parent()
return default_session_dir
except (ImportError, IOError, AttributeError) as e:
# We don't bundle psutil with Windows, so failure to import
# psutil would be expected.
if platform.system() != 'Windows':
warn(fill("Error while retrieving session configuration: " + format_exception(e)))
except Exception as e:
warn(fill("Unexpected error while retrieving session configuration: " + format_exception(e)))
return self._get_ppid_session_conf_dir(sessions_dir) | def function[get_session_conf_dir, parameter[self, cleanup]]:
constant[
Tries to find the session configuration directory by looking in ~/.dnanexus_config/sessions/<PID>,
        where <PID> is the PID of the parent of this process, then its parent, and so on.
If none of those exist, the path for the immediate parent is given, even if it doesn't exist.
If *cleanup* is True, looks up and deletes all session configuration directories that belong to nonexistent
processes.
]
variable[sessions_dir] assign[=] call[name[os].path.join, parameter[name[self]._user_conf_dir, constant[sessions]]]
<ast.Try object at 0x7da18dc9bbb0>
return[call[name[self]._get_ppid_session_conf_dir, parameter[name[sessions_dir]]]] | keyword[def] identifier[get_session_conf_dir] ( identifier[self] , identifier[cleanup] = keyword[False] ):
literal[string]
identifier[sessions_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_user_conf_dir] , literal[string] )
keyword[try] :
keyword[from] identifier[psutil] keyword[import] identifier[Process] , identifier[pid_exists]
keyword[if] identifier[cleanup] :
keyword[try] :
identifier[session_dirs] = identifier[os] . identifier[listdir] ( identifier[sessions_dir] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[session_dirs] =[]
keyword[for] identifier[session_dir] keyword[in] identifier[session_dirs] :
keyword[try] :
identifier[session_pid] = identifier[int] ( identifier[session_dir] )
keyword[except] identifier[ValueError] :
keyword[continue]
keyword[if] keyword[not] identifier[pid_exists] ( identifier[session_pid] ):
identifier[rmtree] ( identifier[os] . identifier[path] . identifier[join] ( identifier[sessions_dir] , identifier[session_dir] ), identifier[ignore_errors] = keyword[True] )
identifier[parent_process] = identifier[Process] ( identifier[os] . identifier[getpid] ()). identifier[parent] ()
identifier[default_session_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[sessions_dir] , identifier[str] ( identifier[parent_process] . identifier[pid] ))
keyword[while] identifier[parent_process] keyword[is] keyword[not] keyword[None] keyword[and] identifier[parent_process] . identifier[pid] != literal[int] :
identifier[session_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[sessions_dir] , identifier[str] ( identifier[parent_process] . identifier[pid] ))
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[session_dir] ):
keyword[return] identifier[session_dir]
identifier[parent_process] = identifier[parent_process] . identifier[parent] ()
keyword[return] identifier[default_session_dir]
keyword[except] ( identifier[ImportError] , identifier[IOError] , identifier[AttributeError] ) keyword[as] identifier[e] :
keyword[if] identifier[platform] . identifier[system] ()!= literal[string] :
identifier[warn] ( identifier[fill] ( literal[string] + identifier[format_exception] ( identifier[e] )))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[warn] ( identifier[fill] ( literal[string] + identifier[format_exception] ( identifier[e] )))
keyword[return] identifier[self] . identifier[_get_ppid_session_conf_dir] ( identifier[sessions_dir] ) | def get_session_conf_dir(self, cleanup=False):
"""
Tries to find the session configuration directory by looking in ~/.dnanexus_config/sessions/<PID>,
        where <PID> is the PID of the parent of this process, then its parent, and so on.
If none of those exist, the path for the immediate parent is given, even if it doesn't exist.
If *cleanup* is True, looks up and deletes all session configuration directories that belong to nonexistent
processes.
"""
sessions_dir = os.path.join(self._user_conf_dir, 'sessions')
try:
from psutil import Process, pid_exists
if cleanup:
try:
session_dirs = os.listdir(sessions_dir) # depends on [control=['try'], data=[]]
except OSError as e:
# Silently skip cleanup and continue if we are unable to
# enumerate the session directories for any reason
# (including, most commonly, because the sessions dir
# doesn't exist)
session_dirs = [] # depends on [control=['except'], data=[]]
for session_dir in session_dirs:
try:
session_pid = int(session_dir) # depends on [control=['try'], data=[]]
except ValueError:
# If dir name doesn't look like an int, leave it
# alone
continue # depends on [control=['except'], data=[]]
if not pid_exists(session_pid):
rmtree(os.path.join(sessions_dir, session_dir), ignore_errors=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['session_dir']] # depends on [control=['if'], data=[]]
parent_process = Process(os.getpid()).parent()
default_session_dir = os.path.join(sessions_dir, str(parent_process.pid))
while parent_process is not None and parent_process.pid != 0:
session_dir = os.path.join(sessions_dir, str(parent_process.pid))
if os.path.exists(session_dir):
return session_dir # depends on [control=['if'], data=[]]
parent_process = parent_process.parent() # depends on [control=['while'], data=[]]
return default_session_dir # depends on [control=['try'], data=[]]
except (ImportError, IOError, AttributeError) as e:
# We don't bundle psutil with Windows, so failure to import
# psutil would be expected.
if platform.system() != 'Windows':
warn(fill('Error while retrieving session configuration: ' + format_exception(e))) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
except Exception as e:
warn(fill('Unexpected error while retrieving session configuration: ' + format_exception(e))) # depends on [control=['except'], data=['e']]
return self._get_ppid_session_conf_dir(sessions_dir) |
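Isolated from the cleanup logic, the ancestor walk relies on psutil's Process.parent(); first_existing_session_dir is an illustrative name for the core loop:

import os
from psutil import Process

def first_existing_session_dir(sessions_dir):
    proc = Process(os.getpid()).parent()
    default = os.path.join(sessions_dir, str(proc.pid))  # immediate parent, even if absent
    while proc is not None and proc.pid != 0:
        candidate = os.path.join(sessions_dir, str(proc.pid))
        if os.path.exists(candidate):
            return candidate
        proc = proc.parent()
    return default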
def save_metadata(self, file_path):
"""Saves a json file of the search result metadata.
Saves a json file of the search result metadata from :class:`api.results`.metadata.
Args:
file_path (str):
Path to the json file to save metadata to.
"""
data = self.metadata
with open(file_path, 'w') as out_file:
json.dump(data, out_file) | def function[save_metadata, parameter[self, file_path]]:
constant[Saves a json file of the search result metadata.
Saves a json file of the search result metadata from :class:`api.results`.metadata.
Args:
file_path (str):
Path to the json file to save metadata to.
]
variable[data] assign[=] name[self].metadata
with call[name[open], parameter[name[file_path], constant[w]]] begin[:]
call[name[json].dump, parameter[name[data], name[out_file]]] | keyword[def] identifier[save_metadata] ( identifier[self] , identifier[file_path] ):
literal[string]
identifier[data] = identifier[self] . identifier[metadata]
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[out_file] :
identifier[json] . identifier[dump] ( identifier[data] , identifier[out_file] ) | def save_metadata(self, file_path):
"""Saves a json file of the search result metadata.
Saves a json file of the search result metadata from :class:`api.results`.metadata.
Args:
file_path (str):
Path to the json file to save metadata to.
"""
data = self.metadata
with open(file_path, 'w') as out_file:
json.dump(data, out_file) # depends on [control=['with'], data=['out_file']] |
def redshift_from_comoving_volume(vc, **kwargs):
r"""Returns the redshift from the given comoving volume.
Parameters
----------
vc : float
The comoving volume, in units of cubed Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift at the given comoving volume.
"""
cosmology = get_cosmology(**kwargs)
return z_at_value(cosmology.comoving_volume, vc, units.Mpc**3) | def function[redshift_from_comoving_volume, parameter[vc]]:
constant[Returns the redshift from the given comoving volume.
Parameters
----------
vc : float
The comoving volume, in units of cubed Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift at the given comoving volume.
]
variable[cosmology] assign[=] call[name[get_cosmology], parameter[]]
return[call[name[z_at_value], parameter[name[cosmology].comoving_volume, name[vc], binary_operation[name[units].Mpc ** constant[3]]]]] | keyword[def] identifier[redshift_from_comoving_volume] ( identifier[vc] ,** identifier[kwargs] ):
literal[string]
identifier[cosmology] = identifier[get_cosmology] (** identifier[kwargs] )
keyword[return] identifier[z_at_value] ( identifier[cosmology] . identifier[comoving_volume] , identifier[vc] , identifier[units] . identifier[Mpc] ** literal[int] ) | def redshift_from_comoving_volume(vc, **kwargs):
"""Returns the redshift from the given comoving volume.
Parameters
----------
vc : float
The comoving volume, in units of cubed Mpc.
\\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift at the given comoving volume.
"""
cosmology = get_cosmology(**kwargs)
return z_at_value(cosmology.comoving_volume, vc, units.Mpc ** 3) |
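The z_at_value here appears to be a local wrapper that also takes a unit argument; astropy's own z_at_value expects a Quantity instead. A direct astropy sketch (cosmology choice arbitrary):

from astropy import units as u
from astropy.cosmology import Planck15, z_at_value

# Redshift at which the comoving volume reaches 1 Gpc^3
z = z_at_value(Planck15.comoving_volume, 1e9 * u.Mpc ** 3)
print(z)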
def get_epoch_namespace_lifetime_grace_period( block_height, namespace_id ):
"""
what's the namespace lifetime grace period for this epoch?
"""
epoch_config = get_epoch_config( block_height )
if epoch_config['namespaces'].has_key(namespace_id):
return epoch_config['namespaces'][namespace_id]['NAMESPACE_LIFETIME_GRACE_PERIOD']
else:
return epoch_config['namespaces']['*']['NAMESPACE_LIFETIME_GRACE_PERIOD'] | def function[get_epoch_namespace_lifetime_grace_period, parameter[block_height, namespace_id]]:
constant[
what's the namespace lifetime grace period for this epoch?
]
variable[epoch_config] assign[=] call[name[get_epoch_config], parameter[name[block_height]]]
if call[call[name[epoch_config]][constant[namespaces]].has_key, parameter[name[namespace_id]]] begin[:]
return[call[call[call[name[epoch_config]][constant[namespaces]]][name[namespace_id]]][constant[NAMESPACE_LIFETIME_GRACE_PERIOD]]] | keyword[def] identifier[get_epoch_namespace_lifetime_grace_period] ( identifier[block_height] , identifier[namespace_id] ):
literal[string]
identifier[epoch_config] = identifier[get_epoch_config] ( identifier[block_height] )
keyword[if] identifier[epoch_config] [ literal[string] ]. identifier[has_key] ( identifier[namespace_id] ):
keyword[return] identifier[epoch_config] [ literal[string] ][ identifier[namespace_id] ][ literal[string] ]
keyword[else] :
keyword[return] identifier[epoch_config] [ literal[string] ][ literal[string] ][ literal[string] ] | def get_epoch_namespace_lifetime_grace_period(block_height, namespace_id):
"""
what's the namespace lifetime grace period for this epoch?
"""
epoch_config = get_epoch_config(block_height)
if epoch_config['namespaces'].has_key(namespace_id):
return epoch_config['namespaces'][namespace_id]['NAMESPACE_LIFETIME_GRACE_PERIOD'] # depends on [control=['if'], data=[]]
else:
return epoch_config['namespaces']['*']['NAMESPACE_LIFETIME_GRACE_PERIOD'] |
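The per-namespace lookup with a '*' wildcard fallback is the classic dict.get pattern (has_key is the Python 2 spelling); a compact sketch with toy config values:

epoch_config = {"namespaces": {
    "test": {"NAMESPACE_LIFETIME_GRACE_PERIOD": 5000},
    "*": {"NAMESPACE_LIFETIME_GRACE_PERIOD": 0},
}}

def grace_period(namespace_id):
    namespaces = epoch_config["namespaces"]
    return namespaces.get(namespace_id, namespaces["*"])["NAMESPACE_LIFETIME_GRACE_PERIOD"]

print(grace_period("test"), grace_period("unknown"))  # 5000 0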
def ucnstring_to_python(ucn_string):
"""
Return string with Unicode UCN (e.g. "U+4E00") to native Python Unicode
(u'\\u4e00').
"""
res = re.findall("U\+[0-9a-fA-F]*", ucn_string)
for r in res:
ucn_string = ucn_string.replace(text_type(r), text_type(ucn_to_unicode(r)))
ucn_string = ucn_string.encode('utf-8')
assert isinstance(ucn_string, bytes)
return ucn_string | def function[ucnstring_to_python, parameter[ucn_string]]:
constant[
Return string with Unicode UCN (e.g. "U+4E00") to native Python Unicode
(u'\u4e00').
]
variable[res] assign[=] call[name[re].findall, parameter[constant[U\+[0-9a-fA-F]*], name[ucn_string]]]
for taget[name[r]] in starred[name[res]] begin[:]
variable[ucn_string] assign[=] call[name[ucn_string].replace, parameter[call[name[text_type], parameter[name[r]]], call[name[text_type], parameter[call[name[ucn_to_unicode], parameter[name[r]]]]]]]
variable[ucn_string] assign[=] call[name[ucn_string].encode, parameter[constant[utf-8]]]
assert[call[name[isinstance], parameter[name[ucn_string], name[bytes]]]]
return[name[ucn_string]] | keyword[def] identifier[ucnstring_to_python] ( identifier[ucn_string] ):
literal[string]
identifier[res] = identifier[re] . identifier[findall] ( literal[string] , identifier[ucn_string] )
keyword[for] identifier[r] keyword[in] identifier[res] :
identifier[ucn_string] = identifier[ucn_string] . identifier[replace] ( identifier[text_type] ( identifier[r] ), identifier[text_type] ( identifier[ucn_to_unicode] ( identifier[r] )))
identifier[ucn_string] = identifier[ucn_string] . identifier[encode] ( literal[string] )
keyword[assert] identifier[isinstance] ( identifier[ucn_string] , identifier[bytes] )
keyword[return] identifier[ucn_string] | def ucnstring_to_python(ucn_string):
"""
Return string with Unicode UCN (e.g. "U+4E00") to native Python Unicode
(u'\\u4e00').
"""
res = re.findall('U\\+[0-9a-fA-F]*', ucn_string)
for r in res:
ucn_string = ucn_string.replace(text_type(r), text_type(ucn_to_unicode(r))) # depends on [control=['for'], data=['r']]
ucn_string = ucn_string.encode('utf-8')
assert isinstance(ucn_string, bytes)
return ucn_string |
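With a chr/int stand-in for ucn_to_unicode (a helper defined elsewhere, so this implementation is assumed), the substitution behaves like:

import re

def ucn_to_unicode(ucn):
    # Hypothetical helper: 'U+4E00' -> the corresponding character.
    return chr(int(ucn[2:], 16))

def ucn_string_to_bytes(text):
    for match in re.findall(r"U\+[0-9a-fA-F]+", text):
        text = text.replace(match, ucn_to_unicode(match))
    return text.encode("utf-8")

print(ucn_string_to_bytes("U+4E00 means 'one'"))  # b"\xe4\xb8\x80 means 'one'"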
def fourier(x, N):
"""Fourier approximation with N terms"""
term = 0.
for n in range(1, N, 2):
term += (1. / n) * math.sin(n * math.pi * x / L)
return (4. / (math.pi)) * term | def function[fourier, parameter[x, N]]:
constant[Fourier approximation with N terms]
variable[term] assign[=] constant[0.0]
for taget[name[n]] in starred[call[name[range], parameter[constant[1], name[N], constant[2]]]] begin[:]
<ast.AugAssign object at 0x7da1b207d210>
return[binary_operation[binary_operation[constant[4.0] / name[math].pi] * name[term]]] | keyword[def] identifier[fourier] ( identifier[x] , identifier[N] ):
literal[string]
identifier[term] = literal[int]
keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[N] , literal[int] ):
identifier[term] +=( literal[int] / identifier[n] )* identifier[math] . identifier[sin] ( identifier[n] * identifier[math] . identifier[pi] * identifier[x] / identifier[L] )
keyword[return] ( literal[int] /( identifier[math] . identifier[pi] ))* identifier[term] | def fourier(x, N):
"""Fourier approximation with N terms"""
term = 0.0
for n in range(1, N, 2):
term += 1.0 / n * math.sin(n * math.pi * x / L) # depends on [control=['for'], data=['n']]
return 4.0 / math.pi * term |
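This is the partial Fourier sum of a square wave; L is read from the enclosing scope in the record, so it is pinned to an assumed value below. On the plateau the sum converges to 1:

import math

L = 1.0  # assumed half-period

def fourier(x, N):
    term = 0.0
    for n in range(1, N, 2):  # odd harmonics only
        term += (1.0 / n) * math.sin(n * math.pi * x / L)
    return (4.0 / math.pi) * term

print(fourier(0.5, 201))  # ~= 1.0 at the center of the plateau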
def pause(self, length=None, **kwargs):
"""
Create a <Pause> element
:param length: Length in seconds to pause
:param kwargs: additional attributes
:returns: <Pause> element
"""
return self.nest(Pause(length=length, **kwargs)) | def function[pause, parameter[self, length]]:
constant[
Create a <Pause> element
:param length: Length in seconds to pause
:param kwargs: additional attributes
:returns: <Pause> element
]
return[call[name[self].nest, parameter[call[name[Pause], parameter[]]]]] | keyword[def] identifier[pause] ( identifier[self] , identifier[length] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[nest] ( identifier[Pause] ( identifier[length] = identifier[length] ,** identifier[kwargs] )) | def pause(self, length=None, **kwargs):
"""
Create a <Pause> element
:param length: Length in seconds to pause
:param kwargs: additional attributes
:returns: <Pause> element
"""
return self.nest(Pause(length=length, **kwargs)) |
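Assuming this is twilio's TwiML voice builder, typical usage nests a <Pause> inside a response:

from twilio.twiml.voice_response import VoiceResponse

response = VoiceResponse()
response.say("Please hold.")
response.pause(length=5)
print(response)
# <?xml version="1.0" encoding="UTF-8"?><Response><Say>Please hold.</Say><Pause length="5"/></Response>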
def _post_execute(self):
"""
Should be called when executing the user requested command has been
completed. It will do some maintenance and write out the final result
to the todo.txt file.
"""
if self.todolist.dirty:
# do not archive when the value of the filename is an empty string
            # (i.e. explicitly left empty in the configuration)
if self.do_archive and config().archive():
self._archive()
elif config().archive() and self.backup:
archive = _retrieve_archive()[0]
self.backup.add_archive(archive)
self._post_archive_action()
if config().keep_sorted():
from topydo.commands.SortCommand import SortCommand
self._execute(SortCommand, [])
if self.backup:
self.backup.save(self.todolist)
self.todofile.write(self.todolist.print_todos())
self.todolist.dirty = False
self.backup = None | def function[_post_execute, parameter[self]]:
constant[
Should be called when executing the user requested command has been
completed. It will do some maintenance and write out the final result
to the todo.txt file.
]
if name[self].todolist.dirty begin[:]
if <ast.BoolOp object at 0x7da20c7c80d0> begin[:]
call[name[self]._archive, parameter[]]
call[name[self]._post_archive_action, parameter[]]
if call[call[name[config], parameter[]].keep_sorted, parameter[]] begin[:]
from relative_module[topydo.commands.SortCommand] import module[SortCommand]
call[name[self]._execute, parameter[name[SortCommand], list[[]]]]
if name[self].backup begin[:]
call[name[self].backup.save, parameter[name[self].todolist]]
call[name[self].todofile.write, parameter[call[name[self].todolist.print_todos, parameter[]]]]
name[self].todolist.dirty assign[=] constant[False]
name[self].backup assign[=] constant[None] | keyword[def] identifier[_post_execute] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[todolist] . identifier[dirty] :
keyword[if] identifier[self] . identifier[do_archive] keyword[and] identifier[config] (). identifier[archive] ():
identifier[self] . identifier[_archive] ()
keyword[elif] identifier[config] (). identifier[archive] () keyword[and] identifier[self] . identifier[backup] :
identifier[archive] = identifier[_retrieve_archive] ()[ literal[int] ]
identifier[self] . identifier[backup] . identifier[add_archive] ( identifier[archive] )
identifier[self] . identifier[_post_archive_action] ()
keyword[if] identifier[config] (). identifier[keep_sorted] ():
keyword[from] identifier[topydo] . identifier[commands] . identifier[SortCommand] keyword[import] identifier[SortCommand]
identifier[self] . identifier[_execute] ( identifier[SortCommand] ,[])
keyword[if] identifier[self] . identifier[backup] :
identifier[self] . identifier[backup] . identifier[save] ( identifier[self] . identifier[todolist] )
identifier[self] . identifier[todofile] . identifier[write] ( identifier[self] . identifier[todolist] . identifier[print_todos] ())
identifier[self] . identifier[todolist] . identifier[dirty] = keyword[False]
identifier[self] . identifier[backup] = keyword[None] | def _post_execute(self):
"""
Should be called when executing the user requested command has been
completed. It will do some maintenance and write out the final result
to the todo.txt file.
"""
if self.todolist.dirty:
# do not archive when the value of the filename is an empty string
            # (i.e. explicitly left empty in the configuration)
if self.do_archive and config().archive():
self._archive() # depends on [control=['if'], data=[]]
elif config().archive() and self.backup:
archive = _retrieve_archive()[0]
self.backup.add_archive(archive) # depends on [control=['if'], data=[]]
self._post_archive_action()
if config().keep_sorted():
from topydo.commands.SortCommand import SortCommand
self._execute(SortCommand, []) # depends on [control=['if'], data=[]]
if self.backup:
self.backup.save(self.todolist) # depends on [control=['if'], data=[]]
self.todofile.write(self.todolist.print_todos())
self.todolist.dirty = False # depends on [control=['if'], data=[]]
self.backup = None |
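Stripped of topydo's archive and sort hooks, the core flow is a dirty-flag check, an optional backup, a write, and a flag reset; a generic sketch with illustrative names:

def flush_if_dirty(todolist, todofile, backup=None):
    """Persist the todo list only when it changed since the last write."""
    if not todolist.dirty:
        return
    if backup is not None:
        backup.save(todolist)
    todofile.write(todolist.print_todos())
    todolist.dirty = False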
def parse_source_file(file_name):
"""
Parses the AST of Python file for lines containing
references to the argparse module.
returns the collection of ast objects found.
Example client code:
1. parser = ArgumentParser(desc="My help Message")
2. parser.add_argument('filename', help="Name of the file to load")
    3. parser.add_argument('-f', '--format', help="Format of output. Options: ['md', 'html']")
4. args = parser.parse_args()
Variables:
* nodes Primary syntax tree object
* argparse_assignments The assignment of the ArgumentParser (line 1 in example code)
* add_arg_assignments Calls to add_argument() (lines 2-3 in example code)
* parser_var_name The instance variable of the ArgumentParser (line 1 in example code)
* ast_source The curated collection of all parser related nodes in the client code
"""
with open(file_name, 'r') as f:
s = f.read()
nodes = ast.parse(s)
module_imports = get_nodes_by_instance_type(nodes, _ast.Import)
specific_imports = get_nodes_by_instance_type(nodes, _ast.ImportFrom)
assignment_objs = get_nodes_by_instance_type(nodes, _ast.Assign)
call_objects = get_nodes_by_instance_type(nodes, _ast.Call)
argparse_assignments = get_nodes_by_containing_attr(assignment_objs, 'ArgumentParser')
group_arg_assignments = get_nodes_by_containing_attr(assignment_objs, 'add_argument_group')
add_arg_assignments = get_nodes_by_containing_attr(call_objects, 'add_argument')
parse_args_assignment = get_nodes_by_containing_attr(call_objects, 'parse_args')
# there are cases where we have custom argparsers, such as subclassing ArgumentParser. The above
# will fail on this. However, we can use the methods known to ArgumentParser to do a duck-type like
# approach to finding what is the arg parser
if not argparse_assignments:
aa_references = set([i.func.value.id for i in chain(add_arg_assignments, parse_args_assignment)])
argparse_like_objects = [getattr(i.value.func, 'id', None) for p_ref in aa_references for i in get_nodes_by_containing_attr(assignment_objs, p_ref)]
argparse_like_objects = filter(None, argparse_like_objects)
argparse_assignments = [get_nodes_by_containing_attr(assignment_objs, i) for i in argparse_like_objects]
# for now, we just choose one
try:
argparse_assignments = argparse_assignments[0]
except IndexError:
pass
# get things that are assigned inside ArgumentParser or its methods
argparse_assigned_variables = get_node_args_and_keywords(assignment_objs, argparse_assignments, 'ArgumentParser')
add_arg_assigned_variables = get_node_args_and_keywords(assignment_objs, add_arg_assignments, 'add_argument')
parse_args_assigned_variables = get_node_args_and_keywords(assignment_objs, parse_args_assignment, 'parse_args')
ast_argparse_source = chain(
module_imports,
specific_imports,
argparse_assigned_variables,
add_arg_assigned_variables,
parse_args_assigned_variables,
argparse_assignments,
group_arg_assignments,
add_arg_assignments,
)
return ast_argparse_source | def function[parse_source_file, parameter[file_name]]:
constant[
Parses the AST of Python file for lines containing
references to the argparse module.
returns the collection of ast objects found.
Example client code:
1. parser = ArgumentParser(desc="My help Message")
2. parser.add_argument('filename', help="Name of the file to load")
    3. parser.add_argument('-f', '--format', help="Format of output. Options: ['md', 'html']")
4. args = parser.parse_args()
Variables:
* nodes Primary syntax tree object
* argparse_assignments The assignment of the ArgumentParser (line 1 in example code)
* add_arg_assignments Calls to add_argument() (lines 2-3 in example code)
* parser_var_name The instance variable of the ArgumentParser (line 1 in example code)
* ast_source The curated collection of all parser related nodes in the client code
]
with call[name[open], parameter[name[file_name], constant[r]]] begin[:]
variable[s] assign[=] call[name[f].read, parameter[]]
variable[nodes] assign[=] call[name[ast].parse, parameter[name[s]]]
variable[module_imports] assign[=] call[name[get_nodes_by_instance_type], parameter[name[nodes], name[_ast].Import]]
variable[specific_imports] assign[=] call[name[get_nodes_by_instance_type], parameter[name[nodes], name[_ast].ImportFrom]]
variable[assignment_objs] assign[=] call[name[get_nodes_by_instance_type], parameter[name[nodes], name[_ast].Assign]]
variable[call_objects] assign[=] call[name[get_nodes_by_instance_type], parameter[name[nodes], name[_ast].Call]]
variable[argparse_assignments] assign[=] call[name[get_nodes_by_containing_attr], parameter[name[assignment_objs], constant[ArgumentParser]]]
variable[group_arg_assignments] assign[=] call[name[get_nodes_by_containing_attr], parameter[name[assignment_objs], constant[add_argument_group]]]
variable[add_arg_assignments] assign[=] call[name[get_nodes_by_containing_attr], parameter[name[call_objects], constant[add_argument]]]
variable[parse_args_assignment] assign[=] call[name[get_nodes_by_containing_attr], parameter[name[call_objects], constant[parse_args]]]
if <ast.UnaryOp object at 0x7da20e9b2d40> begin[:]
variable[aa_references] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da20e9b1b70>]]
variable[argparse_like_objects] assign[=] <ast.ListComp object at 0x7da20e9b2380>
variable[argparse_like_objects] assign[=] call[name[filter], parameter[constant[None], name[argparse_like_objects]]]
variable[argparse_assignments] assign[=] <ast.ListComp object at 0x7da2041da8c0>
<ast.Try object at 0x7da2041db4f0>
variable[argparse_assigned_variables] assign[=] call[name[get_node_args_and_keywords], parameter[name[assignment_objs], name[argparse_assignments], constant[ArgumentParser]]]
variable[add_arg_assigned_variables] assign[=] call[name[get_node_args_and_keywords], parameter[name[assignment_objs], name[add_arg_assignments], constant[add_argument]]]
variable[parse_args_assigned_variables] assign[=] call[name[get_node_args_and_keywords], parameter[name[assignment_objs], name[parse_args_assignment], constant[parse_args]]]
variable[ast_argparse_source] assign[=] call[name[chain], parameter[name[module_imports], name[specific_imports], name[argparse_assigned_variables], name[add_arg_assigned_variables], name[parse_args_assigned_variables], name[argparse_assignments], name[group_arg_assignments], name[add_arg_assignments]]]
return[name[ast_argparse_source]] | keyword[def] identifier[parse_source_file] ( identifier[file_name] ):
literal[string]
keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[f] :
identifier[s] = identifier[f] . identifier[read] ()
identifier[nodes] = identifier[ast] . identifier[parse] ( identifier[s] )
identifier[module_imports] = identifier[get_nodes_by_instance_type] ( identifier[nodes] , identifier[_ast] . identifier[Import] )
identifier[specific_imports] = identifier[get_nodes_by_instance_type] ( identifier[nodes] , identifier[_ast] . identifier[ImportFrom] )
identifier[assignment_objs] = identifier[get_nodes_by_instance_type] ( identifier[nodes] , identifier[_ast] . identifier[Assign] )
identifier[call_objects] = identifier[get_nodes_by_instance_type] ( identifier[nodes] , identifier[_ast] . identifier[Call] )
identifier[argparse_assignments] = identifier[get_nodes_by_containing_attr] ( identifier[assignment_objs] , literal[string] )
identifier[group_arg_assignments] = identifier[get_nodes_by_containing_attr] ( identifier[assignment_objs] , literal[string] )
identifier[add_arg_assignments] = identifier[get_nodes_by_containing_attr] ( identifier[call_objects] , literal[string] )
identifier[parse_args_assignment] = identifier[get_nodes_by_containing_attr] ( identifier[call_objects] , literal[string] )
keyword[if] keyword[not] identifier[argparse_assignments] :
identifier[aa_references] = identifier[set] ([ identifier[i] . identifier[func] . identifier[value] . identifier[id] keyword[for] identifier[i] keyword[in] identifier[chain] ( identifier[add_arg_assignments] , identifier[parse_args_assignment] )])
identifier[argparse_like_objects] =[ identifier[getattr] ( identifier[i] . identifier[value] . identifier[func] , literal[string] , keyword[None] ) keyword[for] identifier[p_ref] keyword[in] identifier[aa_references] keyword[for] identifier[i] keyword[in] identifier[get_nodes_by_containing_attr] ( identifier[assignment_objs] , identifier[p_ref] )]
identifier[argparse_like_objects] = identifier[filter] ( keyword[None] , identifier[argparse_like_objects] )
identifier[argparse_assignments] =[ identifier[get_nodes_by_containing_attr] ( identifier[assignment_objs] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[argparse_like_objects] ]
keyword[try] :
identifier[argparse_assignments] = identifier[argparse_assignments] [ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[pass]
identifier[argparse_assigned_variables] = identifier[get_node_args_and_keywords] ( identifier[assignment_objs] , identifier[argparse_assignments] , literal[string] )
identifier[add_arg_assigned_variables] = identifier[get_node_args_and_keywords] ( identifier[assignment_objs] , identifier[add_arg_assignments] , literal[string] )
identifier[parse_args_assigned_variables] = identifier[get_node_args_and_keywords] ( identifier[assignment_objs] , identifier[parse_args_assignment] , literal[string] )
identifier[ast_argparse_source] = identifier[chain] (
identifier[module_imports] ,
identifier[specific_imports] ,
identifier[argparse_assigned_variables] ,
identifier[add_arg_assigned_variables] ,
identifier[parse_args_assigned_variables] ,
identifier[argparse_assignments] ,
identifier[group_arg_assignments] ,
identifier[add_arg_assignments] ,
)
keyword[return] identifier[ast_argparse_source] | def parse_source_file(file_name):
"""
Parses the AST of Python file for lines containing
references to the argparse module.
returns the collection of ast objects found.
Example client code:
1. parser = ArgumentParser(desc="My help Message")
2. parser.add_argument('filename', help="Name of the file to load")
    3. parser.add_argument('-f', '--format', help="Format of output. Options: ['md', 'html']")
4. args = parser.parse_args()
Variables:
* nodes Primary syntax tree object
* argparse_assignments The assignment of the ArgumentParser (line 1 in example code)
* add_arg_assignments Calls to add_argument() (lines 2-3 in example code)
* parser_var_name The instance variable of the ArgumentParser (line 1 in example code)
* ast_source The curated collection of all parser related nodes in the client code
"""
with open(file_name, 'r') as f:
s = f.read() # depends on [control=['with'], data=['f']]
nodes = ast.parse(s)
module_imports = get_nodes_by_instance_type(nodes, _ast.Import)
specific_imports = get_nodes_by_instance_type(nodes, _ast.ImportFrom)
assignment_objs = get_nodes_by_instance_type(nodes, _ast.Assign)
call_objects = get_nodes_by_instance_type(nodes, _ast.Call)
argparse_assignments = get_nodes_by_containing_attr(assignment_objs, 'ArgumentParser')
group_arg_assignments = get_nodes_by_containing_attr(assignment_objs, 'add_argument_group')
add_arg_assignments = get_nodes_by_containing_attr(call_objects, 'add_argument')
parse_args_assignment = get_nodes_by_containing_attr(call_objects, 'parse_args')
# there are cases where we have custom argparsers, such as subclassing ArgumentParser. The above
# will fail on this. However, we can use the methods known to ArgumentParser to do a duck-type like
# approach to finding what is the arg parser
if not argparse_assignments:
aa_references = set([i.func.value.id for i in chain(add_arg_assignments, parse_args_assignment)])
argparse_like_objects = [getattr(i.value.func, 'id', None) for p_ref in aa_references for i in get_nodes_by_containing_attr(assignment_objs, p_ref)]
argparse_like_objects = filter(None, argparse_like_objects)
argparse_assignments = [get_nodes_by_containing_attr(assignment_objs, i) for i in argparse_like_objects]
# for now, we just choose one
try:
argparse_assignments = argparse_assignments[0] # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# get things that are assigned inside ArgumentParser or its methods
argparse_assigned_variables = get_node_args_and_keywords(assignment_objs, argparse_assignments, 'ArgumentParser')
add_arg_assigned_variables = get_node_args_and_keywords(assignment_objs, add_arg_assignments, 'add_argument')
parse_args_assigned_variables = get_node_args_and_keywords(assignment_objs, parse_args_assignment, 'parse_args')
ast_argparse_source = chain(module_imports, specific_imports, argparse_assigned_variables, add_arg_assigned_variables, parse_args_assigned_variables, argparse_assignments, group_arg_assignments, add_arg_assignments)
return ast_argparse_source |
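The harvesting boils down to ast.parse plus type and attribute filters; a compact, runnable illustration of finding add_argument calls:

import ast

source = '''
from argparse import ArgumentParser
parser = ArgumentParser(description="demo")
parser.add_argument("filename", help="Name of the file to load")
args = parser.parse_args()
'''

tree = ast.parse(source)
calls = [n for n in ast.walk(tree) if isinstance(n, ast.Call)]
add_args = [c for c in calls
            if isinstance(c.func, ast.Attribute) and c.func.attr == "add_argument"]
print(len(add_args))  # 1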
def calc_mass_from_fit_and_conv_factor(A, Damping, ConvFactor):
"""
Calculates mass from the A parameter from fitting, the damping from
fitting in angular units and the Conversion factor calculated from
comparing the ratio of the z signal and first harmonic of z.
Parameters
----------
A : float
A factor calculated from fitting
Damping : float
        damping in radians/second calculated from fitting
ConvFactor : float
conversion factor between volts and nms
Returns
-------
mass : float
mass in kgs
"""
T0 = 300
mFromA = 2*Boltzmann*T0/(pi*A) * ConvFactor**2 * Damping
return mFromA | def function[calc_mass_from_fit_and_conv_factor, parameter[A, Damping, ConvFactor]]:
constant[
Calculates mass from the A parameter from fitting, the damping from
fitting in angular units and the Conversion factor calculated from
comparing the ratio of the z signal and first harmonic of z.
Parameters
----------
A : float
A factor calculated from fitting
Damping : float
        damping in radians/second calculated from fitting
ConvFactor : float
conversion factor between volts and nms
Returns
-------
mass : float
mass in kgs
]
variable[T0] assign[=] constant[300]
variable[mFromA] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[2] * name[Boltzmann]] * name[T0]] / binary_operation[name[pi] * name[A]]] * binary_operation[name[ConvFactor] ** constant[2]]] * name[Damping]]
return[name[mFromA]] | keyword[def] identifier[calc_mass_from_fit_and_conv_factor] ( identifier[A] , identifier[Damping] , identifier[ConvFactor] ):
literal[string]
identifier[T0] = literal[int]
identifier[mFromA] = literal[int] * identifier[Boltzmann] * identifier[T0] /( identifier[pi] * identifier[A] )* identifier[ConvFactor] ** literal[int] * identifier[Damping]
keyword[return] identifier[mFromA] | def calc_mass_from_fit_and_conv_factor(A, Damping, ConvFactor):
"""
Calculates mass from the A parameter from fitting, the damping from
fitting in angular units and the Conversion factor calculated from
comparing the ratio of the z signal and first harmonic of z.
Parameters
----------
A : float
A factor calculated from fitting
Damping : float
damping in radians/second calculated from fitting
ConvFactor : float
conversion factor between volts and nms
Returns
-------
mass : float
mass in kgs
"""
T0 = 300
mFromA = 2 * Boltzmann * T0 / (pi * A) * ConvFactor ** 2 * Damping
return mFromA |
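# Worked example for calc_mass_from_fit_and_conv_factor; a sketch assuming the
# units stated in the docstring (Damping in rad/s) and that Boltzmann and pi
# come from scipy.constants, as in the function body. The sample numbers are
# illustrative, not real fit output.
from scipy.constants import Boltzmann, pi

A = 5e9            # fit amplitude, illustrative
Damping = 2000.0   # rad/s, illustrative
ConvFactor = 2e5   # volts per metre of displacement, illustrative
T0 = 300
mass = 2 * Boltzmann * T0 / (pi * A) * ConvFactor**2 * Damping
print(mass, 'kg')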
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
Parameters
----------
pattern: str
pattern as suitable for re.compile
Returns
-------
RcParams
RcParams instance with entries that match the given `pattern`
Notes
-----
Changes to the returned dictionary are (different from
:meth:`find_and_replace`) *not* propagated to the parent RcParams
dictionary.
See Also
--------
find_and_replace"""
pattern_re = re.compile(pattern)
ret = RcParams()
ret.defaultParams = self.defaultParams
ret.update((key, value) for key, value in self.items()
if pattern_re.search(key))
return ret | def function[find_all, parameter[self, pattern]]:
constant[
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
Parameters
----------
pattern: str
pattern as suitable for re.compile
Returns
-------
RcParams
RcParams instance with entries that match the given `pattern`
Notes
-----
Changes to the returned dictionary are (different from
:meth:`find_and_replace`) *not* propagated to the parent RcParams
dictionary.
See Also
--------
find_and_replace]
variable[pattern_re] assign[=] call[name[re].compile, parameter[name[pattern]]]
variable[ret] assign[=] call[name[RcParams], parameter[]]
name[ret].defaultParams assign[=] name[self].defaultParams
call[name[ret].update, parameter[<ast.GeneratorExp object at 0x7da18c4cd420>]]
return[name[ret]] | keyword[def] identifier[find_all] ( identifier[self] , identifier[pattern] ):
literal[string]
identifier[pattern_re] = identifier[re] . identifier[compile] ( identifier[pattern] )
identifier[ret] = identifier[RcParams] ()
identifier[ret] . identifier[defaultParams] = identifier[self] . identifier[defaultParams]
identifier[ret] . identifier[update] (( identifier[key] , identifier[value] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[items] ()
keyword[if] identifier[pattern_re] . identifier[search] ( identifier[key] ))
keyword[return] identifier[ret] | def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
Parameters
----------
pattern: str
pattern as suitable for re.compile
Returns
-------
RcParams
RcParams instance with entries that match the given `pattern`
Notes
-----
Changes to the returned dictionary are (different from
:meth:`find_and_replace` are *not* propagated to the parent RcParams
dictionary.
See Also
--------
find_and_replace"""
pattern_re = re.compile(pattern)
ret = RcParams()
ret.defaultParams = self.defaultParams
ret.update(((key, value) for (key, value) in self.items() if pattern_re.search(key)))
return ret |
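# The same regex filtering as RcParams.find_all, sketched on a plain dict so
# it runs without the surrounding RcParams class; keys are matched with
# re.search, exactly as above.
import re

params = {'plot.line.color': 'k', 'plot.line.width': 1.0, 'colorbar.label': ''}
pattern_re = re.compile('line')
subset = {k: v for k, v in params.items() if pattern_re.search(k)}
print(subset)  # {'plot.line.color': 'k', 'plot.line.width': 1.0}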
def subtract_params(param_list_left, param_list_right):
"""Subtract two lists of parameters
:param param_list_left: list of numpy arrays
:param param_list_right: list of numpy arrays
:return: list of numpy arrays
"""
res = []
for x, y in zip(param_list_left, param_list_right):
res.append(x - y)
return res | def function[subtract_params, parameter[param_list_left, param_list_right]]:
constant[Subtract two lists of parameters
:param param_list_left: list of numpy arrays
:param param_list_right: list of numpy arrays
:return: list of numpy arrays
]
variable[res] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1882320>, <ast.Name object at 0x7da1b1883f40>]]] in starred[call[name[zip], parameter[name[param_list_left], name[param_list_right]]]] begin[:]
call[name[res].append, parameter[binary_operation[name[x] - name[y]]]]
return[name[res]] | keyword[def] identifier[subtract_params] ( identifier[param_list_left] , identifier[param_list_right] ):
literal[string]
identifier[res] =[]
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[param_list_left] , identifier[param_list_right] ):
identifier[res] . identifier[append] ( identifier[x] - identifier[y] )
keyword[return] identifier[res] | def subtract_params(param_list_left, param_list_right):
"""Subtract two lists of parameters
:param param_list_left: list of numpy arrays
:param param_list_right: list of numpy arrays
:return: list of numpy arrays
"""
res = []
for (x, y) in zip(param_list_left, param_list_right):
res.append(x - y) # depends on [control=['for'], data=[]]
return res |
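# Usage sketch for subtract_params: elementwise differences over two lists of
# arrays, e.g. to form the update delta between two model weight snapshots.
import numpy as np

left = [np.ones((2, 2)), np.full(3, 5.0)]
right = [np.eye(2), np.arange(3, dtype=float)]
delta = [x - y for x, y in zip(left, right)]
print(delta[0])  # [[0. 1.]
                 #  [1. 0.]]
print(delta[1])  # [5. 4. 3.]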
def masked_dilated_self_attention_1d(q,
k,
v,
query_block_size=64,
memory_block_size=64,
gap_size=2,
num_memory_blocks=2,
name=None):
"""Dilated self-attention. TODO(avaswani): Try it and write a paper on it.
Args:
q: a Tensor with shape [batch, heads, length, depth]
k: a Tensor with shape [batch, heads, length, depth]
v: a Tensor with shape [batch, heads, length, depth]
query_block_size: an integer
memory_block_size: an integer indicating how much to look left.
gap_size: an integer indicating the gap size
num_memory_blocks: how many memory blocks to look at to the left. Each will
be separated by gap_size.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth]
"""
with tf.variable_scope(
name, default_name="masked_dilated_self_attention_1d", values=[q, k, v]):
v_list_shape = v.get_shape().as_list()
assert v_list_shape == k.shape.as_list(), "K and V depths must be equal"
v_shape = common_layers.shape_list(v)
depth_v = v_shape[3]
batch_size = v_shape[0]
num_heads = v_shape[1]
original_length = common_layers.shape_list(q)[2]
# Pad query, key, value to ensure multiple of corresponding lengths.
def pad_to_multiple(x, pad_length):
x_length = common_layers.shape_list(x)[2]
return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]])
def pad_l(x, left_pad_length):
return tf.pad(x, [[0, 0], [0, 0], [left_pad_length, 0], [0, 0]])
q = pad_to_multiple(q, query_block_size)
v = pad_to_multiple(v, query_block_size)
k = pad_to_multiple(k, query_block_size)
# Set up query blocks.
new_q_shape = common_layers.shape_list(q)
q = reshape_by_blocks(q, new_q_shape, query_block_size)
# Set up key and value windows.
self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size)
self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size)
k_v_padding = (gap_size + memory_block_size) * num_memory_blocks
k = pad_l(k, k_v_padding)
v = pad_l(v, k_v_padding)
# Get gather indices.
index_length = (new_q_shape[2] - query_block_size + memory_block_size)
indices = tf.range(0, index_length, delta=1, name="index_range")
indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs
kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1)
gather_indices = tf.nn.conv1d(
tf.cast(indices, tf.float32),
kernel,
query_block_size,
padding="VALID",
name="gather_conv")
gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0)
# Get memory blocks to the left of each query.
# [length, batch, heads, dim]
k_t = tf.transpose(k, [2, 0, 1, 3])
v_t = tf.transpose(v, [2, 0, 1, 3])
k_unmasked_windows = gather_dilated_memory_blocks(
k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size,
gather_indices)
v_unmasked_windows = gather_dilated_memory_blocks(
v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size,
gather_indices)
# Build attention bias: causal mask for the self block, padding mask for memory.
block_q_shape = common_layers.shape_list(q)
masked_attention_bias = tf.tile(
tf.expand_dims(attention_bias_lower_triangle(query_block_size), axis=0),
[block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1])
padding_attention_bias = tf.expand_dims(
embedding_to_padding(k_unmasked_windows) * -1e9, axis=-2)
padding_attention_bias = tf.tile(padding_attention_bias,
[1, 1, 1, query_block_size, 1])
attention_bias = tf.concat(
[masked_attention_bias, padding_attention_bias], axis=-1)
# combine memory windows
k_windows = tf.concat([self_k_part, k_unmasked_windows], 3)
v_windows = tf.concat([self_v_part, v_unmasked_windows], 3)
output = dot_product_attention(
q,
k_windows,
v_windows,
attention_bias,
dropout_rate=0.,
name="dilated_1d",
make_image_summary=False)
output = tf.reshape(output, [batch_size, num_heads, -1, depth_v])
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape(v_list_shape)
return output | def function[masked_dilated_self_attention_1d, parameter[q, k, v, query_block_size, memory_block_size, gap_size, num_memory_blocks, name]]:
constant[Dilated self-attention. TODO(avaswani): Try it and write a paper on it.
Args:
q: a Tensor with shape [batch, heads, length, depth]
k: a Tensor with shape [batch, heads, length, depth]
v: a Tensor with shape [batch, heads, length, depth]
query_block_size: an integer
memory_block_size: an integer indicating how much to look left.
gap_size: an integer indicating the gap size
num_memory_blocks: how many memory blocks to look at to the left. Each will
be separated by gap_size.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth]
]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
variable[v_list_shape] assign[=] call[call[name[v].get_shape, parameter[]].as_list, parameter[]]
assert[compare[name[v_list_shape] equal[==] call[name[k].shape.as_list, parameter[]]]]
variable[v_shape] assign[=] call[name[common_layers].shape_list, parameter[name[v]]]
variable[depth_v] assign[=] call[name[v_shape]][constant[3]]
variable[batch_size] assign[=] call[name[v_shape]][constant[0]]
variable[num_heads] assign[=] call[name[v_shape]][constant[1]]
variable[original_length] assign[=] call[call[name[common_layers].shape_list, parameter[name[q]]]][constant[2]]
def function[pad_to_multiple, parameter[x, pad_length]]:
variable[x_length] assign[=] call[call[name[common_layers].shape_list, parameter[name[x]]]][constant[2]]
return[call[name[tf].pad, parameter[name[x], list[[<ast.List object at 0x7da2045657e0>, <ast.List object at 0x7da204564df0>, <ast.List object at 0x7da204565270>, <ast.List object at 0x7da204566110>]]]]]
def function[pad_l, parameter[x, left_pad_length]]:
return[call[name[tf].pad, parameter[name[x], list[[<ast.List object at 0x7da2045641c0>, <ast.List object at 0x7da204566cb0>, <ast.List object at 0x7da2045670d0>, <ast.List object at 0x7da204566050>]]]]]
variable[q] assign[=] call[name[pad_to_multiple], parameter[name[q], name[query_block_size]]]
variable[v] assign[=] call[name[pad_to_multiple], parameter[name[v], name[query_block_size]]]
variable[k] assign[=] call[name[pad_to_multiple], parameter[name[k], name[query_block_size]]]
variable[new_q_shape] assign[=] call[name[common_layers].shape_list, parameter[name[q]]]
variable[q] assign[=] call[name[reshape_by_blocks], parameter[name[q], name[new_q_shape], name[query_block_size]]]
variable[self_k_part] assign[=] call[name[reshape_by_blocks], parameter[name[k], name[new_q_shape], name[query_block_size]]]
variable[self_v_part] assign[=] call[name[reshape_by_blocks], parameter[name[v], name[new_q_shape], name[query_block_size]]]
variable[k_v_padding] assign[=] binary_operation[binary_operation[name[gap_size] + name[memory_block_size]] * name[num_memory_blocks]]
variable[k] assign[=] call[name[pad_l], parameter[name[k], name[k_v_padding]]]
variable[v] assign[=] call[name[pad_l], parameter[name[v], name[k_v_padding]]]
variable[index_length] assign[=] binary_operation[binary_operation[call[name[new_q_shape]][constant[2]] - name[query_block_size]] + name[memory_block_size]]
variable[indices] assign[=] call[name[tf].range, parameter[constant[0], name[index_length]]]
variable[indices] assign[=] call[name[tf].reshape, parameter[name[indices], list[[<ast.Constant object at 0x7da2045662f0>, <ast.UnaryOp object at 0x7da204567eb0>, <ast.Constant object at 0x7da204567790>]]]]
variable[kernel] assign[=] call[name[tf].expand_dims, parameter[call[name[tf].eye, parameter[name[memory_block_size]]]]]
variable[gather_indices] assign[=] call[name[tf].nn.conv1d, parameter[call[name[tf].cast, parameter[name[indices], name[tf].float32]], name[kernel], name[query_block_size]]]
variable[gather_indices] assign[=] call[name[tf].squeeze, parameter[call[name[tf].cast, parameter[name[gather_indices], name[tf].int32]]]]
variable[k_t] assign[=] call[name[tf].transpose, parameter[name[k], list[[<ast.Constant object at 0x7da2045643a0>, <ast.Constant object at 0x7da204564ee0>, <ast.Constant object at 0x7da204566d10>, <ast.Constant object at 0x7da204566bc0>]]]]
variable[v_t] assign[=] call[name[tf].transpose, parameter[name[v], list[[<ast.Constant object at 0x7da204566770>, <ast.Constant object at 0x7da204566950>, <ast.Constant object at 0x7da204564340>, <ast.Constant object at 0x7da204566440>]]]]
variable[k_unmasked_windows] assign[=] call[name[gather_dilated_memory_blocks], parameter[name[k_t], name[num_memory_blocks], name[gap_size], name[query_block_size], name[memory_block_size], name[gather_indices]]]
variable[v_unmasked_windows] assign[=] call[name[gather_dilated_memory_blocks], parameter[name[v_t], name[num_memory_blocks], name[gap_size], name[query_block_size], name[memory_block_size], name[gather_indices]]]
variable[block_q_shape] assign[=] call[name[common_layers].shape_list, parameter[name[q]]]
variable[masked_attention_bias] assign[=] call[name[tf].tile, parameter[call[name[tf].expand_dims, parameter[call[name[attention_bias_lower_triangle], parameter[name[query_block_size]]]]], list[[<ast.Subscript object at 0x7da1b2059870>, <ast.Subscript object at 0x7da1b205b670>, <ast.Subscript object at 0x7da20e9b16f0>, <ast.Constant object at 0x7da20e9b01c0>, <ast.Constant object at 0x7da20e9b3d30>]]]]
variable[padding_attention_bias] assign[=] call[name[tf].expand_dims, parameter[binary_operation[call[name[embedding_to_padding], parameter[name[k_unmasked_windows]]] * <ast.UnaryOp object at 0x7da20e9b3370>]]]
variable[padding_attention_bias] assign[=] call[name[tf].tile, parameter[name[padding_attention_bias], list[[<ast.Constant object at 0x7da20e9b1a80>, <ast.Constant object at 0x7da20e9b35b0>, <ast.Constant object at 0x7da20e9b1300>, <ast.Name object at 0x7da20e9b30a0>, <ast.Constant object at 0x7da20e9b1510>]]]]
variable[attention_bias] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da20e9b1de0>, <ast.Name object at 0x7da20e9b0370>]]]]
variable[k_windows] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da20e9b0a30>, <ast.Name object at 0x7da20e9b0070>]], constant[3]]]
variable[v_windows] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da20e9b0160>, <ast.Name object at 0x7da20e9b2380>]], constant[3]]]
variable[output] assign[=] call[name[dot_product_attention], parameter[name[q], name[k_windows], name[v_windows], name[attention_bias]]]
variable[output] assign[=] call[name[tf].reshape, parameter[name[output], list[[<ast.Name object at 0x7da20e9b3820>, <ast.Name object at 0x7da20e9b0c40>, <ast.UnaryOp object at 0x7da20e9b3c10>, <ast.Name object at 0x7da20e9b2ad0>]]]]
variable[output] assign[=] call[name[tf].slice, parameter[name[output], list[[<ast.Constant object at 0x7da20e9b1c60>, <ast.Constant object at 0x7da20e9b2440>, <ast.Constant object at 0x7da20e9b3e80>, <ast.Constant object at 0x7da20e9b1f30>]], list[[<ast.UnaryOp object at 0x7da20e9b2a10>, <ast.UnaryOp object at 0x7da20e9b1780>, <ast.Name object at 0x7da20e9b1fc0>, <ast.UnaryOp object at 0x7da20e9b1930>]]]]
call[name[output].set_shape, parameter[name[v_list_shape]]]
return[name[output]] | keyword[def] identifier[masked_dilated_self_attention_1d] ( identifier[q] ,
identifier[k] ,
identifier[v] ,
identifier[query_block_size] = literal[int] ,
identifier[memory_block_size] = literal[int] ,
identifier[gap_size] = literal[int] ,
identifier[num_memory_blocks] = literal[int] ,
identifier[name] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] (
identifier[name] , identifier[default_name] = literal[string] , identifier[values] =[ identifier[q] , identifier[k] , identifier[v] ]):
identifier[v_list_shape] = identifier[v] . identifier[get_shape] (). identifier[as_list] ()
keyword[assert] identifier[v_list_shape] == identifier[k] . identifier[shape] . identifier[as_list] (), literal[string]
identifier[v_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[v] )
identifier[depth_v] = identifier[v_shape] [ literal[int] ]
identifier[batch_size] = identifier[v_shape] [ literal[int] ]
identifier[num_heads] = identifier[v_shape] [ literal[int] ]
identifier[original_length] = identifier[common_layers] . identifier[shape_list] ( identifier[q] )[ literal[int] ]
keyword[def] identifier[pad_to_multiple] ( identifier[x] , identifier[pad_length] ):
identifier[x_length] = identifier[common_layers] . identifier[shape_list] ( identifier[x] )[ literal[int] ]
keyword[return] identifier[tf] . identifier[pad] ( identifier[x] ,[[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] ,- identifier[x_length] % identifier[pad_length] ],[ literal[int] , literal[int] ]])
keyword[def] identifier[pad_l] ( identifier[x] , identifier[left_pad_length] ):
keyword[return] identifier[tf] . identifier[pad] ( identifier[x] ,[[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ identifier[left_pad_length] , literal[int] ],[ literal[int] , literal[int] ]])
identifier[q] = identifier[pad_to_multiple] ( identifier[q] , identifier[query_block_size] )
identifier[v] = identifier[pad_to_multiple] ( identifier[v] , identifier[query_block_size] )
identifier[k] = identifier[pad_to_multiple] ( identifier[k] , identifier[query_block_size] )
identifier[new_q_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[q] )
identifier[q] = identifier[reshape_by_blocks] ( identifier[q] , identifier[new_q_shape] , identifier[query_block_size] )
identifier[self_k_part] = identifier[reshape_by_blocks] ( identifier[k] , identifier[new_q_shape] , identifier[query_block_size] )
identifier[self_v_part] = identifier[reshape_by_blocks] ( identifier[v] , identifier[new_q_shape] , identifier[query_block_size] )
identifier[k_v_padding] =( identifier[gap_size] + identifier[memory_block_size] )* identifier[num_memory_blocks]
identifier[k] = identifier[pad_l] ( identifier[k] , identifier[k_v_padding] )
identifier[v] = identifier[pad_l] ( identifier[v] , identifier[k_v_padding] )
identifier[index_length] =( identifier[new_q_shape] [ literal[int] ]- identifier[query_block_size] + identifier[memory_block_size] )
identifier[indices] = identifier[tf] . identifier[range] ( literal[int] , identifier[index_length] , identifier[delta] = literal[int] , identifier[name] = literal[string] )
identifier[indices] = identifier[tf] . identifier[reshape] ( identifier[indices] ,[ literal[int] ,- literal[int] , literal[int] ])
identifier[kernel] = identifier[tf] . identifier[expand_dims] ( identifier[tf] . identifier[eye] ( identifier[memory_block_size] ), identifier[axis] = literal[int] )
identifier[gather_indices] = identifier[tf] . identifier[nn] . identifier[conv1d] (
identifier[tf] . identifier[cast] ( identifier[indices] , identifier[tf] . identifier[float32] ),
identifier[kernel] ,
identifier[query_block_size] ,
identifier[padding] = literal[string] ,
identifier[name] = literal[string] )
identifier[gather_indices] = identifier[tf] . identifier[squeeze] ( identifier[tf] . identifier[cast] ( identifier[gather_indices] , identifier[tf] . identifier[int32] ), identifier[axis] = literal[int] )
identifier[k_t] = identifier[tf] . identifier[transpose] ( identifier[k] ,[ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[v_t] = identifier[tf] . identifier[transpose] ( identifier[v] ,[ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[k_unmasked_windows] = identifier[gather_dilated_memory_blocks] (
identifier[k_t] , identifier[num_memory_blocks] , identifier[gap_size] , identifier[query_block_size] , identifier[memory_block_size] ,
identifier[gather_indices] )
identifier[v_unmasked_windows] = identifier[gather_dilated_memory_blocks] (
identifier[v_t] , identifier[num_memory_blocks] , identifier[gap_size] , identifier[query_block_size] , identifier[memory_block_size] ,
identifier[gather_indices] )
identifier[block_q_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[q] )
identifier[masked_attention_bias] = identifier[tf] . identifier[tile] (
identifier[tf] . identifier[expand_dims] ( identifier[attention_bias_lower_triangle] ( identifier[query_block_size] ), identifier[axis] = literal[int] ),
[ identifier[block_q_shape] [ literal[int] ], identifier[block_q_shape] [ literal[int] ], identifier[block_q_shape] [ literal[int] ], literal[int] , literal[int] ])
identifier[padding_attention_bias] = identifier[tf] . identifier[expand_dims] (
identifier[embedding_to_padding] ( identifier[k_unmasked_windows] )*- literal[int] , identifier[axis] =- literal[int] )
identifier[padding_attention_bias] = identifier[tf] . identifier[tile] ( identifier[padding_attention_bias] ,
[ literal[int] , literal[int] , literal[int] , identifier[query_block_size] , literal[int] ])
identifier[attention_bias] = identifier[tf] . identifier[concat] (
[ identifier[masked_attention_bias] , identifier[padding_attention_bias] ], identifier[axis] =- literal[int] )
identifier[k_windows] = identifier[tf] . identifier[concat] ([ identifier[self_k_part] , identifier[k_unmasked_windows] ], literal[int] )
identifier[v_windows] = identifier[tf] . identifier[concat] ([ identifier[self_v_part] , identifier[v_unmasked_windows] ], literal[int] )
identifier[output] = identifier[dot_product_attention] (
identifier[q] ,
identifier[k_windows] ,
identifier[v_windows] ,
identifier[attention_bias] ,
identifier[dropout_rate] = literal[int] ,
identifier[name] = literal[string] ,
identifier[make_image_summary] = keyword[False] )
identifier[output] = identifier[tf] . identifier[reshape] ( identifier[output] ,[ identifier[batch_size] , identifier[num_heads] ,- literal[int] , identifier[depth_v] ])
identifier[output] = identifier[tf] . identifier[slice] ( identifier[output] ,[ literal[int] , literal[int] , literal[int] , literal[int] ],[- literal[int] ,- literal[int] , identifier[original_length] ,- literal[int] ])
identifier[output] . identifier[set_shape] ( identifier[v_list_shape] )
keyword[return] identifier[output] | def masked_dilated_self_attention_1d(q, k, v, query_block_size=64, memory_block_size=64, gap_size=2, num_memory_blocks=2, name=None):
"""Dilated self-attention. TODO(avaswani): Try it and write a paper on it.
Args:
q: a Tensor with shape [batch, heads, length, depth]
k: a Tensor with shape [batch, heads, length, depth]
v: a Tensor with shape [batch, heads, length, depth]
query_block_size: an integer
memory_block_size: an integer indicating how much to look left.
gap_size: an integer indicating the gap size
num_memory_blocks: how many memory blocks to look at to the left. Each will
be separated by gap_size.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth]
"""
with tf.variable_scope(name, default_name='masked_dilated_self_attention_1d', values=[q, k, v]):
v_list_shape = v.get_shape().as_list()
assert v_list_shape == k.shape.as_list(), 'K and V depths must be equal'
v_shape = common_layers.shape_list(v)
depth_v = v_shape[3]
batch_size = v_shape[0]
num_heads = v_shape[1]
original_length = common_layers.shape_list(q)[2]
# Pad query, key, value to ensure multiple of corresponding lengths.
def pad_to_multiple(x, pad_length):
x_length = common_layers.shape_list(x)[2]
return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]])
def pad_l(x, left_pad_length):
return tf.pad(x, [[0, 0], [0, 0], [left_pad_length, 0], [0, 0]])
q = pad_to_multiple(q, query_block_size)
v = pad_to_multiple(v, query_block_size)
k = pad_to_multiple(k, query_block_size)
# Set up query blocks.
new_q_shape = common_layers.shape_list(q)
q = reshape_by_blocks(q, new_q_shape, query_block_size)
# Set up key and value windows.
self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size)
self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size)
k_v_padding = (gap_size + memory_block_size) * num_memory_blocks
k = pad_l(k, k_v_padding)
v = pad_l(v, k_v_padding)
# Get gather indices.
index_length = new_q_shape[2] - query_block_size + memory_block_size
indices = tf.range(0, index_length, delta=1, name='index_range')
indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs
kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1)
gather_indices = tf.nn.conv1d(tf.cast(indices, tf.float32), kernel, query_block_size, padding='VALID', name='gather_conv')
gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0)
# Get memory blocks to the left of each query.
# [length, batch, heads, dim]
k_t = tf.transpose(k, [2, 0, 1, 3])
v_t = tf.transpose(v, [2, 0, 1, 3])
k_unmasked_windows = gather_dilated_memory_blocks(k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices)
v_unmasked_windows = gather_dilated_memory_blocks(v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices)
# Build attention bias: causal mask for the self block, padding mask for memory.
block_q_shape = common_layers.shape_list(q)
masked_attention_bias = tf.tile(tf.expand_dims(attention_bias_lower_triangle(query_block_size), axis=0), [block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1])
padding_attention_bias = tf.expand_dims(embedding_to_padding(k_unmasked_windows) * -1000000000.0, axis=-2)
padding_attention_bias = tf.tile(padding_attention_bias, [1, 1, 1, query_block_size, 1])
attention_bias = tf.concat([masked_attention_bias, padding_attention_bias], axis=-1)
# combine memory windows
k_windows = tf.concat([self_k_part, k_unmasked_windows], 3)
v_windows = tf.concat([self_v_part, v_unmasked_windows], 3)
output = dot_product_attention(q, k_windows, v_windows, attention_bias, dropout_rate=0.0, name='dilated_1d', make_image_summary=False)
output = tf.reshape(output, [batch_size, num_heads, -1, depth_v])
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape(v_list_shape)
return output # depends on [control=['with'], data=[]] |
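# A NumPy sketch of the gather-index trick above: conv1d over a ramp of
# indices with an identity kernel and stride=query_block_size just slices one
# window of memory_block_size consecutive indices per query block. Shapes are
# tiny and illustrative.
import numpy as np

query_block_size, memory_block_size, length = 4, 2, 12
index_length = length - query_block_size + memory_block_size
indices = np.arange(index_length)
gather_indices = np.stack([
    indices[start:start + memory_block_size]
    for start in range(0, index_length - memory_block_size + 1, query_block_size)
])
print(gather_indices)  # [[0 1] [4 5] [8 9]] -- one row per query block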
def generate_hashfile(directory, blacklist=_BLACKLIST):
"""
Compute a checksum for each file in `directory`, with the exception of files
specified in `blacklist`.
Args:
directory (str): Absolute or relative path to the directory.
blacklist (list/set/tuple): List of blacklisted filenames. Only
filenames are checked, not paths!
Returns:
str: Content of hashfile as it is specified in ABNF specification for \
project.
"""
checksums = generate_checksums(directory, blacklist)
out = ""
for fn, checksum in sorted(checksums.items()):
out += "%s %s\n" % (checksum, fn)
return out | def function[generate_hashfile, parameter[directory, blacklist]]:
constant[
Compute a checksum for each file in `directory`, with the exception of files
specified in `blacklist`.
Args:
directory (str): Absolute or relative path to the directory.
blacklist (list/set/tuple): List of blacklisted filenames. Only
filenames are checked, not paths!
Returns:
str: Content of hashfile as it is specified in ABNF specification for project.
]
variable[checksums] assign[=] call[name[generate_checksums], parameter[name[directory], name[blacklist]]]
variable[out] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da2049639a0>, <ast.Name object at 0x7da204960580>]]] in starred[call[name[sorted], parameter[call[name[checksums].items, parameter[]]]]] begin[:]
<ast.AugAssign object at 0x7da2049629b0>
return[name[out]] | keyword[def] identifier[generate_hashfile] ( identifier[directory] , identifier[blacklist] = identifier[_BLACKLIST] ):
literal[string]
identifier[checksums] = identifier[generate_checksums] ( identifier[directory] , identifier[blacklist] )
identifier[out] = literal[string]
keyword[for] identifier[fn] , identifier[checksum] keyword[in] identifier[sorted] ( identifier[checksums] . identifier[items] ()):
identifier[out] += literal[string] %( identifier[checksum] , identifier[fn] )
keyword[return] identifier[out] | def generate_hashfile(directory, blacklist=_BLACKLIST):
"""
Compute a checksum for each file in `directory`, with the exception of files
specified in `blacklist`.
Args:
directory (str): Absolute or relative path to the directory.
blacklist (list/set/tuple): List of blacklisted filenames. Only
filenames are checked, not paths!
Returns:
str: Content of hashfile as it is specified in ABNF specification for project.
"""
checksums = generate_checksums(directory, blacklist)
out = ''
for (fn, checksum) in sorted(checksums.items()):
out += '%s %s\n' % (checksum, fn) # depends on [control=['for'], data=[]]
return out |
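# Self-contained sketch of the hashfile format above, using MD5 over the files
# of a flat directory in place of the generate_checksums helper (not shown
# here); the blacklist filtering and "checksum filename" lines mirror the
# docstring and output loop. sketch_hashfile is an illustrative stand-in.
import hashlib
import os

def sketch_hashfile(directory, blacklist=('hashfile.md5',)):
    checksums = {}
    for fn in os.listdir(directory):
        path = os.path.join(directory, fn)
        if fn in blacklist or not os.path.isfile(path):
            continue
        with open(path, 'rb') as f:
            checksums[fn] = hashlib.md5(f.read()).hexdigest()
    return ''.join('%s %s\n' % (checksum, fn)
                   for fn, checksum in sorted(checksums.items()))

print(sketch_hashfile('.'))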
def values(self):
"""Gets the parameter values
:returns: dict of inputs:
| *'nfft'*: int -- length, in samples, of FFT chunks
| *'window'*: str -- name of window to apply to FFT chunks
| *'overlap'*: float -- percent overlap of windows
"""
self.vals['nfft'] = self.ui.nfftSpnbx.value()
self.vals['window'] = str(self.ui.windowCmbx.currentText()).lower()
self.vals['overlap'] = self.ui.overlapSpnbx.value()
return self.vals | def function[values, parameter[self]]:
constant[Gets the parameter values
:returns: dict of inputs:
| *'nfft'*: int -- length, in samples, of FFT chunks
| *'window'*: str -- name of window to apply to FFT chunks
| *'overlap'*: float -- percent overlap of windows
]
call[name[self].vals][constant[nfft]] assign[=] call[name[self].ui.nfftSpnbx.value, parameter[]]
call[name[self].vals][constant[window]] assign[=] call[call[name[str], parameter[call[name[self].ui.windowCmbx.currentText, parameter[]]]].lower, parameter[]]
call[name[self].vals][constant[overlap]] assign[=] call[name[self].ui.overlapSpnbx.value, parameter[]]
return[name[self].vals] | keyword[def] identifier[values] ( identifier[self] ):
literal[string]
identifier[self] . identifier[vals] [ literal[string] ]= identifier[self] . identifier[ui] . identifier[nfftSpnbx] . identifier[value] ()
identifier[self] . identifier[vals] [ literal[string] ]= identifier[str] ( identifier[self] . identifier[ui] . identifier[windowCmbx] . identifier[currentText] ()). identifier[lower] ()
identifier[self] . identifier[vals] [ literal[string] ]= identifier[self] . identifier[ui] . identifier[overlapSpnbx] . identifier[value] ()
keyword[return] identifier[self] . identifier[vals] | def values(self):
"""Gets the parameter values
:returns: dict of inputs:
| *'nfft'*: int -- length, in samples, of FFT chunks
| *'window'*: str -- name of window to apply to FFT chunks
| *'overlap'*: float -- percent overlap of windows
"""
self.vals['nfft'] = self.ui.nfftSpnbx.value()
self.vals['window'] = str(self.ui.windowCmbx.currentText()).lower()
self.vals['overlap'] = self.ui.overlapSpnbx.value()
return self.vals |
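# How the three collected values are typically consumed, sketched with SciPy's
# spectrogram (an assumption -- the class above only gathers the settings):
# overlap is a percentage, so it becomes a sample count first.
import numpy as np
from scipy.signal import spectrogram

vals = {'nfft': 256, 'window': 'hann', 'overlap': 50.0}  # illustrative values
signal = np.random.randn(4096)
noverlap = int(vals['nfft'] * vals['overlap'] / 100.0)
f, t, Sxx = spectrogram(signal, fs=8000.0, window=vals['window'],
                        nperseg=vals['nfft'], noverlap=noverlap)
print(Sxx.shape)  # (129, ...) -- frequency bins x time segments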
def calcRandomAnchors(args, inworld=True):
"""
Generates a list of random anchor points such that all circles will fit
in the world, given the specified radius and worldsize.
The number of anchors to generate is given by nPatches
"""
anchors = []
rng = (args.patchRadius, args.worldSize - args.patchRadius)
if not inworld:
rng = (0, args.worldSize)
for i in range(args.nPatches):
anchors.append((random.randrange(rng[0], rng[1]),
random.randrange(rng[0], rng[1])))
return anchors | def function[calcRandomAnchors, parameter[args, inworld]]:
constant[
Generates a list of random anchor points such that all circles will fit
in the world, given the specified radius and worldsize.
The number of anchors to generate is given by nPatches
]
variable[anchors] assign[=] list[[]]
variable[rng] assign[=] tuple[[<ast.Attribute object at 0x7da1b179c0a0>, <ast.BinOp object at 0x7da1b179d8a0>]]
if <ast.UnaryOp object at 0x7da1b179c730> begin[:]
variable[rng] assign[=] tuple[[<ast.Constant object at 0x7da1b179c250>, <ast.Attribute object at 0x7da1b179cb50>]]
for taget[name[i]] in starred[call[name[range], parameter[name[args].nPatches]]] begin[:]
call[name[anchors].append, parameter[tuple[[<ast.Call object at 0x7da1b179d660>, <ast.Call object at 0x7da1b179c520>]]]]
return[name[anchors]] | keyword[def] identifier[calcRandomAnchors] ( identifier[args] , identifier[inworld] = keyword[True] ):
literal[string]
identifier[anchors] =[]
identifier[rng] =( identifier[args] . identifier[patchRadius] , identifier[args] . identifier[worldSize] - identifier[args] . identifier[patchRadius] )
keyword[if] keyword[not] identifier[inworld] :
identifier[rng] =( literal[int] , identifier[args] . identifier[worldSize] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[args] . identifier[nPatches] ):
identifier[anchors] . identifier[append] (( identifier[random] . identifier[randrange] ( identifier[rng] [ literal[int] ], identifier[rng] [ literal[int] ]),
identifier[random] . identifier[randrange] ( identifier[rng] [ literal[int] ], identifier[rng] [ literal[int] ])))
keyword[return] identifier[anchors] | def calcRandomAnchors(args, inworld=True):
"""
Generates a list of random anchor points such that all circles will fit
in the world, given the specified radius and worldsize.
The number of anchors to generate is given by nPatches
"""
anchors = []
rng = (args.patchRadius, args.worldSize - args.patchRadius)
if not inworld:
rng = (0, args.worldSize) # depends on [control=['if'], data=[]]
for i in range(args.nPatches):
anchors.append((random.randrange(rng[0], rng[1]), random.randrange(rng[0], rng[1]))) # depends on [control=['for'], data=[]]
return anchors |
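# Runnable example for calcRandomAnchors as defined above: it only reads three
# attributes from args, so a SimpleNamespace stands in for the real argument
# object here.
import random
from types import SimpleNamespace

args = SimpleNamespace(worldSize=100, patchRadius=10, nPatches=3)
random.seed(0)
print(calcRandomAnchors(args))                 # all circles fit in the world
print(calcRandomAnchors(args, inworld=False))  # anchors anywhere in [0, 100)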
def _import_ucsmsdk(self):
"""Imports the Ucsm SDK module.
This module is not installed as part of the normal Neutron
distributions. It is imported dynamically in this module so that
the import can be mocked, allowing unit testing without requiring
the installation of UcsSdk.
"""
# Check if SSL certificate checking has been disabled.
# If so, warn the user before proceeding.
if not CONF.ml2_cisco_ucsm.ucsm_https_verify:
LOG.warning(const.SSL_WARNING)
# Monkey patch the UCS sdk version of urllib2 to disable
# https verify if required.
from networking_cisco.ml2_drivers.ucsm import ucs_urllib2
ucsmsdkhandle = importutils.import_module('UcsSdk.UcsHandle')
ucsmsdkhandle.urllib2 = ucs_urllib2
ucsmsdk = importutils.import_module('UcsSdk')
return ucsmsdk | def function[_import_ucsmsdk, parameter[self]]:
constant[Imports the Ucsm SDK module.
This module is not installed as part of the normal Neutron
distributions. It is imported dynamically in this module so that
the import can be mocked, allowing unit testing without requiring
the installation of UcsSdk.
]
if <ast.UnaryOp object at 0x7da2041db010> begin[:]
call[name[LOG].warning, parameter[name[const].SSL_WARNING]]
from relative_module[networking_cisco.ml2_drivers.ucsm] import module[ucs_urllib2]
variable[ucsmsdkhandle] assign[=] call[name[importutils].import_module, parameter[constant[UcsSdk.UcsHandle]]]
name[ucsmsdkhandle].urllib2 assign[=] name[ucs_urllib2]
variable[ucsmsdk] assign[=] call[name[importutils].import_module, parameter[constant[UcsSdk]]]
return[name[ucsmsdk]] | keyword[def] identifier[_import_ucsmsdk] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[CONF] . identifier[ml2_cisco_ucsm] . identifier[ucsm_https_verify] :
identifier[LOG] . identifier[warning] ( identifier[const] . identifier[SSL_WARNING] )
keyword[from] identifier[networking_cisco] . identifier[ml2_drivers] . identifier[ucsm] keyword[import] identifier[ucs_urllib2]
identifier[ucsmsdkhandle] = identifier[importutils] . identifier[import_module] ( literal[string] )
identifier[ucsmsdkhandle] . identifier[urllib2] = identifier[ucs_urllib2]
identifier[ucsmsdk] = identifier[importutils] . identifier[import_module] ( literal[string] )
keyword[return] identifier[ucsmsdk] | def _import_ucsmsdk(self):
"""Imports the Ucsm SDK module.
This module is not installed as part of the normal Neutron
distributions. It is imported dynamically in this module so that
the import can be mocked, allowing unit testing without requiring
the installation of UcsSdk.
"""
# Check if SSL certificate checking has been disabled.
# If so, warn the user before proceeding.
if not CONF.ml2_cisco_ucsm.ucsm_https_verify:
LOG.warning(const.SSL_WARNING) # depends on [control=['if'], data=[]]
# Monkey patch the UCS sdk version of urllib2 to disable
# https verify if required.
from networking_cisco.ml2_drivers.ucsm import ucs_urllib2
ucsmsdkhandle = importutils.import_module('UcsSdk.UcsHandle')
ucsmsdkhandle.urllib2 = ucs_urllib2
ucsmsdk = importutils.import_module('UcsSdk')
return ucsmsdk |
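# The dynamic-import pattern above, shown with a stdlib module so it runs
# anywhere: importing by dotted name keeps the dependency optional and easy to
# mock in tests. oslo's importutils.import_module, as used above, behaves
# essentially like this stdlib call.
import importlib

json_mod = importlib.import_module('json')
print(json_mod.dumps({'ok': True}))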
def __publish(topic, message, subject=None):
""" Publish a message to a SNS topic
:type topic: str
:param topic: SNS topic to publish the message to
:type message: str
:param message: Message to send via SNS
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
"""
try:
SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
logger.info('Sent SNS notification to {0}'.format(topic))
except BotoServerError as error:
logger.error('Problem sending SNS notification: {0}'.format(
error.message))
return | def function[__publish, parameter[topic, message, subject]]:
constant[ Publish a message to a SNS topic
:type topic: str
:param topic: SNS topic to publish the message to
:type message: str
:param message: Message to send via SNS
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
]
<ast.Try object at 0x7da1b10838e0>
return[None] | keyword[def] identifier[__publish] ( identifier[topic] , identifier[message] , identifier[subject] = keyword[None] ):
literal[string]
keyword[try] :
identifier[SNS_CONNECTION] . identifier[publish] ( identifier[topic] = identifier[topic] , identifier[message] = identifier[message] , identifier[subject] = identifier[subject] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[topic] ))
keyword[except] identifier[BotoServerError] keyword[as] identifier[error] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] (
identifier[error] . identifier[message] ))
keyword[return] | def __publish(topic, message, subject=None):
""" Publish a message to a SNS topic
:type topic: str
:param topic: SNS topic to publish the message to
:type message: str
:param message: Message to send via SNS
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
"""
try:
SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
logger.info('Sent SNS notification to {0}'.format(topic)) # depends on [control=['try'], data=[]]
except BotoServerError as error:
logger.error('Problem sending SNS notification: {0}'.format(error.message)) # depends on [control=['except'], data=['error']]
return |
def get_node(conn, name):
'''
Return a node for the named VM
'''
datacenter_id = get_datacenter_id()
for item in conn.list_servers(datacenter_id)['items']:
if item['properties']['name'] == name:
node = {'id': item['id']}
node.update(item['properties'])
return node | def function[get_node, parameter[conn, name]]:
constant[
Return a node for the named VM
]
variable[datacenter_id] assign[=] call[name[get_datacenter_id], parameter[]]
for taget[name[item]] in starred[call[call[name[conn].list_servers, parameter[name[datacenter_id]]]][constant[items]]] begin[:]
if compare[call[call[name[item]][constant[properties]]][constant[name]] equal[==] name[name]] begin[:]
variable[node] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f35840>], [<ast.Subscript object at 0x7da1b1f354e0>]]
call[name[node].update, parameter[call[name[item]][constant[properties]]]]
return[name[node]] | keyword[def] identifier[get_node] ( identifier[conn] , identifier[name] ):
literal[string]
identifier[datacenter_id] = identifier[get_datacenter_id] ()
keyword[for] identifier[item] keyword[in] identifier[conn] . identifier[list_servers] ( identifier[datacenter_id] )[ literal[string] ]:
keyword[if] identifier[item] [ literal[string] ][ literal[string] ]== identifier[name] :
identifier[node] ={ literal[string] : identifier[item] [ literal[string] ]}
identifier[node] . identifier[update] ( identifier[item] [ literal[string] ])
keyword[return] identifier[node] | def get_node(conn, name):
"""
Return a node for the named VM
"""
datacenter_id = get_datacenter_id()
for item in conn.list_servers(datacenter_id)['items']:
if item['properties']['name'] == name:
node = {'id': item['id']}
node.update(item['properties'])
return node # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] |
def getChildren(self, name=None, ns=None):
"""
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
"""
if name is None:
matched = self.__root
else:
matched = self.getChild(name, ns)
if matched is None:
return []
else:
return [matched,] | def function[getChildren, parameter[self, name, ns]]:
constant[
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
]
if compare[name[name] is constant[None]] begin[:]
variable[matched] assign[=] name[self].__root
if compare[name[matched] is constant[None]] begin[:]
return[list[[]]] | keyword[def] identifier[getChildren] ( identifier[self] , identifier[name] = keyword[None] , identifier[ns] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[matched] = identifier[self] . identifier[__root]
keyword[else] :
identifier[matched] = identifier[self] . identifier[getChild] ( identifier[name] , identifier[ns] )
keyword[if] identifier[matched] keyword[is] keyword[None] :
keyword[return] []
keyword[else] :
keyword[return] [ identifier[matched] ,] | def getChildren(self, name=None, ns=None):
"""
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
"""
if name is None:
matched = self.__root # depends on [control=['if'], data=[]]
else:
matched = self.getChild(name, ns)
if matched is None:
return [] # depends on [control=['if'], data=[]]
else:
return [matched] |
def erosion(x, radius=3):
"""Return greyscale morphological erosion of an image,
see `skimage.morphology.erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`__.
Parameters
-----------
x : 2D array
A greyscale image.
radius : int
Radius of the disk-shaped mask.
Returns
-------
numpy.array
A processed greyscale image.
"""
mask = disk(radius)
x = _erosion(x, selem=mask)
return x | def function[erosion, parameter[x, radius]]:
constant[Return greyscale morphological erosion of an image,
see `skimage.morphology.erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`__.
Parameters
-----------
x : 2D array
A greyscale image.
radius : int
Radius of the disk-shaped mask.
Returns
-------
numpy.array
A processed greyscale image.
]
variable[mask] assign[=] call[name[disk], parameter[name[radius]]]
variable[x] assign[=] call[name[_erosion], parameter[name[x]]]
return[name[x]] | keyword[def] identifier[erosion] ( identifier[x] , identifier[radius] = literal[int] ):
literal[string]
identifier[mask] = identifier[disk] ( identifier[radius] )
identifier[x] = identifier[_erosion] ( identifier[x] , identifier[selem] = identifier[mask] )
keyword[return] identifier[x] | def erosion(x, radius=3):
"""Return greyscale morphological erosion of an image,
see `skimage.morphology.erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`__.
Parameters
-----------
x : 2D array
A greyscale image.
radius : int
Radius of the disk-shaped mask.
Returns
-------
numpy.array
A processed greyscale image.
"""
mask = disk(radius)
x = _erosion(x, selem=mask)
return x |
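# Minimal runnable demo of the erosion above on a tiny binary image; the
# structuring element is passed positionally to stay compatible across
# scikit-image versions (selem vs. footprint keyword).
import numpy as np
from skimage.morphology import erosion as _erosion, disk

x = np.zeros((9, 9), dtype=np.uint8)
x[2:7, 2:7] = 1                # a 5x5 square of ones
eroded = _erosion(x, disk(1))  # radius-1 cross shrinks it to 3x3
print(eroded.sum())            # 9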
def _assign_dates(self):
"""assign dates to nodes
Returns
-------
str
success/error code
"""
if self.tree is None:
self.logger("ClockTree._assign_dates: tree is not set, can't assign dates", 0)
return ttconf.ERROR
bad_branch_counter = 0
for node in self.tree.find_clades(order='postorder'):
if node.name in self.date_dict:
tmp_date = self.date_dict[node.name]
if np.isscalar(tmp_date) and np.isnan(tmp_date):
self.logger("WARNING: ClockTree.init: node %s has a bad date: %s"%(node.name, str(tmp_date)), 2, warn=True)
node.raw_date_constraint = None
node.bad_branch = True
else:
try:
tmp = np.mean(tmp_date)
node.raw_date_constraint = tmp_date
node.bad_branch = False
except:
self.logger("WARNING: ClockTree.init: node %s has a bad date: %s"%(node.name, str(tmp_date)), 2, warn=True)
node.raw_date_constraint = None
node.bad_branch = True
else: # nodes without date constraints
node.raw_date_constraint = None
if node.is_terminal():
# Terminal branches without date constraints marked as 'bad'
node.bad_branch = True
else:
# If all branches downstream are 'bad', and there is no date constraint for
# this node, the branch is marked as 'bad'
node.bad_branch = np.all([x.bad_branch for x in node])
if node.is_terminal() and node.bad_branch:
bad_branch_counter += 1
if bad_branch_counter>self.tree.count_terminals()-3:
self.logger("ERROR: ALMOST NO VALID DATE CONSTRAINTS, EXITING", 1, warn=True)
return ttconf.ERROR
return ttconf.SUCCESS | def function[_assign_dates, parameter[self]]:
constant[assign dates to nodes
Returns
-------
str
success/error code
]
if compare[name[self].tree is constant[None]] begin[:]
call[name[self].logger, parameter[constant[ClockTree._assign_dates: tree is not set, can't assign dates], constant[0]]]
return[name[ttconf].ERROR]
variable[bad_branch_counter] assign[=] constant[0]
for taget[name[node]] in starred[call[name[self].tree.find_clades, parameter[]]] begin[:]
if compare[name[node].name in name[self].date_dict] begin[:]
variable[tmp_date] assign[=] call[name[self].date_dict][name[node].name]
if <ast.BoolOp object at 0x7da1b2344970> begin[:]
call[name[self].logger, parameter[binary_operation[constant[WARNING: ClockTree.init: node %s has a bad date: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b2345a80>, <ast.Call object at 0x7da1b2346860>]]], constant[2]]]
name[node].raw_date_constraint assign[=] constant[None]
name[node].bad_branch assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b2346ce0> begin[:]
<ast.AugAssign object at 0x7da1b23479a0>
if compare[name[bad_branch_counter] greater[>] binary_operation[call[name[self].tree.count_terminals, parameter[]] - constant[3]]] begin[:]
call[name[self].logger, parameter[constant[ERROR: ALMOST NO VALID DATE CONSTRAINTS, EXITING], constant[1]]]
return[name[ttconf].ERROR]
return[name[ttconf].SUCCESS] | keyword[def] identifier[_assign_dates] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[tree] keyword[is] keyword[None] :
identifier[self] . identifier[logger] ( literal[string] , literal[int] )
keyword[return] identifier[ttconf] . identifier[ERROR]
identifier[bad_branch_counter] = literal[int]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[tree] . identifier[find_clades] ( identifier[order] = literal[string] ):
keyword[if] identifier[node] . identifier[name] keyword[in] identifier[self] . identifier[date_dict] :
identifier[tmp_date] = identifier[self] . identifier[date_dict] [ identifier[node] . identifier[name] ]
keyword[if] identifier[np] . identifier[isscalar] ( identifier[tmp_date] ) keyword[and] identifier[np] . identifier[isnan] ( identifier[tmp_date] ):
identifier[self] . identifier[logger] ( literal[string] %( identifier[node] . identifier[name] , identifier[str] ( identifier[tmp_date] )), literal[int] , identifier[warn] = keyword[True] )
identifier[node] . identifier[raw_date_constraint] = keyword[None]
identifier[node] . identifier[bad_branch] = keyword[True]
keyword[else] :
keyword[try] :
identifier[tmp] = identifier[np] . identifier[mean] ( identifier[tmp_date] )
identifier[node] . identifier[raw_date_constraint] = identifier[tmp_date]
identifier[node] . identifier[bad_branch] = keyword[False]
keyword[except] :
identifier[self] . identifier[logger] ( literal[string] %( identifier[node] . identifier[name] , identifier[str] ( identifier[tmp_date] )), literal[int] , identifier[warn] = keyword[True] )
identifier[node] . identifier[raw_date_constraint] = keyword[None]
identifier[node] . identifier[bad_branch] = keyword[True]
keyword[else] :
identifier[node] . identifier[raw_date_constraint] = keyword[None]
keyword[if] identifier[node] . identifier[is_terminal] ():
identifier[node] . identifier[bad_branch] = keyword[True]
keyword[else] :
identifier[node] . identifier[bad_branch] = identifier[np] . identifier[all] ([ identifier[x] . identifier[bad_branch] keyword[for] identifier[x] keyword[in] identifier[node] ])
keyword[if] identifier[node] . identifier[is_terminal] () keyword[and] identifier[node] . identifier[bad_branch] :
identifier[bad_branch_counter] += literal[int]
keyword[if] identifier[bad_branch_counter] > identifier[self] . identifier[tree] . identifier[count_terminals] ()- literal[int] :
identifier[self] . identifier[logger] ( literal[string] , literal[int] , identifier[warn] = keyword[True] )
keyword[return] identifier[ttconf] . identifier[ERROR]
keyword[return] identifier[ttconf] . identifier[SUCCESS] | def _assign_dates(self):
"""assign dates to nodes
Returns
-------
str
success/error code
"""
if self.tree is None:
self.logger("ClockTree._assign_dates: tree is not set, can't assign dates", 0)
return ttconf.ERROR # depends on [control=['if'], data=[]]
bad_branch_counter = 0
for node in self.tree.find_clades(order='postorder'):
if node.name in self.date_dict:
tmp_date = self.date_dict[node.name]
if np.isscalar(tmp_date) and np.isnan(tmp_date):
self.logger('WARNING: ClockTree.init: node %s has a bad date: %s' % (node.name, str(tmp_date)), 2, warn=True)
node.raw_date_constraint = None
node.bad_branch = True # depends on [control=['if'], data=[]]
else:
try:
tmp = np.mean(tmp_date)
node.raw_date_constraint = tmp_date
node.bad_branch = False # depends on [control=['try'], data=[]]
except:
self.logger('WARNING: ClockTree.init: node %s has a bad date: %s' % (node.name, str(tmp_date)), 2, warn=True)
node.raw_date_constraint = None
node.bad_branch = True # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else: # nodes without date constraints
node.raw_date_constraint = None
if node.is_terminal():
# Terminal branches without date constraints marked as 'bad'
node.bad_branch = True # depends on [control=['if'], data=[]]
else:
# If all branches downstream are 'bad', and there is no date constraint for
# this node, the branch is marked as 'bad'
node.bad_branch = np.all([x.bad_branch for x in node])
if node.is_terminal() and node.bad_branch:
bad_branch_counter += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
if bad_branch_counter > self.tree.count_terminals() - 3:
self.logger('ERROR: ALMOST NO VALID DATE CONSTRAINTS, EXITING', 1, warn=True)
return ttconf.ERROR # depends on [control=['if'], data=[]]
return ttconf.SUCCESS |
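# The per-node date checks above, isolated so they run directly: a scalar NaN
# marks a bad branch, and anything np.mean can average -- a scalar year or a
# (min, max) interval -- becomes a raw date constraint. Inputs illustrative.
import numpy as np

for tmp_date in (2015.3, (2014.0, 2016.0), np.nan):
    if np.isscalar(tmp_date) and np.isnan(tmp_date):
        print(tmp_date, '-> bad branch')
    else:
        print(tmp_date, '-> constrained, midpoint', np.mean(tmp_date))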
def scalar_term(self, st):
"""Return a _ScalarTermS or _ScalarTermU from a string, to perform text and HTML substitutions"""
if isinstance(st, binary_type):
return _ScalarTermS(st, self._jinja_sub)
elif isinstance(st, text_type):
return _ScalarTermU(st, self._jinja_sub)
elif st is None:
return _ScalarTermU(u(''), self._jinja_sub)
else:
return st | def function[scalar_term, parameter[self, st]]:
constant[Return a _ScalarTermS or _ScalarTermU from a string, to perform text and HTML substitutions]
if call[name[isinstance], parameter[name[st], name[binary_type]]] begin[:]
return[call[name[_ScalarTermS], parameter[name[st], name[self]._jinja_sub]]] | keyword[def] identifier[scalar_term] ( identifier[self] , identifier[st] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[st] , identifier[binary_type] ):
keyword[return] identifier[_ScalarTermS] ( identifier[st] , identifier[self] . identifier[_jinja_sub] )
keyword[elif] identifier[isinstance] ( identifier[st] , identifier[text_type] ):
keyword[return] identifier[_ScalarTermU] ( identifier[st] , identifier[self] . identifier[_jinja_sub] )
keyword[elif] identifier[st] keyword[is] keyword[None] :
keyword[return] identifier[_ScalarTermU] ( identifier[u] ( literal[string] ), identifier[self] . identifier[_jinja_sub] )
keyword[else] :
keyword[return] identifier[st] | def scalar_term(self, st):
"""Return a _ScalarTermS or _ScalarTermU from a string, to perform text and HTML substitutions"""
if isinstance(st, binary_type):
return _ScalarTermS(st, self._jinja_sub) # depends on [control=['if'], data=[]]
elif isinstance(st, text_type):
return _ScalarTermU(st, self._jinja_sub) # depends on [control=['if'], data=[]]
elif st is None:
return _ScalarTermU(u(''), self._jinja_sub) # depends on [control=['if'], data=[]]
else:
return st |
def get_ins_off(self, off):
"""
Get a particular instruction by using the address
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
idx = 0
for i in self.get_instructions():
if idx == off:
return i
idx += i.get_length()
return None | def function[get_ins_off, parameter[self, off]]:
constant[
Get a particular instruction by using the address
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
]
variable[idx] assign[=] constant[0]
for taget[name[i]] in starred[call[name[self].get_instructions, parameter[]]] begin[:]
if compare[name[idx] equal[==] name[off]] begin[:]
return[name[i]]
<ast.AugAssign object at 0x7da20e957310>
return[constant[None]] | keyword[def] identifier[get_ins_off] ( identifier[self] , identifier[off] ):
literal[string]
identifier[idx] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[get_instructions] ():
keyword[if] identifier[idx] == identifier[off] :
keyword[return] identifier[i]
identifier[idx] += identifier[i] . identifier[get_length] ()
keyword[return] keyword[None] | def get_ins_off(self, off):
"""
Get a particular instruction by using the address
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
idx = 0
for i in self.get_instructions():
if idx == off:
return i # depends on [control=['if'], data=[]]
idx += i.get_length() # depends on [control=['for'], data=['i']]
return None |
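# Standalone sketch of the offset walk in get_ins_off: instructions sit back
# to back, so a running length sum recovers the one starting at `off`. The
# tiny Ins class and ins_at helper are illustrative stand-ins for the real
# Instruction objects.
class Ins(object):
    def __init__(self, name, length):
        self.name, self._length = name, length

    def get_length(self):
        return self._length

def ins_at(instructions, off):
    idx = 0
    for i in instructions:
        if idx == off:
            return i
        idx += i.get_length()
    return None

prog = [Ins('const', 4), Ins('move', 2), Ins('return', 2)]
print(ins_at(prog, 4).name)  # move
print(ins_at(prog, 5))       # None -- offset 5 falls inside an instruction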
def update(self, key, item):
"""
Update item in hash table with specified key and item.
If key is already present, destroys old item and inserts new one.
Use free_fn method to ensure deallocator is properly called on item.
"""
return lib.zhash_update(self._as_parameter_, key, item) | def function[update, parameter[self, key, item]]:
constant[
Update item into hash table with specified key and item.
If key is already present, destroys old item and inserts new one.
Use free_fn method to ensure deallocator is properly called on item.
]
return[call[name[lib].zhash_update, parameter[name[self]._as_parameter_, name[key], name[item]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[key] , identifier[item] ):
literal[string]
keyword[return] identifier[lib] . identifier[zhash_update] ( identifier[self] . identifier[_as_parameter_] , identifier[key] , identifier[item] ) | def update(self, key, item):
"""
Update item into hash table with specified key and item.
If key is already present, destroys old item and inserts new one.
Use free_fn method to ensure deallocator is properly called on item.
"""
return lib.zhash_update(self._as_parameter_, key, item) |
def drain_rois(img):
"""Find all the ROIs in img and returns a similar volume with the ROIs
emptied, keeping only their border voxels.
This is useful for DTI tractography.
Parameters
----------
img: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Returns
-------
np.ndarray
an array of same shape as img_data
"""
    img_data = get_img_data(img)
    out = np.zeros(img_data.shape, dtype=img_data.dtype)
    # A full-ones kernel makes binary_hit_or_miss flag voxels whose entire
    # neighbourhood lies inside the ROI, i.e. the interior voxels.
    krn_dim = [3] * img_data.ndim
    kernel = np.ones(krn_dim, dtype=int)
    vals = np.unique(img_data)
    vals = vals[vals != 0]  # skip the background label
    for i in vals:
        roi = img_data == i
        hits = scn.binary_hit_or_miss(roi, kernel)
        roi[hits] = 0        # carve out the interior
        out[roi > 0] = i     # keep only the border shell
    return out
constant[Find all the ROIs in img and returns a similar volume with the ROIs
emptied, keeping only their border voxels.
This is useful for DTI tractography.
Parameters
----------
img: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Returns
-------
np.ndarray
an array of same shape as img_data
]
variable[img_data] assign[=] call[name[get_img_data], parameter[name[img]]]
variable[out] assign[=] call[name[np].zeros, parameter[name[img_data].shape]]
variable[krn_dim] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1afef9f60>]] * name[img_data].ndim]
variable[kernel] assign[=] call[name[np].ones, parameter[name[krn_dim]]]
variable[vals] assign[=] call[name[np].unique, parameter[name[img_data]]]
variable[vals] assign[=] call[name[vals]][compare[name[vals] not_equal[!=] constant[0]]]
for taget[name[i]] in starred[name[vals]] begin[:]
variable[roi] assign[=] compare[name[img_data] equal[==] name[i]]
variable[hits] assign[=] call[name[scn].binary_hit_or_miss, parameter[name[roi], name[kernel]]]
call[name[roi]][name[hits]] assign[=] constant[0]
call[name[out]][compare[name[roi] greater[>] constant[0]]] assign[=] name[i]
return[name[out]] | keyword[def] identifier[drain_rois] ( identifier[img] ):
literal[string]
identifier[img_data] = identifier[get_img_data] ( identifier[img] )
identifier[out] = identifier[np] . identifier[zeros] ( identifier[img_data] . identifier[shape] , identifier[dtype] = identifier[img_data] . identifier[dtype] )
identifier[krn_dim] =[ literal[int] ]* identifier[img_data] . identifier[ndim]
identifier[kernel] = identifier[np] . identifier[ones] ( identifier[krn_dim] , identifier[dtype] = identifier[int] )
identifier[vals] = identifier[np] . identifier[unique] ( identifier[img_data] )
identifier[vals] = identifier[vals] [ identifier[vals] != literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[vals] :
identifier[roi] = identifier[img_data] == identifier[i]
identifier[hits] = identifier[scn] . identifier[binary_hit_or_miss] ( identifier[roi] , identifier[kernel] )
identifier[roi] [ identifier[hits] ]= literal[int]
identifier[out] [ identifier[roi] > literal[int] ]= identifier[i]
keyword[return] identifier[out] | def drain_rois(img):
"""Find all the ROIs in img and returns a similar volume with the ROIs
emptied, keeping only their border voxels.
This is useful for DTI tractography.
Parameters
----------
img: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Returns
-------
np.ndarray
an array of same shape as img_data
"""
img_data = get_img_data(img)
out = np.zeros(img_data.shape, dtype=img_data.dtype)
krn_dim = [3] * img_data.ndim
kernel = np.ones(krn_dim, dtype=int)
vals = np.unique(img_data)
vals = vals[vals != 0]
for i in vals:
roi = img_data == i
hits = scn.binary_hit_or_miss(roi, kernel)
roi[hits] = 0
out[roi > 0] = i # depends on [control=['for'], data=['i']]
return out |
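A small sanity check of the border-keeping behaviour, assuming nibabel is installed and that get_img_data() accepts a Nifti1Image as the docstring states:

import numpy as np
import nibabel as nib   # assumed available alongside the module above
vol = np.zeros((5, 5, 5), dtype=np.int16)
vol[1:4, 1:4, 1:4] = 1                    # one solid 3x3x3 ROI
img = nib.Nifti1Image(vol, np.eye(4))
hollow = drain_rois(img)
assert hollow[2, 2, 2] == 0               # the single interior voxel is emptied
assert hollow[1, 1, 1] == 1               # border voxels are kept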
def as_xml(self,parent):
"""
Create XML representation of `self`.
:Parameters:
        - `parent`: the element to which the created node should be linked.
:Types:
- `parent`: `libxml2.xmlNode`
:return: an XML node.
:returntype: `libxml2.xmlNode`
"""
n=parent.newChild(None,"item",None)
if self.actor:
n.newTextChild(None,"actor",to_utf8(self.actor))
if self.reason:
n.newTextChild(None,"reason",to_utf8(self.reason))
n.setProp("affiliation",to_utf8(self.affiliation))
if self.role:
n.setProp("role",to_utf8(self.role))
if self.jid:
n.setProp("jid",to_utf8(self.jid.as_unicode()))
if self.nick:
n.setProp("nick",to_utf8(self.nick))
return n | def function[as_xml, parameter[self, parent]]:
constant[
Create XML representation of `self`.
:Parameters:
- `parent`: the element to which the created node should be linked to.
:Types:
- `parent`: `libxml2.xmlNode`
:return: an XML node.
:returntype: `libxml2.xmlNode`
]
variable[n] assign[=] call[name[parent].newChild, parameter[constant[None], constant[item], constant[None]]]
if name[self].actor begin[:]
call[name[n].newTextChild, parameter[constant[None], constant[actor], call[name[to_utf8], parameter[name[self].actor]]]]
if name[self].reason begin[:]
call[name[n].newTextChild, parameter[constant[None], constant[reason], call[name[to_utf8], parameter[name[self].reason]]]]
call[name[n].setProp, parameter[constant[affiliation], call[name[to_utf8], parameter[name[self].affiliation]]]]
if name[self].role begin[:]
call[name[n].setProp, parameter[constant[role], call[name[to_utf8], parameter[name[self].role]]]]
if name[self].jid begin[:]
call[name[n].setProp, parameter[constant[jid], call[name[to_utf8], parameter[call[name[self].jid.as_unicode, parameter[]]]]]]
if name[self].nick begin[:]
call[name[n].setProp, parameter[constant[nick], call[name[to_utf8], parameter[name[self].nick]]]]
return[name[n]] | keyword[def] identifier[as_xml] ( identifier[self] , identifier[parent] ):
literal[string]
identifier[n] = identifier[parent] . identifier[newChild] ( keyword[None] , literal[string] , keyword[None] )
keyword[if] identifier[self] . identifier[actor] :
identifier[n] . identifier[newTextChild] ( keyword[None] , literal[string] , identifier[to_utf8] ( identifier[self] . identifier[actor] ))
keyword[if] identifier[self] . identifier[reason] :
identifier[n] . identifier[newTextChild] ( keyword[None] , literal[string] , identifier[to_utf8] ( identifier[self] . identifier[reason] ))
identifier[n] . identifier[setProp] ( literal[string] , identifier[to_utf8] ( identifier[self] . identifier[affiliation] ))
keyword[if] identifier[self] . identifier[role] :
identifier[n] . identifier[setProp] ( literal[string] , identifier[to_utf8] ( identifier[self] . identifier[role] ))
keyword[if] identifier[self] . identifier[jid] :
identifier[n] . identifier[setProp] ( literal[string] , identifier[to_utf8] ( identifier[self] . identifier[jid] . identifier[as_unicode] ()))
keyword[if] identifier[self] . identifier[nick] :
identifier[n] . identifier[setProp] ( literal[string] , identifier[to_utf8] ( identifier[self] . identifier[nick] ))
keyword[return] identifier[n] | def as_xml(self, parent):
"""
Create XML representation of `self`.
:Parameters:
        - `parent`: the element to which the created node should be linked.
:Types:
- `parent`: `libxml2.xmlNode`
:return: an XML node.
:returntype: `libxml2.xmlNode`
"""
n = parent.newChild(None, 'item', None)
if self.actor:
n.newTextChild(None, 'actor', to_utf8(self.actor)) # depends on [control=['if'], data=[]]
if self.reason:
n.newTextChild(None, 'reason', to_utf8(self.reason)) # depends on [control=['if'], data=[]]
n.setProp('affiliation', to_utf8(self.affiliation))
if self.role:
n.setProp('role', to_utf8(self.role)) # depends on [control=['if'], data=[]]
if self.jid:
n.setProp('jid', to_utf8(self.jid.as_unicode())) # depends on [control=['if'], data=[]]
if self.nick:
n.setProp('nick', to_utf8(self.nick)) # depends on [control=['if'], data=[]]
return n |
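A hedged sketch of serializing one item; `item` stands for an instance of the (unshown) MUC item class and `parent` for an existing libxml2 element:

# Hypothetical: `item` carries affiliation/role/jid/nick as the method
# expects; `parent` is a libxml2.xmlNode such as a <query/> element.
node = item.as_xml(parent)
print(node.serialize())   # e.g. <item affiliation="member" role="participant"/>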
def _auth(profile=None, api_version=1, **connection_args):
'''
    Set up heat credentials and return a
    `heatclient.client.Client`. The optional parameter
    "api_version" defaults to 1.
    Only intended to be used within heat-enabled modules.
'''
if profile:
prefix = profile + ':keystone.'
else:
prefix = 'keystone.'
def get(key, default=None):
'''
Checks connection_args, then salt-minion config,
        and falls back to the specified default value.
'''
return connection_args.get('connection_' + key,
__salt__['config.get'](prefix + key, default))
user = get('user', 'admin')
password = get('password', None)
tenant = get('tenant', 'admin')
tenant_id = get('tenant_id')
auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')
insecure = get('insecure', False)
admin_token = get('token')
region_name = get('region_name', None)
if admin_token and api_version != 1 and not password:
# If we had a password we could just
# ignore the admin-token and move on...
        raise SaltInvocationError('Can only use keystone admin token ' +
                                  'with Heat API v1')
elif password:
# Can't use the admin-token anyway
kwargs = {'username': user,
'password': password,
'tenant_id': tenant_id,
'auth_url': auth_url,
'region_name': region_name,
'tenant_name': tenant}
# 'insecure' keyword not supported by all v2.0 keystone clients
# this ensures it's only passed in when defined
if insecure:
kwargs['insecure'] = True
elif api_version == 1 and admin_token:
kwargs = {'token': admin_token,
'auth_url': auth_url}
else:
raise SaltInvocationError('No credentials to authenticate with.')
token = __salt__['keystone.token_get'](profile)
kwargs['token'] = token['id']
    # This doesn't really prevent the password from showing up
    # in the minion log, as keystoneclient.session is
    # logging it anyway when in debug mode
kwargs.pop('password')
try:
heat_endpoint = __salt__['keystone.endpoint_get']('heat', profile)['url']
except KeyError:
heat_endpoint = __salt__['keystone.endpoint_get']('heat', profile)['publicurl']
heat_endpoint = heat_endpoint % token
log.debug('Calling heatclient.client.Client(%s, %s, **%s)',
api_version, heat_endpoint, kwargs)
# may raise exc.HTTPUnauthorized, exc.HTTPNotFound
# but we deal with those elsewhere
return heatclient.client.Client(api_version, endpoint=heat_endpoint, **kwargs) | def function[_auth, parameter[profile, api_version]]:
constant[
Set up heat credentials, returns
`heatclient.client.Client`. Optional parameter
"api_version" defaults to 1.
Only intended to be used within heat-enabled modules
]
if name[profile] begin[:]
variable[prefix] assign[=] binary_operation[name[profile] + constant[:keystone.]]
def function[get, parameter[key, default]]:
constant[
Checks connection_args, then salt-minion config,
falls back to specified default value.
]
return[call[name[connection_args].get, parameter[binary_operation[constant[connection_] + name[key]], call[call[name[__salt__]][constant[config.get]], parameter[binary_operation[name[prefix] + name[key]], name[default]]]]]]
variable[user] assign[=] call[name[get], parameter[constant[user], constant[admin]]]
variable[password] assign[=] call[name[get], parameter[constant[password], constant[None]]]
variable[tenant] assign[=] call[name[get], parameter[constant[tenant], constant[admin]]]
variable[tenant_id] assign[=] call[name[get], parameter[constant[tenant_id]]]
variable[auth_url] assign[=] call[name[get], parameter[constant[auth_url], constant[http://127.0.0.1:35357/v2.0]]]
variable[insecure] assign[=] call[name[get], parameter[constant[insecure], constant[False]]]
variable[admin_token] assign[=] call[name[get], parameter[constant[token]]]
variable[region_name] assign[=] call[name[get], parameter[constant[region_name], constant[None]]]
if <ast.BoolOp object at 0x7da1b1f251e0> begin[:]
<ast.Raise object at 0x7da1b1f24eb0>
variable[token] assign[=] call[call[name[__salt__]][constant[keystone.token_get]], parameter[name[profile]]]
call[name[kwargs]][constant[token]] assign[=] call[name[token]][constant[id]]
call[name[kwargs].pop, parameter[constant[password]]]
<ast.Try object at 0x7da1b2047910>
variable[heat_endpoint] assign[=] binary_operation[name[heat_endpoint] <ast.Mod object at 0x7da2590d6920> name[token]]
call[name[log].debug, parameter[constant[Calling heatclient.client.Client(%s, %s, **%s)], name[api_version], name[heat_endpoint], name[kwargs]]]
return[call[name[heatclient].client.Client, parameter[name[api_version]]]] | keyword[def] identifier[_auth] ( identifier[profile] = keyword[None] , identifier[api_version] = literal[int] ,** identifier[connection_args] ):
literal[string]
keyword[if] identifier[profile] :
identifier[prefix] = identifier[profile] + literal[string]
keyword[else] :
identifier[prefix] = literal[string]
keyword[def] identifier[get] ( identifier[key] , identifier[default] = keyword[None] ):
literal[string]
keyword[return] identifier[connection_args] . identifier[get] ( literal[string] + identifier[key] ,
identifier[__salt__] [ literal[string] ]( identifier[prefix] + identifier[key] , identifier[default] ))
identifier[user] = identifier[get] ( literal[string] , literal[string] )
identifier[password] = identifier[get] ( literal[string] , keyword[None] )
identifier[tenant] = identifier[get] ( literal[string] , literal[string] )
identifier[tenant_id] = identifier[get] ( literal[string] )
identifier[auth_url] = identifier[get] ( literal[string] , literal[string] )
identifier[insecure] = identifier[get] ( literal[string] , keyword[False] )
identifier[admin_token] = identifier[get] ( literal[string] )
identifier[region_name] = identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[admin_token] keyword[and] identifier[api_version] != literal[int] keyword[and] keyword[not] identifier[password] :
keyword[raise] identifier[SaltInvocationError] ( literal[string] +
literal[string] )
keyword[elif] identifier[password] :
identifier[kwargs] ={ literal[string] : identifier[user] ,
literal[string] : identifier[password] ,
literal[string] : identifier[tenant_id] ,
literal[string] : identifier[auth_url] ,
literal[string] : identifier[region_name] ,
literal[string] : identifier[tenant] }
keyword[if] identifier[insecure] :
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[elif] identifier[api_version] == literal[int] keyword[and] identifier[admin_token] :
identifier[kwargs] ={ literal[string] : identifier[admin_token] ,
literal[string] : identifier[auth_url] }
keyword[else] :
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
identifier[token] = identifier[__salt__] [ literal[string] ]( identifier[profile] )
identifier[kwargs] [ literal[string] ]= identifier[token] [ literal[string] ]
identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[try] :
identifier[heat_endpoint] = identifier[__salt__] [ literal[string] ]( literal[string] , identifier[profile] )[ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[heat_endpoint] = identifier[__salt__] [ literal[string] ]( literal[string] , identifier[profile] )[ literal[string] ]
identifier[heat_endpoint] = identifier[heat_endpoint] % identifier[token]
identifier[log] . identifier[debug] ( literal[string] ,
identifier[api_version] , identifier[heat_endpoint] , identifier[kwargs] )
keyword[return] identifier[heatclient] . identifier[client] . identifier[Client] ( identifier[api_version] , identifier[endpoint] = identifier[heat_endpoint] ,** identifier[kwargs] ) | def _auth(profile=None, api_version=1, **connection_args):
"""
    Set up heat credentials and return a
    `heatclient.client.Client`. The optional parameter
    "api_version" defaults to 1.
    Only intended to be used within heat-enabled modules.
"""
if profile:
prefix = profile + ':keystone.' # depends on [control=['if'], data=[]]
else:
prefix = 'keystone.'
def get(key, default=None):
"""
Checks connection_args, then salt-minion config,
        and falls back to the specified default value.
"""
return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default))
user = get('user', 'admin')
password = get('password', None)
tenant = get('tenant', 'admin')
tenant_id = get('tenant_id')
auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')
insecure = get('insecure', False)
admin_token = get('token')
region_name = get('region_name', None)
if admin_token and api_version != 1 and (not password):
# If we had a password we could just
# ignore the admin-token and move on...
        raise SaltInvocationError('Can only use keystone admin token ' + 'with Heat API v1') # depends on [control=['if'], data=[]]
elif password:
# Can't use the admin-token anyway
kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'region_name': region_name, 'tenant_name': tenant}
# 'insecure' keyword not supported by all v2.0 keystone clients
# this ensures it's only passed in when defined
if insecure:
kwargs['insecure'] = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif api_version == 1 and admin_token:
kwargs = {'token': admin_token, 'auth_url': auth_url} # depends on [control=['if'], data=[]]
else:
raise SaltInvocationError('No credentials to authenticate with.')
token = __salt__['keystone.token_get'](profile)
kwargs['token'] = token['id']
    # This doesn't really prevent the password from showing up
    # in the minion log, as keystoneclient.session is
    # logging it anyway when in debug mode
kwargs.pop('password')
try:
heat_endpoint = __salt__['keystone.endpoint_get']('heat', profile)['url'] # depends on [control=['try'], data=[]]
except KeyError:
heat_endpoint = __salt__['keystone.endpoint_get']('heat', profile)['publicurl'] # depends on [control=['except'], data=[]]
heat_endpoint = heat_endpoint % token
log.debug('Calling heatclient.client.Client(%s, %s, **%s)', api_version, heat_endpoint, kwargs)
# may raise exc.HTTPUnauthorized, exc.HTTPNotFound
# but we deal with those elsewhere
return heatclient.client.Client(api_version, endpoint=heat_endpoint, **kwargs) |
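_auth() reads each credential first from connection_* keyword arguments and then from minion config under the keystone. prefix, so both paths below are equivalent (placeholder values only):

# Minion config consumed via __salt__['config.get']:
#   keystone.user: admin
#   keystone.password: s3cr3t
#   keystone.tenant: admin
#   keystone.auth_url: http://192.168.0.10:35357/v2.0

# Or passed explicitly from another heat-enabled module:
client = _auth(api_version=1,
               connection_user='admin',
               connection_password='s3cr3t',
               connection_tenant='admin',
               connection_auth_url='http://192.168.0.10:35357/v2.0')
stacks = list(client.stacks.list())   # python-heatclient exposes stacks.list()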
def is_free(self):
""" returns True if any of the spectral model parameters is set to free, else False
"""
return bool(np.array([int(value.get("free", False)) for key, value in self.spectral_pars.items()]).sum()) | def function[is_free, parameter[self]]:
constant[ returns True if any of the spectral model parameters is set to free, else False
]
return[call[name[bool], parameter[call[call[name[np].array, parameter[<ast.ListComp object at 0x7da20e956380>]].sum, parameter[]]]]] | keyword[def] identifier[is_free] ( identifier[self] ):
literal[string]
keyword[return] identifier[bool] ( identifier[np] . identifier[array] ([ identifier[int] ( identifier[value] . identifier[get] ( literal[string] , keyword[False] )) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[spectral_pars] . identifier[items] ()]). identifier[sum] ()) | def is_free(self):
""" returns True if any of the spectral model parameters is set to free, else False
"""
return bool(np.array([int(value.get('free', False)) for (key, value) in self.spectral_pars.items()]).sum()) |
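The method only inspects the "free" flags, so the core test can be checked stand-alone (a sketch; the real spectral_pars layout is assumed to match this shape):

import numpy as np
# Hypothetical parameter dict of the shape is_free() iterates over.
spectral_pars = {
    'Index':     {'value': 2.0,   'free': True},
    'Prefactor': {'value': 1e-12, 'free': False},
}
flags = np.array([int(v.get('free', False)) for v in spectral_pars.values()])
assert bool(flags.sum())   # True: at least one parameter is free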
def p_declarations(self, p):
"""declarations : declarations declaration
| declaration"""
n = len(p)
if n == 3:
p[0] = p[1] + [p[2]]
elif n == 2:
p[0] = [p[1]] | def function[p_declarations, parameter[self, p]]:
constant[declarations : declarations declaration
| declaration]
variable[n] assign[=] call[name[len], parameter[name[p]]]
if compare[name[n] equal[==] constant[3]] begin[:]
call[name[p]][constant[0]] assign[=] binary_operation[call[name[p]][constant[1]] + list[[<ast.Subscript object at 0x7da1b016f850>]]] | keyword[def] identifier[p_declarations] ( identifier[self] , identifier[p] ):
literal[string]
identifier[n] = identifier[len] ( identifier[p] )
keyword[if] identifier[n] == literal[int] :
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]+[ identifier[p] [ literal[int] ]]
keyword[elif] identifier[n] == literal[int] :
identifier[p] [ literal[int] ]=[ identifier[p] [ literal[int] ]] | def p_declarations(self, p):
"""declarations : declarations declaration
| declaration"""
n = len(p)
if n == 3:
p[0] = p[1] + [p[2]] # depends on [control=['if'], data=[]]
elif n == 2:
p[0] = [p[1]] # depends on [control=['if'], data=[]] |
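The left-recursive rule accumulates one list across successive reductions; a stand-alone trace of what the action computes (PLY itself is not invoked here):

# PLY fills p[1..] from the matched symbols; the two branches mirror
# n == 2 (seed the list) and n == 3 (append to it).
decls = ['decl_a']           # declarations -> declaration
decls = decls + ['decl_b']   # declarations -> declarations declaration
decls = decls + ['decl_c']
assert decls == ['decl_a', 'decl_b', 'decl_c']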
def is_reseller_admin(self, req, admin_detail=None):
"""Returns True if the admin specified in the request represents a
.reseller_admin.
:param req: The swob.Request to check.
:param admin_detail: The previously retrieved dict from
:func:`get_admin_detail` or None for this function
to retrieve the admin_detail itself.
    :returns: True if .reseller_admin.
"""
req.credentials_valid = False
if self.is_super_admin(req):
return True
if not admin_detail:
admin_detail = self.get_admin_detail(req)
if not self.credentials_match(admin_detail,
req.headers.get('x-auth-admin-key')):
return False
req.credentials_valid = True
return '.reseller_admin' in (g['name'] for g in admin_detail['groups']) | def function[is_reseller_admin, parameter[self, req, admin_detail]]:
constant[Returns True if the admin specified in the request represents a
.reseller_admin.
:param req: The swob.Request to check.
:param admin_detail: The previously retrieved dict from
:func:`get_admin_detail` or None for this function
to retrieve the admin_detail itself.
:param returns: True if .reseller_admin.
]
name[req].credentials_valid assign[=] constant[False]
if call[name[self].is_super_admin, parameter[name[req]]] begin[:]
return[constant[True]]
if <ast.UnaryOp object at 0x7da1b04328c0> begin[:]
variable[admin_detail] assign[=] call[name[self].get_admin_detail, parameter[name[req]]]
if <ast.UnaryOp object at 0x7da1b04302e0> begin[:]
return[constant[False]]
name[req].credentials_valid assign[=] constant[True]
return[compare[constant[.reseller_admin] in <ast.GeneratorExp object at 0x7da1b04330d0>]] | keyword[def] identifier[is_reseller_admin] ( identifier[self] , identifier[req] , identifier[admin_detail] = keyword[None] ):
literal[string]
identifier[req] . identifier[credentials_valid] = keyword[False]
keyword[if] identifier[self] . identifier[is_super_admin] ( identifier[req] ):
keyword[return] keyword[True]
keyword[if] keyword[not] identifier[admin_detail] :
identifier[admin_detail] = identifier[self] . identifier[get_admin_detail] ( identifier[req] )
keyword[if] keyword[not] identifier[self] . identifier[credentials_match] ( identifier[admin_detail] ,
identifier[req] . identifier[headers] . identifier[get] ( literal[string] )):
keyword[return] keyword[False]
identifier[req] . identifier[credentials_valid] = keyword[True]
keyword[return] literal[string] keyword[in] ( identifier[g] [ literal[string] ] keyword[for] identifier[g] keyword[in] identifier[admin_detail] [ literal[string] ]) | def is_reseller_admin(self, req, admin_detail=None):
"""Returns True if the admin specified in the request represents a
.reseller_admin.
:param req: The swob.Request to check.
:param admin_detail: The previously retrieved dict from
:func:`get_admin_detail` or None for this function
to retrieve the admin_detail itself.
    :returns: True if .reseller_admin.
"""
req.credentials_valid = False
if self.is_super_admin(req):
return True # depends on [control=['if'], data=[]]
if not admin_detail:
admin_detail = self.get_admin_detail(req) # depends on [control=['if'], data=[]]
if not self.credentials_match(admin_detail, req.headers.get('x-auth-admin-key')):
return False # depends on [control=['if'], data=[]]
req.credentials_valid = True
return '.reseller_admin' in (g['name'] for g in admin_detail['groups']) |
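An illustrative admin_detail payload satisfying the final group test; beyond the 'groups' key the method actually reads, the record layout is an assumption:

# Hypothetical record: only admin_detail['groups'][i]['name'] is
# required by the membership check above.
admin_detail = {
    'groups': [{'name': 'reseller:admin'}, {'name': '.reseller_admin'}],
}
assert '.reseller_admin' in (g['name'] for g in admin_detail['groups'])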
def _initialize_references(model_class, name, bases, attrs):
"""Stores the list of reference field descriptors of a model."""
model_class._references = {}
h = {}
deferred = []
for k, v in attrs.iteritems():
if isinstance(v, ReferenceField):
model_class._references[k] = v
v.name = v.name or k
att = Attribute(name=v.attname)
h[v.attname] = att
setattr(model_class, v.attname, att)
refd = _initialize_referenced(model_class, v)
if refd:
deferred.append(refd)
attrs.update(h)
return deferred | def function[_initialize_references, parameter[model_class, name, bases, attrs]]:
constant[Stores the list of reference field descriptors of a model.]
name[model_class]._references assign[=] dictionary[[], []]
variable[h] assign[=] dictionary[[], []]
variable[deferred] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1927cd0>, <ast.Name object at 0x7da1b19278b0>]]] in starred[call[name[attrs].iteritems, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[ReferenceField]]] begin[:]
call[name[model_class]._references][name[k]] assign[=] name[v]
name[v].name assign[=] <ast.BoolOp object at 0x7da2044c3400>
variable[att] assign[=] call[name[Attribute], parameter[]]
call[name[h]][name[v].attname] assign[=] name[att]
call[name[setattr], parameter[name[model_class], name[v].attname, name[att]]]
variable[refd] assign[=] call[name[_initialize_referenced], parameter[name[model_class], name[v]]]
if name[refd] begin[:]
call[name[deferred].append, parameter[name[refd]]]
call[name[attrs].update, parameter[name[h]]]
return[name[deferred]] | keyword[def] identifier[_initialize_references] ( identifier[model_class] , identifier[name] , identifier[bases] , identifier[attrs] ):
literal[string]
identifier[model_class] . identifier[_references] ={}
identifier[h] ={}
identifier[deferred] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[attrs] . identifier[iteritems] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[ReferenceField] ):
identifier[model_class] . identifier[_references] [ identifier[k] ]= identifier[v]
identifier[v] . identifier[name] = identifier[v] . identifier[name] keyword[or] identifier[k]
identifier[att] = identifier[Attribute] ( identifier[name] = identifier[v] . identifier[attname] )
identifier[h] [ identifier[v] . identifier[attname] ]= identifier[att]
identifier[setattr] ( identifier[model_class] , identifier[v] . identifier[attname] , identifier[att] )
identifier[refd] = identifier[_initialize_referenced] ( identifier[model_class] , identifier[v] )
keyword[if] identifier[refd] :
identifier[deferred] . identifier[append] ( identifier[refd] )
identifier[attrs] . identifier[update] ( identifier[h] )
keyword[return] identifier[deferred] | def _initialize_references(model_class, name, bases, attrs):
"""Stores the list of reference field descriptors of a model."""
model_class._references = {}
h = {}
deferred = []
for (k, v) in attrs.iteritems():
if isinstance(v, ReferenceField):
model_class._references[k] = v
v.name = v.name or k
att = Attribute(name=v.attname)
h[v.attname] = att
setattr(model_class, v.attname, att)
refd = _initialize_referenced(model_class, v)
if refd:
deferred.append(refd) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
attrs.update(h)
return deferred |
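A hedged sketch of how a model metaclass might drive this helper; ReferenceField, Attribute, attname and _initialize_referenced all come from the surrounding (unshown) ORM, so the trace below is commentary rather than runnable setup:

# Hypothetical model in a redisco-style ORM:
#     class Post(Model):
#         author = ReferenceField(User)
#
# During class construction the metaclass would call, roughly:
#     deferred = _initialize_references(Post, 'Post', bases, attrs)
#
# Afterwards Post._references == {'author': <ReferenceField>}, attrs gains
# an Attribute stored under the field's attname (e.g. 'author_id'), and
# `deferred` collects back-references still to be wired up. Note that
# iteritems() pins this code to Python 2.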
def addinterval(instr, add, interval):
'''
    inserts `add` every `interval` characters; returns a string
'''
if not isinstance(instr, str):
instr = str(instr)
return add.join(
instr[i:i+interval]
for i in xrange(0,len(instr),interval)) | def function[addinterval, parameter[instr, add, interval]]:
constant[
adds string every n character. returns string
]
if <ast.UnaryOp object at 0x7da18f09ded0> begin[:]
variable[instr] assign[=] call[name[str], parameter[name[instr]]]
return[call[name[add].join, parameter[<ast.GeneratorExp object at 0x7da18f09ec20>]]] | keyword[def] identifier[addinterval] ( identifier[instr] , identifier[add] , identifier[interval] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[instr] , identifier[str] ):
identifier[instr] = identifier[str] ( identifier[instr] )
keyword[return] identifier[add] . identifier[join] (
identifier[instr] [ identifier[i] : identifier[i] + identifier[interval] ]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[instr] ), identifier[interval] )) | def addinterval(instr, add, interval):
"""
    inserts `add` every `interval` characters; returns a string
"""
if not isinstance(instr, str):
instr = str(instr) # depends on [control=['if'], data=[]]
return add.join((instr[i:i + interval] for i in xrange(0, len(instr), interval))) |
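Quick checks of the documented behaviour (Python 2, since the function uses xrange):

# Runs under Python 2; on Python 3 replace xrange with range first.
assert addinterval('12345678', '-', 4) == '1234-5678'
assert addinterval(1234567, ',', 3) == '123,456,7'   # non-str input is coerced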