code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k) |
---|---|---|---|
def get_token_network_identifiers(
chain_state: ChainState,
payment_network_id: PaymentNetworkID,
) -> List[TokenNetworkID]:
""" Return the list of token networks registered with the given payment network. """
payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id)
if payment_network is not None:
return [
token_network.address
for token_network in payment_network.tokenidentifiers_to_tokennetworks.values()
]
return list() | def function[get_token_network_identifiers, parameter[chain_state, payment_network_id]]:
constant[ Return the list of token networks registered with the given payment network. ]
variable[payment_network] assign[=] call[name[chain_state].identifiers_to_paymentnetworks.get, parameter[name[payment_network_id]]]
if compare[name[payment_network] is_not constant[None]] begin[:]
return[<ast.ListComp object at 0x7da1b170c040>]
return[call[name[list], parameter[]]] | keyword[def] identifier[get_token_network_identifiers] (
identifier[chain_state] : identifier[ChainState] ,
identifier[payment_network_id] : identifier[PaymentNetworkID] ,
)-> identifier[List] [ identifier[TokenNetworkID] ]:
literal[string]
identifier[payment_network] = identifier[chain_state] . identifier[identifiers_to_paymentnetworks] . identifier[get] ( identifier[payment_network_id] )
keyword[if] identifier[payment_network] keyword[is] keyword[not] keyword[None] :
keyword[return] [
identifier[token_network] . identifier[address]
keyword[for] identifier[token_network] keyword[in] identifier[payment_network] . identifier[tokenidentifiers_to_tokennetworks] . identifier[values] ()
]
keyword[return] identifier[list] () | def get_token_network_identifiers(chain_state: ChainState, payment_network_id: PaymentNetworkID) -> List[TokenNetworkID]:
""" Return the list of token networks registered with the given payment network. """
payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id)
if payment_network is not None:
return [token_network.address for token_network in payment_network.tokenidentifiers_to_tokennetworks.values()] # depends on [control=['if'], data=['payment_network']]
return list() |
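
A minimal usage sketch for the row above. The SimpleNamespace stand-ins are hypothetical and only mimic the attributes the function reads; real callers would pass Raiden's ChainState:

from types import SimpleNamespace

token_network = SimpleNamespace(address='0xTokenNetworkAddress')
payment_network = SimpleNamespace(
    tokenidentifiers_to_tokennetworks={'0xToken': token_network},
)
chain_state = SimpleNamespace(
    identifiers_to_paymentnetworks={'0xRegistry': payment_network},
)

# Prints ['0xTokenNetworkAddress']; an unknown registry id would yield [].
print(get_token_network_identifiers(chain_state, '0xRegistry'))
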
def visit_Program(self, node):
"""Vsitor for `Program` AST node."""
for child in node.children:
if not isinstance(child, FunctionDeclaration):
self.visit(child) | def function[visit_Program, parameter[self, node]]:
    constant[Visitor for `Program` AST node.]
for taget[name[child]] in starred[name[node].children] begin[:]
if <ast.UnaryOp object at 0x7da1b0ab8a00> begin[:]
call[name[self].visit, parameter[name[child]]] | keyword[def] identifier[visit_Program] ( identifier[self] , identifier[node] ):
literal[string]
keyword[for] identifier[child] keyword[in] identifier[node] . identifier[children] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[child] , identifier[FunctionDeclaration] ):
identifier[self] . identifier[visit] ( identifier[child] ) | def visit_Program(self, node):
"""Vsitor for `Program` AST node."""
for child in node.children:
if not isinstance(child, FunctionDeclaration):
self.visit(child) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] |
def update_snapshot(self, snapshot, display_name=None,
display_description=None):
"""
Update the specified values on the specified snapshot. You may specify
one or more values to update.
"""
return snapshot.update(display_name=display_name,
display_description=display_description) | def function[update_snapshot, parameter[self, snapshot, display_name, display_description]]:
constant[
Update the specified values on the specified snapshot. You may specify
one or more values to update.
]
return[call[name[snapshot].update, parameter[]]] | keyword[def] identifier[update_snapshot] ( identifier[self] , identifier[snapshot] , identifier[display_name] = keyword[None] ,
identifier[display_description] = keyword[None] ):
literal[string]
keyword[return] identifier[snapshot] . identifier[update] ( identifier[display_name] = identifier[display_name] ,
identifier[display_description] = identifier[display_description] ) | def update_snapshot(self, snapshot, display_name=None, display_description=None):
"""
Update the specified values on the specified snapshot. You may specify
one or more values to update.
"""
return snapshot.update(display_name=display_name, display_description=display_description) |
def delete(self, file_, delete_file=True):
"""
Deletes file_ references in Key Value store and optionally the file_
    itself.
"""
image_file = ImageFile(file_)
if delete_file:
image_file.delete()
default.kvstore.delete(image_file) | def function[delete, parameter[self, file_, delete_file]]:
constant[
Deletes file_ references in Key Value store and optionally the file_
    itself.
]
variable[image_file] assign[=] call[name[ImageFile], parameter[name[file_]]]
if name[delete_file] begin[:]
call[name[image_file].delete, parameter[]]
call[name[default].kvstore.delete, parameter[name[image_file]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[file_] , identifier[delete_file] = keyword[True] ):
literal[string]
identifier[image_file] = identifier[ImageFile] ( identifier[file_] )
keyword[if] identifier[delete_file] :
identifier[image_file] . identifier[delete] ()
identifier[default] . identifier[kvstore] . identifier[delete] ( identifier[image_file] ) | def delete(self, file_, delete_file=True):
"""
Deletes file_ references in Key Value store and optionally the file_
    itself.
"""
image_file = ImageFile(file_)
if delete_file:
image_file.delete() # depends on [control=['if'], data=[]]
default.kvstore.delete(image_file) |
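
A hedged call-site sketch for the delete method above; the engine instance and file path are assumptions, but this shows the two modes the delete_file flag selects:

# Drop the cached key-value references but keep the file on storage.
engine.delete('products/shoe.jpg', delete_file=False)
# Drop the references and remove the file itself (the default).
engine.delete('products/shoe.jpg')
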
def release(button=LEFT):
""" Sends an up event for the specified button, using the provided constants """
location = get_position()
button_code, _, button_up, _ = _button_mapping[button]
e = Quartz.CGEventCreateMouseEvent(
None,
button_up,
location,
button_code)
if _last_click["time"] is not None and _last_click["time"] > datetime.datetime.now() - datetime.timedelta(microseconds=300000) and _last_click["button"] == button and _last_click["position"] == location:
# Repeated Click
Quartz.CGEventSetIntegerValueField(
e,
Quartz.kCGMouseEventClickState,
_last_click["click_count"])
Quartz.CGEventPost(Quartz.kCGHIDEventTap, e)
_button_state[button] = False | def function[release, parameter[button]]:
constant[ Sends an up event for the specified button, using the provided constants ]
variable[location] assign[=] call[name[get_position], parameter[]]
<ast.Tuple object at 0x7da1b1bfaa10> assign[=] call[name[_button_mapping]][name[button]]
variable[e] assign[=] call[name[Quartz].CGEventCreateMouseEvent, parameter[constant[None], name[button_up], name[location], name[button_code]]]
if <ast.BoolOp object at 0x7da1b1bfbe80> begin[:]
call[name[Quartz].CGEventSetIntegerValueField, parameter[name[e], name[Quartz].kCGMouseEventClickState, call[name[_last_click]][constant[click_count]]]]
call[name[Quartz].CGEventPost, parameter[name[Quartz].kCGHIDEventTap, name[e]]]
call[name[_button_state]][name[button]] assign[=] constant[False] | keyword[def] identifier[release] ( identifier[button] = identifier[LEFT] ):
literal[string]
identifier[location] = identifier[get_position] ()
identifier[button_code] , identifier[_] , identifier[button_up] , identifier[_] = identifier[_button_mapping] [ identifier[button] ]
identifier[e] = identifier[Quartz] . identifier[CGEventCreateMouseEvent] (
keyword[None] ,
identifier[button_up] ,
identifier[location] ,
identifier[button_code] )
keyword[if] identifier[_last_click] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and] identifier[_last_click] [ literal[string] ]> identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[datetime] . identifier[timedelta] ( identifier[microseconds] = literal[int] ) keyword[and] identifier[_last_click] [ literal[string] ]== identifier[button] keyword[and] identifier[_last_click] [ literal[string] ]== identifier[location] :
identifier[Quartz] . identifier[CGEventSetIntegerValueField] (
identifier[e] ,
identifier[Quartz] . identifier[kCGMouseEventClickState] ,
identifier[_last_click] [ literal[string] ])
identifier[Quartz] . identifier[CGEventPost] ( identifier[Quartz] . identifier[kCGHIDEventTap] , identifier[e] )
identifier[_button_state] [ identifier[button] ]= keyword[False] | def release(button=LEFT):
""" Sends an up event for the specified button, using the provided constants """
location = get_position()
(button_code, _, button_up, _) = _button_mapping[button]
e = Quartz.CGEventCreateMouseEvent(None, button_up, location, button_code)
if _last_click['time'] is not None and _last_click['time'] > datetime.datetime.now() - datetime.timedelta(microseconds=300000) and (_last_click['button'] == button) and (_last_click['position'] == location):
# Repeated Click
Quartz.CGEventSetIntegerValueField(e, Quartz.kCGMouseEventClickState, _last_click['click_count']) # depends on [control=['if'], data=[]]
Quartz.CGEventPost(Quartz.kCGHIDEventTap, e)
_button_state[button] = False |
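
For context, a sketch of a full click built from the press/release pair; press() is assumed to exist alongside release() in the same module and to maintain the _last_click bookkeeping that release() consults when tagging double and triple clicks:

import time

def click(button=LEFT, hold=0.05):
    press(button)     # assumed counterpart that records _last_click
    time.sleep(hold)  # brief hold so the OS registers a deliberate click
    release(button)   # posts the up event; repeats within 0.3s stack up
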
async def get_devices(self, covers_only: bool = True) -> list:
"""Get a list of all devices associated with the account."""
from .device import MyQDevice
_LOGGER.debug('Retrieving list of devices')
devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT)
# print(json.dumps(devices_resp, indent=4))
device_list = []
if devices_resp is None:
return device_list
for device in devices_resp['Devices']:
if not covers_only or \
device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES:
self._devices.append({
'device_id': device['MyQDeviceId'],
'device_info': device
})
myq_device = MyQDevice(
self._devices[-1], self._brand, self)
device_list.append(myq_device)
# Store current device states.
self._store_device_states(devices_resp.get('Devices', []))
_LOGGER.debug('List of devices retrieved')
return device_list | <ast.AsyncFunctionDef object at 0x7da1b15165f0> | keyword[async] keyword[def] identifier[get_devices] ( identifier[self] , identifier[covers_only] : identifier[bool] = keyword[True] )-> identifier[list] :
literal[string]
keyword[from] . identifier[device] keyword[import] identifier[MyQDevice]
identifier[_LOGGER] . identifier[debug] ( literal[string] )
identifier[devices_resp] = keyword[await] identifier[self] . identifier[_request] ( literal[string] , identifier[DEVICE_LIST_ENDPOINT] )
identifier[device_list] =[]
keyword[if] identifier[devices_resp] keyword[is] keyword[None] :
keyword[return] identifier[device_list]
keyword[for] identifier[device] keyword[in] identifier[devices_resp] [ literal[string] ]:
keyword[if] keyword[not] identifier[covers_only] keyword[or] identifier[device] [ literal[string] ] keyword[in] identifier[SUPPORTED_DEVICE_TYPE_NAMES] :
identifier[self] . identifier[_devices] . identifier[append] ({
literal[string] : identifier[device] [ literal[string] ],
literal[string] : identifier[device]
})
identifier[myq_device] = identifier[MyQDevice] (
identifier[self] . identifier[_devices] [- literal[int] ], identifier[self] . identifier[_brand] , identifier[self] )
identifier[device_list] . identifier[append] ( identifier[myq_device] )
identifier[self] . identifier[_store_device_states] ( identifier[devices_resp] . identifier[get] ( literal[string] ,[]))
identifier[_LOGGER] . identifier[debug] ( literal[string] )
keyword[return] identifier[device_list] | async def get_devices(self, covers_only: bool=True) -> list:
"""Get a list of all devices associated with the account."""
from .device import MyQDevice
_LOGGER.debug('Retrieving list of devices')
devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT)
# print(json.dumps(devices_resp, indent=4))
device_list = []
if devices_resp is None:
return device_list # depends on [control=['if'], data=[]]
for device in devices_resp['Devices']:
if not covers_only or device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES:
self._devices.append({'device_id': device['MyQDeviceId'], 'device_info': device})
myq_device = MyQDevice(self._devices[-1], self._brand, self)
device_list.append(myq_device) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['device']]
# Store current device states.
self._store_device_states(devices_resp.get('Devices', []))
_LOGGER.debug('List of devices retrieved')
return device_list |
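
Because get_devices is a coroutine it has to be awaited; a hedged sketch of a call site, where MyQAPI and its constructor arguments are assumptions about the surrounding library rather than its documented API:

import asyncio

async def main():
    api = MyQAPI('user@example.com', 'hunter2', 'chamberlain')  # hypothetical
    covers = await api.get_devices(covers_only=True)
    for cover in covers:
        print(cover)

asyncio.run(main())
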
def extend(self, extendwith):
"""
Appends an irsb to the current irsb. The irsb that is appended is invalidated. The appended irsb's jumpkind and
default exit are used.
:param extendwith: The IRSB to append to this IRSB
:vartype extendwith: :class:`IRSB`
"""
if self.stmts_used == 0:
self._from_py(extendwith)
return
conversion_dict = { }
invalid_vals = (0xffffffff, -1)
new_size = self.size + extendwith.size
new_instructions = self.instructions + extendwith.instructions
new_direct_next = extendwith.direct_next
def convert_tmp(tmp):
"""
Converts a tmp from the appended-block into one in the appended-to-block. Creates a new tmp if it does not
already exist. Prevents collisions in tmp numbers between the two blocks.
:param tmp: The tmp number to convert
"""
if tmp not in conversion_dict:
tmp_type = extendwith.tyenv.lookup(tmp)
conversion_dict[tmp] = self.tyenv.add(tmp_type)
return conversion_dict[tmp]
def convert_expr(expr_):
"""
        Converts a VEX expression to use tmps of the appended-to-block instead of the appended block. Used to prevent
        collisions in tmp numbers between the two blocks.
        :param expr_: The VEX expression to convert
        :vartype expr_: :class:`IRExpr`
"""
if type(expr_) is RdTmp:
return RdTmp.get_instance(convert_tmp(expr_.tmp))
return expr_
for stmt_ in extendwith.statements:
stmttype = type(stmt_)
if stmttype is WrTmp:
stmt_.tmp = convert_tmp(stmt_.tmp)
elif stmttype is LoadG:
stmt_.dst = convert_tmp(stmt_.dst)
elif stmttype is LLSC:
stmt_.result = convert_tmp(stmt_.result)
elif stmttype is Dirty:
if stmt_.tmp not in invalid_vals:
stmt_.tmp = convert_tmp(stmt_.tmp)
for e in stmt_.args:
convert_expr(e)
elif stmttype is CAS:
if stmt_.oldLo not in invalid_vals: stmt_.oldLo = convert_tmp(stmt_.oldLo)
if stmt_.oldHi not in invalid_vals: stmt_.oldHi = convert_tmp(stmt_.oldHi)
# Convert all expressions
to_replace = { }
for expr_ in stmt_.expressions:
replacement = convert_expr(expr_)
if replacement is not expr_:
to_replace[expr_] = replacement
for expr_, replacement in to_replace.items():
stmt_.replace_expression(expr_, replacement)
# Add the converted statement to self.statements
self.statements.append(stmt_)
extendwith.next = convert_expr(extendwith.next)
self.next = extendwith.next
self.jumpkind = extendwith.jumpkind
self._size = new_size
self._instructions = new_instructions
self._direct_next = new_direct_next | def function[extend, parameter[self, extendwith]]:
constant[
Appends an irsb to the current irsb. The irsb that is appended is invalidated. The appended irsb's jumpkind and
default exit are used.
:param extendwith: The IRSB to append to this IRSB
:vartype extendwith: :class:`IRSB`
]
if compare[name[self].stmts_used equal[==] constant[0]] begin[:]
call[name[self]._from_py, parameter[name[extendwith]]]
return[None]
variable[conversion_dict] assign[=] dictionary[[], []]
variable[invalid_vals] assign[=] tuple[[<ast.Constant object at 0x7da1b15748b0>, <ast.UnaryOp object at 0x7da1b17f9bd0>]]
variable[new_size] assign[=] binary_operation[name[self].size + name[extendwith].size]
variable[new_instructions] assign[=] binary_operation[name[self].instructions + name[extendwith].instructions]
variable[new_direct_next] assign[=] name[extendwith].direct_next
def function[convert_tmp, parameter[tmp]]:
constant[
Converts a tmp from the appended-block into one in the appended-to-block. Creates a new tmp if it does not
already exist. Prevents collisions in tmp numbers between the two blocks.
:param tmp: The tmp number to convert
]
if compare[name[tmp] <ast.NotIn object at 0x7da2590d7190> name[conversion_dict]] begin[:]
variable[tmp_type] assign[=] call[name[extendwith].tyenv.lookup, parameter[name[tmp]]]
call[name[conversion_dict]][name[tmp]] assign[=] call[name[self].tyenv.add, parameter[name[tmp_type]]]
return[call[name[conversion_dict]][name[tmp]]]
def function[convert_expr, parameter[expr_]]:
constant[
        Converts a VEX expression to use tmps of the appended-to-block instead of the appended block. Used to prevent
        collisions in tmp numbers between the two blocks.
        :param expr_: The VEX expression to convert
        :vartype expr_: :class:`IRExpr`
]
if compare[call[name[type], parameter[name[expr_]]] is name[RdTmp]] begin[:]
return[call[name[RdTmp].get_instance, parameter[call[name[convert_tmp], parameter[name[expr_].tmp]]]]]
return[name[expr_]]
for taget[name[stmt_]] in starred[name[extendwith].statements] begin[:]
variable[stmttype] assign[=] call[name[type], parameter[name[stmt_]]]
if compare[name[stmttype] is name[WrTmp]] begin[:]
name[stmt_].tmp assign[=] call[name[convert_tmp], parameter[name[stmt_].tmp]]
variable[to_replace] assign[=] dictionary[[], []]
for taget[name[expr_]] in starred[name[stmt_].expressions] begin[:]
variable[replacement] assign[=] call[name[convert_expr], parameter[name[expr_]]]
if compare[name[replacement] is_not name[expr_]] begin[:]
call[name[to_replace]][name[expr_]] assign[=] name[replacement]
for taget[tuple[[<ast.Name object at 0x7da1b1546740>, <ast.Name object at 0x7da1b155ef20>]]] in starred[call[name[to_replace].items, parameter[]]] begin[:]
call[name[stmt_].replace_expression, parameter[name[expr_], name[replacement]]]
call[name[self].statements.append, parameter[name[stmt_]]]
name[extendwith].next assign[=] call[name[convert_expr], parameter[name[extendwith].next]]
name[self].next assign[=] name[extendwith].next
name[self].jumpkind assign[=] name[extendwith].jumpkind
name[self]._size assign[=] name[new_size]
name[self]._instructions assign[=] name[new_instructions]
name[self]._direct_next assign[=] name[new_direct_next] | keyword[def] identifier[extend] ( identifier[self] , identifier[extendwith] ):
literal[string]
keyword[if] identifier[self] . identifier[stmts_used] == literal[int] :
identifier[self] . identifier[_from_py] ( identifier[extendwith] )
keyword[return]
identifier[conversion_dict] ={}
identifier[invalid_vals] =( literal[int] ,- literal[int] )
identifier[new_size] = identifier[self] . identifier[size] + identifier[extendwith] . identifier[size]
identifier[new_instructions] = identifier[self] . identifier[instructions] + identifier[extendwith] . identifier[instructions]
identifier[new_direct_next] = identifier[extendwith] . identifier[direct_next]
keyword[def] identifier[convert_tmp] ( identifier[tmp] ):
literal[string]
keyword[if] identifier[tmp] keyword[not] keyword[in] identifier[conversion_dict] :
identifier[tmp_type] = identifier[extendwith] . identifier[tyenv] . identifier[lookup] ( identifier[tmp] )
identifier[conversion_dict] [ identifier[tmp] ]= identifier[self] . identifier[tyenv] . identifier[add] ( identifier[tmp_type] )
keyword[return] identifier[conversion_dict] [ identifier[tmp] ]
keyword[def] identifier[convert_expr] ( identifier[expr_] ):
literal[string]
keyword[if] identifier[type] ( identifier[expr_] ) keyword[is] identifier[RdTmp] :
keyword[return] identifier[RdTmp] . identifier[get_instance] ( identifier[convert_tmp] ( identifier[expr_] . identifier[tmp] ))
keyword[return] identifier[expr_]
keyword[for] identifier[stmt_] keyword[in] identifier[extendwith] . identifier[statements] :
identifier[stmttype] = identifier[type] ( identifier[stmt_] )
keyword[if] identifier[stmttype] keyword[is] identifier[WrTmp] :
identifier[stmt_] . identifier[tmp] = identifier[convert_tmp] ( identifier[stmt_] . identifier[tmp] )
keyword[elif] identifier[stmttype] keyword[is] identifier[LoadG] :
identifier[stmt_] . identifier[dst] = identifier[convert_tmp] ( identifier[stmt_] . identifier[dst] )
keyword[elif] identifier[stmttype] keyword[is] identifier[LLSC] :
identifier[stmt_] . identifier[result] = identifier[convert_tmp] ( identifier[stmt_] . identifier[result] )
keyword[elif] identifier[stmttype] keyword[is] identifier[Dirty] :
keyword[if] identifier[stmt_] . identifier[tmp] keyword[not] keyword[in] identifier[invalid_vals] :
identifier[stmt_] . identifier[tmp] = identifier[convert_tmp] ( identifier[stmt_] . identifier[tmp] )
keyword[for] identifier[e] keyword[in] identifier[stmt_] . identifier[args] :
identifier[convert_expr] ( identifier[e] )
keyword[elif] identifier[stmttype] keyword[is] identifier[CAS] :
keyword[if] identifier[stmt_] . identifier[oldLo] keyword[not] keyword[in] identifier[invalid_vals] : identifier[stmt_] . identifier[oldLo] = identifier[convert_tmp] ( identifier[stmt_] . identifier[oldLo] )
keyword[if] identifier[stmt_] . identifier[oldHi] keyword[not] keyword[in] identifier[invalid_vals] : identifier[stmt_] . identifier[oldHi] = identifier[convert_tmp] ( identifier[stmt_] . identifier[oldHi] )
identifier[to_replace] ={}
keyword[for] identifier[expr_] keyword[in] identifier[stmt_] . identifier[expressions] :
identifier[replacement] = identifier[convert_expr] ( identifier[expr_] )
keyword[if] identifier[replacement] keyword[is] keyword[not] identifier[expr_] :
identifier[to_replace] [ identifier[expr_] ]= identifier[replacement]
keyword[for] identifier[expr_] , identifier[replacement] keyword[in] identifier[to_replace] . identifier[items] ():
identifier[stmt_] . identifier[replace_expression] ( identifier[expr_] , identifier[replacement] )
identifier[self] . identifier[statements] . identifier[append] ( identifier[stmt_] )
identifier[extendwith] . identifier[next] = identifier[convert_expr] ( identifier[extendwith] . identifier[next] )
identifier[self] . identifier[next] = identifier[extendwith] . identifier[next]
identifier[self] . identifier[jumpkind] = identifier[extendwith] . identifier[jumpkind]
identifier[self] . identifier[_size] = identifier[new_size]
identifier[self] . identifier[_instructions] = identifier[new_instructions]
identifier[self] . identifier[_direct_next] = identifier[new_direct_next] | def extend(self, extendwith):
"""
Appends an irsb to the current irsb. The irsb that is appended is invalidated. The appended irsb's jumpkind and
default exit are used.
:param extendwith: The IRSB to append to this IRSB
:vartype extendwith: :class:`IRSB`
"""
if self.stmts_used == 0:
self._from_py(extendwith)
return # depends on [control=['if'], data=[]]
conversion_dict = {}
invalid_vals = (4294967295, -1)
new_size = self.size + extendwith.size
new_instructions = self.instructions + extendwith.instructions
new_direct_next = extendwith.direct_next
def convert_tmp(tmp):
"""
Converts a tmp from the appended-block into one in the appended-to-block. Creates a new tmp if it does not
already exist. Prevents collisions in tmp numbers between the two blocks.
:param tmp: The tmp number to convert
"""
if tmp not in conversion_dict:
tmp_type = extendwith.tyenv.lookup(tmp)
conversion_dict[tmp] = self.tyenv.add(tmp_type) # depends on [control=['if'], data=['tmp', 'conversion_dict']]
return conversion_dict[tmp]
def convert_expr(expr_):
"""
        Converts a VEX expression to use tmps of the appended-to-block instead of the appended block. Used to prevent
        collisions in tmp numbers between the two blocks.
        :param expr_: The VEX expression to convert
        :vartype expr_: :class:`IRExpr`
"""
if type(expr_) is RdTmp:
return RdTmp.get_instance(convert_tmp(expr_.tmp)) # depends on [control=['if'], data=['RdTmp']]
return expr_
for stmt_ in extendwith.statements:
stmttype = type(stmt_)
if stmttype is WrTmp:
stmt_.tmp = convert_tmp(stmt_.tmp) # depends on [control=['if'], data=[]]
elif stmttype is LoadG:
stmt_.dst = convert_tmp(stmt_.dst) # depends on [control=['if'], data=[]]
elif stmttype is LLSC:
stmt_.result = convert_tmp(stmt_.result) # depends on [control=['if'], data=[]]
elif stmttype is Dirty:
if stmt_.tmp not in invalid_vals:
stmt_.tmp = convert_tmp(stmt_.tmp) # depends on [control=['if'], data=[]]
for e in stmt_.args:
convert_expr(e) # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=[]]
elif stmttype is CAS:
if stmt_.oldLo not in invalid_vals:
stmt_.oldLo = convert_tmp(stmt_.oldLo) # depends on [control=['if'], data=[]]
if stmt_.oldHi not in invalid_vals:
stmt_.oldHi = convert_tmp(stmt_.oldHi) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Convert all expressions
to_replace = {}
for expr_ in stmt_.expressions:
replacement = convert_expr(expr_)
if replacement is not expr_:
to_replace[expr_] = replacement # depends on [control=['if'], data=['replacement', 'expr_']] # depends on [control=['for'], data=['expr_']]
for (expr_, replacement) in to_replace.items():
stmt_.replace_expression(expr_, replacement) # depends on [control=['for'], data=[]]
# Add the converted statement to self.statements
self.statements.append(stmt_) # depends on [control=['for'], data=['stmt_']]
extendwith.next = convert_expr(extendwith.next)
self.next = extendwith.next
self.jumpkind = extendwith.jumpkind
self._size = new_size
self._instructions = new_instructions
self._direct_next = new_direct_next |
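
The heart of extend is the tmp renumbering: each temporary of the appended block is mapped, on first sight, to a fresh temporary allocated in the host block's type environment, so the two blocks cannot collide. A standalone sketch of that mapping, independent of pyvex:

def make_tmp_converter(alloc_host_tmp, lookup_type):
    # alloc_host_tmp(ty): allocates a fresh tmp of type ty in the host block.
    # lookup_type(tmp): returns the type of a tmp in the appended block.
    conversion = {}
    def convert(tmp):
        if tmp not in conversion:
            conversion[tmp] = alloc_host_tmp(lookup_type(tmp))
        return conversion[tmp]
    return convert

fresh = iter(range(100, 200))
convert = make_tmp_converter(lambda ty: next(fresh), lambda tmp: 'Ity_I32')
assert convert(3) == convert(3)  # stable: same appended tmp, same host tmp
assert convert(3) != convert(4)  # distinct appended tmps stay distinct
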
def get_collections_for_image(self, image_id):
"""Get identifier of all collections that contain a given image.
Parameters
----------
image_id : string
        Unique identifier of image object
Returns
-------
List(string)
List of image collection identifier
"""
result = []
# Get all active collections that contain the image identifier
for document in self.collection.find({'active' : True, 'images.identifier' : image_id}):
result.append(str(document['_id']))
return result | def function[get_collections_for_image, parameter[self, image_id]]:
constant[Get identifier of all collections that contain a given image.
Parameters
----------
image_id : string
        Unique identifier of image object
Returns
-------
List(string)
List of image collection identifier
]
variable[result] assign[=] list[[]]
for taget[name[document]] in starred[call[name[self].collection.find, parameter[dictionary[[<ast.Constant object at 0x7da1b13498a0>, <ast.Constant object at 0x7da1b13495d0>], [<ast.Constant object at 0x7da1b1349690>, <ast.Name object at 0x7da1b134b9d0>]]]]] begin[:]
call[name[result].append, parameter[call[name[str], parameter[call[name[document]][constant[_id]]]]]]
return[name[result]] | keyword[def] identifier[get_collections_for_image] ( identifier[self] , identifier[image_id] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[document] keyword[in] identifier[self] . identifier[collection] . identifier[find] ({ literal[string] : keyword[True] , literal[string] : identifier[image_id] }):
identifier[result] . identifier[append] ( identifier[str] ( identifier[document] [ literal[string] ]))
keyword[return] identifier[result] | def get_collections_for_image(self, image_id):
"""Get identifier of all collections that contain a given image.
Parameters
----------
image_id : string
        Unique identifier of image object
Returns
-------
List(string)
List of image collection identifier
"""
result = []
# Get all active collections that contain the image identifier
for document in self.collection.find({'active': True, 'images.identifier': image_id}):
result.append(str(document['_id'])) # depends on [control=['for'], data=['document']]
return result |
def localize(self, i, **kwargs):
r"""Localize the kernels at a node (to visualize them).
That is particularly useful to visualize a filter in the vertex domain.
A kernel is localized on vertex :math:`v_i` by filtering a Kronecker
delta :math:`\delta_i` as
.. math:: (g(L) \delta_i)(j) = g(L)(i,j),
\text{ where } \delta_i(j) =
\begin{cases} 0 \text{ if } i \neq j, \\
1 \text{ if } i = j. \end{cases}
Parameters
----------
i : int
Index of the node where to localize the kernel.
kwargs: dict
Parameters to be passed to the :meth:`analyze` method.
Returns
-------
s : ndarray
Kernel localized at vertex i.
Examples
--------
Visualize heat diffusion on a grid by localizing the heat kernel.
>>> import matplotlib
>>> N = 20
>>> DELTA = N//2 * (N+1)
>>> G = graphs.Grid2d(N)
>>> G.estimate_lmax()
>>> g = filters.Heat(G, 100)
>>> s = g.localize(DELTA)
>>> _ = G.plot(s, highlight=DELTA)
"""
s = np.zeros(self.G.N)
s[i] = 1
return np.sqrt(self.G.N) * self.filter(s, **kwargs) | def function[localize, parameter[self, i]]:
constant[Localize the kernels at a node (to visualize them).
That is particularly useful to visualize a filter in the vertex domain.
A kernel is localized on vertex :math:`v_i` by filtering a Kronecker
delta :math:`\delta_i` as
.. math:: (g(L) \delta_i)(j) = g(L)(i,j),
\text{ where } \delta_i(j) =
\begin{cases} 0 \text{ if } i \neq j, \\
1 \text{ if } i = j. \end{cases}
Parameters
----------
i : int
Index of the node where to localize the kernel.
kwargs: dict
Parameters to be passed to the :meth:`analyze` method.
Returns
-------
s : ndarray
Kernel localized at vertex i.
Examples
--------
Visualize heat diffusion on a grid by localizing the heat kernel.
>>> import matplotlib
>>> N = 20
>>> DELTA = N//2 * (N+1)
>>> G = graphs.Grid2d(N)
>>> G.estimate_lmax()
>>> g = filters.Heat(G, 100)
>>> s = g.localize(DELTA)
>>> _ = G.plot(s, highlight=DELTA)
]
variable[s] assign[=] call[name[np].zeros, parameter[name[self].G.N]]
call[name[s]][name[i]] assign[=] constant[1]
return[binary_operation[call[name[np].sqrt, parameter[name[self].G.N]] * call[name[self].filter, parameter[name[s]]]]] | keyword[def] identifier[localize] ( identifier[self] , identifier[i] ,** identifier[kwargs] ):
literal[string]
identifier[s] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[G] . identifier[N] )
identifier[s] [ identifier[i] ]= literal[int]
keyword[return] identifier[np] . identifier[sqrt] ( identifier[self] . identifier[G] . identifier[N] )* identifier[self] . identifier[filter] ( identifier[s] ,** identifier[kwargs] ) | def localize(self, i, **kwargs):
"""Localize the kernels at a node (to visualize them).
That is particularly useful to visualize a filter in the vertex domain.
A kernel is localized on vertex :math:`v_i` by filtering a Kronecker
delta :math:`\\delta_i` as
.. math:: (g(L) \\delta_i)(j) = g(L)(i,j),
\\text{ where } \\delta_i(j) =
\\begin{cases} 0 \\text{ if } i \\neq j, \\\\
1 \\text{ if } i = j. \\end{cases}
Parameters
----------
i : int
Index of the node where to localize the kernel.
kwargs: dict
Parameters to be passed to the :meth:`analyze` method.
Returns
-------
s : ndarray
Kernel localized at vertex i.
Examples
--------
Visualize heat diffusion on a grid by localizing the heat kernel.
>>> import matplotlib
>>> N = 20
>>> DELTA = N//2 * (N+1)
>>> G = graphs.Grid2d(N)
>>> G.estimate_lmax()
>>> g = filters.Heat(G, 100)
>>> s = g.localize(DELTA)
>>> _ = G.plot(s, highlight=DELTA)
"""
s = np.zeros(self.G.N)
s[i] = 1
return np.sqrt(self.G.N) * self.filter(s, **kwargs) |
def is_bool(dtype):
"""Returns whether this is a boolean data type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_bool'):
return dtype.is_bool
# We use `kind` because:
# np.issubdtype(np.uint8, np.bool) == True.
return np.dtype(dtype).kind == 'b' | def function[is_bool, parameter[dtype]]:
constant[Returns whether this is a boolean data type.]
variable[dtype] assign[=] call[name[tf].as_dtype, parameter[name[dtype]]]
if call[name[hasattr], parameter[name[dtype], constant[is_bool]]] begin[:]
return[name[dtype].is_bool]
return[compare[call[name[np].dtype, parameter[name[dtype]]].kind equal[==] constant[b]]] | keyword[def] identifier[is_bool] ( identifier[dtype] ):
literal[string]
identifier[dtype] = identifier[tf] . identifier[as_dtype] ( identifier[dtype] )
keyword[if] identifier[hasattr] ( identifier[dtype] , literal[string] ):
keyword[return] identifier[dtype] . identifier[is_bool]
keyword[return] identifier[np] . identifier[dtype] ( identifier[dtype] ). identifier[kind] == literal[string] | def is_bool(dtype):
"""Returns whether this is a boolean data type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_bool'):
return dtype.is_bool # depends on [control=['if'], data=[]]
# We use `kind` because:
# np.issubdtype(np.uint8, np.bool) == True.
return np.dtype(dtype).kind == 'b' |
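
A quick check of the dispatch above, assuming TensorFlow and NumPy are importable as tf and np as the function expects:

import numpy as np
import tensorflow as tf

print(is_bool(tf.bool))   # True, via the is_bool attribute
print(is_bool(np.bool_))  # True
print(is_bool(np.uint8))  # False, despite the np.issubdtype quirk noted above
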
def tracker_print(msg):
"""Print message to the tracker.
This function can be used to communicate the information of
the progress to the tracker
Parameters
----------
msg : str
The message to be printed to tracker.
"""
if not isinstance(msg, str):
msg = str(msg)
    _LIB.RabitTrackerPrint(ctypes.c_char_p(msg.encode('utf-8'))) | def function[tracker_print, parameter[msg]]:
constant[Print message to the tracker.
This function can be used to communicate the information of
the progress to the tracker
Parameters
----------
msg : str
The message to be printed to tracker.
]
if <ast.UnaryOp object at 0x7da1b20647c0> begin[:]
variable[msg] assign[=] call[name[str], parameter[name[msg]]]
    call[name[_LIB].RabitTrackerPrint, parameter[call[name[ctypes].c_char_p, parameter[call[name[msg].encode, parameter[constant[utf-8]]]]]]] | keyword[def] identifier[tracker_print] ( identifier[msg] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[msg] , identifier[str] ):
identifier[msg] = identifier[str] ( identifier[msg] )
    identifier[_LIB] . identifier[RabitTrackerPrint] ( identifier[ctypes] . identifier[c_char_p] ( identifier[msg] . identifier[encode] ( literal[string] ))) | def tracker_print(msg):
"""Print message to the tracker.
This function can be used to communicate the information of
the progress to the tracker
Parameters
----------
msg : str
The message to be printed to tracker.
"""
if not isinstance(msg, str):
msg = str(msg) # depends on [control=['if'], data=[]]
    _LIB.RabitTrackerPrint(ctypes.c_char_p(msg.encode('utf-8')))
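
Usage is a single call; anything that is not already a str is coerced first, so structured progress info can be passed directly (a sketch, assuming rabit's tracker is running):

tracker_print({'round': 10, 'train-auc': 0.93})
tracker_print('worker finished\n')
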
def group_barh(self, column_label, **vargs):
"""Plot a horizontal bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``barh`` in that there is no need to specify
bar heights; the size of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``barh`` behaves more like ``plot`` or
``scatter`` (which require the second coordinate of each point to be
specified in another column).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
"""
self.group(column_label).barh(column_label, **vargs) | def function[group_barh, parameter[self, column_label]]:
constant[Plot a horizontal bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``barh`` in that there is no need to specify
bar heights; the size of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``barh`` behaves more like ``plot`` or
``scatter`` (which require the second coordinate of each point to be
specified in another column).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
]
call[call[name[self].group, parameter[name[column_label]]].barh, parameter[name[column_label]]] | keyword[def] identifier[group_barh] ( identifier[self] , identifier[column_label] ,** identifier[vargs] ):
literal[string]
identifier[self] . identifier[group] ( identifier[column_label] ). identifier[barh] ( identifier[column_label] ,** identifier[vargs] ) | def group_barh(self, column_label, **vargs):
"""Plot a horizontal bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``barh`` in that there is no need to specify
bar heights; the size of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``barh`` behaves more like ``plot`` or
``scatter`` (which require the second coordinate of each point to be
specified in another column).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
"""
self.group(column_label).barh(column_label, **vargs) |
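
A hedged usage sketch, assuming this method belongs to the datascience-style Table API its docstring describes:

from datascience import Table

t = Table().with_columns('flavor', ['mocha', 'latte', 'mocha', 'drip'])
t.group_barh('flavor')  # one horizontal bar per flavor, sized by its count
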
def _discovery_resp(self, data):
""" Handle a discovery response.
:param data: Payload.
:returns: MAC and reversed MAC.
"""
if _is_discovery_response(data):
_LOGGER.debug("Discovered MAC of %s: %s", self.host,
binascii.hexlify(data[7:13]).decode())
return (data[7:13], data[19:25]) | def function[_discovery_resp, parameter[self, data]]:
constant[ Handle a discovery response.
:param data: Payload.
:returns: MAC and reversed MAC.
]
if call[name[_is_discovery_response], parameter[name[data]]] begin[:]
call[name[_LOGGER].debug, parameter[constant[Discovered MAC of %s: %s], name[self].host, call[call[name[binascii].hexlify, parameter[call[name[data]][<ast.Slice object at 0x7da1b2566fb0>]]].decode, parameter[]]]]
return[tuple[[<ast.Subscript object at 0x7da1b2490cd0>, <ast.Subscript object at 0x7da1b24908e0>]]] | keyword[def] identifier[_discovery_resp] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[_is_discovery_response] ( identifier[data] ):
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[self] . identifier[host] ,
identifier[binascii] . identifier[hexlify] ( identifier[data] [ literal[int] : literal[int] ]). identifier[decode] ())
keyword[return] ( identifier[data] [ literal[int] : literal[int] ], identifier[data] [ literal[int] : literal[int] ]) | def _discovery_resp(self, data):
""" Handle a discovery response.
:param data: Payload.
:returns: MAC and reversed MAC.
"""
if _is_discovery_response(data):
_LOGGER.debug('Discovered MAC of %s: %s', self.host, binascii.hexlify(data[7:13]).decode())
return (data[7:13], data[19:25]) # depends on [control=['if'], data=[]] |
def image_url(self, pixel_size=None):
"""
Get the URL for the user icon in the desired pixel size, if it exists. If no
size is supplied, give the URL for the full-size image.
"""
if "profile" not in self._raw:
return
profile = self._raw["profile"]
if (pixel_size):
img_key = "image_%s" % pixel_size
if img_key in profile:
return profile[img_key]
return profile[self._DEFAULT_IMAGE_KEY] | def function[image_url, parameter[self, pixel_size]]:
constant[
Get the URL for the user icon in the desired pixel size, if it exists. If no
size is supplied, give the URL for the full-size image.
]
if compare[constant[profile] <ast.NotIn object at 0x7da2590d7190> name[self]._raw] begin[:]
return[None]
variable[profile] assign[=] call[name[self]._raw][constant[profile]]
if name[pixel_size] begin[:]
variable[img_key] assign[=] binary_operation[constant[image_%s] <ast.Mod object at 0x7da2590d6920> name[pixel_size]]
if compare[name[img_key] in name[profile]] begin[:]
return[call[name[profile]][name[img_key]]]
return[call[name[profile]][name[self]._DEFAULT_IMAGE_KEY]] | keyword[def] identifier[image_url] ( identifier[self] , identifier[pixel_size] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_raw] :
keyword[return]
identifier[profile] = identifier[self] . identifier[_raw] [ literal[string] ]
keyword[if] ( identifier[pixel_size] ):
identifier[img_key] = literal[string] % identifier[pixel_size]
keyword[if] identifier[img_key] keyword[in] identifier[profile] :
keyword[return] identifier[profile] [ identifier[img_key] ]
keyword[return] identifier[profile] [ identifier[self] . identifier[_DEFAULT_IMAGE_KEY] ] | def image_url(self, pixel_size=None):
"""
Get the URL for the user icon in the desired pixel size, if it exists. If no
size is supplied, give the URL for the full-size image.
"""
if 'profile' not in self._raw:
return # depends on [control=['if'], data=[]]
profile = self._raw['profile']
if pixel_size:
img_key = 'image_%s' % pixel_size
if img_key in profile:
return profile[img_key] # depends on [control=['if'], data=['img_key', 'profile']] # depends on [control=['if'], data=[]]
return profile[self._DEFAULT_IMAGE_KEY] |
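
A hedged sketch of the lookup against a Slack-style profile payload; the _raw shape below is inferred from the attribute accesses above, not taken from the library's docs:

# user._raw might resemble:
# {'profile': {'image_48': 'https://.../avatar_48.png',
#              'image_original': 'https://.../avatar_full.png'}}
avatar = user.image_url(pixel_size=48)  # profile['image_48'] when present
full = user.image_url()                 # falls back to the default image key
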
def _get_voltage_angle_var(self, refs, buses):
""" Returns the voltage angle variable set.
"""
Va = array([b.v_angle * (pi / 180.0) for b in buses])
Vau = Inf * ones(len(buses))
Val = -Vau
Vau[refs] = Va[refs]
Val[refs] = Va[refs]
return Variable("Va", len(buses), Va, Val, Vau) | def function[_get_voltage_angle_var, parameter[self, refs, buses]]:
constant[ Returns the voltage angle variable set.
]
variable[Va] assign[=] call[name[array], parameter[<ast.ListComp object at 0x7da1b2518d00>]]
variable[Vau] assign[=] binary_operation[name[Inf] * call[name[ones], parameter[call[name[len], parameter[name[buses]]]]]]
variable[Val] assign[=] <ast.UnaryOp object at 0x7da1b25198a0>
call[name[Vau]][name[refs]] assign[=] call[name[Va]][name[refs]]
call[name[Val]][name[refs]] assign[=] call[name[Va]][name[refs]]
return[call[name[Variable], parameter[constant[Va], call[name[len], parameter[name[buses]]], name[Va], name[Val], name[Vau]]]] | keyword[def] identifier[_get_voltage_angle_var] ( identifier[self] , identifier[refs] , identifier[buses] ):
literal[string]
identifier[Va] = identifier[array] ([ identifier[b] . identifier[v_angle] *( identifier[pi] / literal[int] ) keyword[for] identifier[b] keyword[in] identifier[buses] ])
identifier[Vau] = identifier[Inf] * identifier[ones] ( identifier[len] ( identifier[buses] ))
identifier[Val] =- identifier[Vau]
identifier[Vau] [ identifier[refs] ]= identifier[Va] [ identifier[refs] ]
identifier[Val] [ identifier[refs] ]= identifier[Va] [ identifier[refs] ]
keyword[return] identifier[Variable] ( literal[string] , identifier[len] ( identifier[buses] ), identifier[Va] , identifier[Val] , identifier[Vau] ) | def _get_voltage_angle_var(self, refs, buses):
""" Returns the voltage angle variable set.
"""
Va = array([b.v_angle * (pi / 180.0) for b in buses])
Vau = Inf * ones(len(buses))
Val = -Vau
Vau[refs] = Va[refs]
Val[refs] = Va[refs]
return Variable('Va', len(buses), Va, Val, Vau) |
def set_headers(context):
"""
Parameters:
+--------------+---------------+
| header_name | header_value |
+==============+===============+
| header1 | value1 |
+--------------+---------------+
| header2 | value2 |
+--------------+---------------+
"""
safe_add_http_request_context_to_behave_context(context)
headers = dict()
for row in context.table:
headers[row["header_name"]] = row["header_value"]
context.http_request_context.headers = headers | def function[set_headers, parameter[context]]:
constant[
Parameters:
+--------------+---------------+
| header_name | header_value |
+==============+===============+
| header1 | value1 |
+--------------+---------------+
| header2 | value2 |
+--------------+---------------+
]
call[name[safe_add_http_request_context_to_behave_context], parameter[name[context]]]
variable[headers] assign[=] call[name[dict], parameter[]]
for taget[name[row]] in starred[name[context].table] begin[:]
call[name[headers]][call[name[row]][constant[header_name]]] assign[=] call[name[row]][constant[header_value]]
name[context].http_request_context.headers assign[=] name[headers] | keyword[def] identifier[set_headers] ( identifier[context] ):
literal[string]
identifier[safe_add_http_request_context_to_behave_context] ( identifier[context] )
identifier[headers] = identifier[dict] ()
keyword[for] identifier[row] keyword[in] identifier[context] . identifier[table] :
identifier[headers] [ identifier[row] [ literal[string] ]]= identifier[row] [ literal[string] ]
identifier[context] . identifier[http_request_context] . identifier[headers] = identifier[headers] | def set_headers(context):
"""
Parameters:
+--------------+---------------+
| header_name | header_value |
+==============+===============+
| header1 | value1 |
+--------------+---------------+
| header2 | value2 |
+--------------+---------------+
"""
safe_add_http_request_context_to_behave_context(context)
headers = dict()
for row in context.table:
headers[row['header_name']] = row['header_value']
context.http_request_context.headers = headers # depends on [control=['for'], data=['row']] |
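
A hedged sketch of how this step might be wired into behave; the step text is an assumption, since the decorator is not part of the row above:

from behave import step

@step('I set the following HTTP headers')
def step_set_headers(context):
    # The feature file supplies the table, e.g.:
    #   | header_name  | header_value     |
    #   | Content-Type | application/json |
    #   | Accept       | application/json |
    set_headers(context)
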
def score(count_bigram, count1, count2, n_words):
"""Collocation score"""
if n_words <= count1 or n_words <= count2:
        # only one word appears in the whole document
return 0
N = n_words
c12 = count_bigram
c1 = count1
c2 = count2
p = c2 / N
p1 = c12 / c1
p2 = (c2 - c12) / (N - c1)
score = (l(c12, c1, p) + l(c2 - c12, N - c1, p)
- l(c12, c1, p1) - l(c2 - c12, N - c1, p2))
return -2 * score | def function[score, parameter[count_bigram, count1, count2, n_words]]:
constant[Collocation score]
if <ast.BoolOp object at 0x7da20c992590> begin[:]
return[constant[0]]
variable[N] assign[=] name[n_words]
variable[c12] assign[=] name[count_bigram]
variable[c1] assign[=] name[count1]
variable[c2] assign[=] name[count2]
variable[p] assign[=] binary_operation[name[c2] / name[N]]
variable[p1] assign[=] binary_operation[name[c12] / name[c1]]
variable[p2] assign[=] binary_operation[binary_operation[name[c2] - name[c12]] / binary_operation[name[N] - name[c1]]]
variable[score] assign[=] binary_operation[binary_operation[binary_operation[call[name[l], parameter[name[c12], name[c1], name[p]]] + call[name[l], parameter[binary_operation[name[c2] - name[c12]], binary_operation[name[N] - name[c1]], name[p]]]] - call[name[l], parameter[name[c12], name[c1], name[p1]]]] - call[name[l], parameter[binary_operation[name[c2] - name[c12]], binary_operation[name[N] - name[c1]], name[p2]]]]
return[binary_operation[<ast.UnaryOp object at 0x7da20c992fe0> * name[score]]] | keyword[def] identifier[score] ( identifier[count_bigram] , identifier[count1] , identifier[count2] , identifier[n_words] ):
literal[string]
keyword[if] identifier[n_words] <= identifier[count1] keyword[or] identifier[n_words] <= identifier[count2] :
keyword[return] literal[int]
identifier[N] = identifier[n_words]
identifier[c12] = identifier[count_bigram]
identifier[c1] = identifier[count1]
identifier[c2] = identifier[count2]
identifier[p] = identifier[c2] / identifier[N]
identifier[p1] = identifier[c12] / identifier[c1]
identifier[p2] =( identifier[c2] - identifier[c12] )/( identifier[N] - identifier[c1] )
identifier[score] =( identifier[l] ( identifier[c12] , identifier[c1] , identifier[p] )+ identifier[l] ( identifier[c2] - identifier[c12] , identifier[N] - identifier[c1] , identifier[p] )
- identifier[l] ( identifier[c12] , identifier[c1] , identifier[p1] )- identifier[l] ( identifier[c2] - identifier[c12] , identifier[N] - identifier[c1] , identifier[p2] ))
keyword[return] - literal[int] * identifier[score] | def score(count_bigram, count1, count2, n_words):
"""Collocation score"""
if n_words <= count1 or n_words <= count2:
        # only one word appears in the whole document
return 0 # depends on [control=['if'], data=[]]
N = n_words
c12 = count_bigram
c1 = count1
c2 = count2
p = c2 / N
p1 = c12 / c1
p2 = (c2 - c12) / (N - c1)
score = l(c12, c1, p) + l(c2 - c12, N - c1, p) - l(c12, c1, p1) - l(c2 - c12, N - c1, p2)
return -2 * score |
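
score leans on a helper l that is not shown in this row; under the usual Dunning log-likelihood-ratio reading it is the binomial log-likelihood, which makes the sketch below self-contained (treat the exact form of l as an assumption):

from math import log

def l(k, n, x):
    # Binomial log-likelihood of k successes in n trials at rate x.
    return k * log(x) + (n - k) * log(1 - x)

# 'machine learning' seen 30 times; 'machine' 40 times and 'learning'
# 32 times in a 10,000-word document: a strongly associated bigram.
print(score(30, 40, 32, 10_000))  # large positive score
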
def VerifySignature(message, signature, public_key, unhex=True):
"""
Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint|bytes): the public key to use for verifying the signature. If `public_key` is of type bytes then it should be raw bytes (i.e. b'\xAA\xBB').
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise.
"""
if type(public_key) is EllipticCurve.ECPoint:
pubkey_x = public_key.x.value.to_bytes(32, 'big')
pubkey_y = public_key.y.value.to_bytes(32, 'big')
public_key = pubkey_x + pubkey_y
if unhex:
try:
message = binascii.unhexlify(message)
except Exception as e:
logger.error("could not get m: %s" % e)
elif isinstance(message, str):
message = message.encode('utf-8')
if len(public_key) == 33:
public_key = bitcoin.decompress(public_key)
public_key = public_key[1:]
try:
vk = VerifyingKey.from_string(public_key, curve=NIST256p, hashfunc=hashlib.sha256)
res = vk.verify(signature, message, hashfunc=hashlib.sha256)
return res
except Exception as e:
pass
return False | def function[VerifySignature, parameter[message, signature, public_key, unhex]]:
constant[
Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint|bytes): the public key to use for verifying the signature. If `public_key` is of type bytes then it should be raw bytes (i.e. b'ª»').
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise.
]
if compare[call[name[type], parameter[name[public_key]]] is name[EllipticCurve].ECPoint] begin[:]
variable[pubkey_x] assign[=] call[name[public_key].x.value.to_bytes, parameter[constant[32], constant[big]]]
variable[pubkey_y] assign[=] call[name[public_key].y.value.to_bytes, parameter[constant[32], constant[big]]]
variable[public_key] assign[=] binary_operation[name[pubkey_x] + name[pubkey_y]]
if name[unhex] begin[:]
<ast.Try object at 0x7da204620f40>
if compare[call[name[len], parameter[name[public_key]]] equal[==] constant[33]] begin[:]
variable[public_key] assign[=] call[name[bitcoin].decompress, parameter[name[public_key]]]
variable[public_key] assign[=] call[name[public_key]][<ast.Slice object at 0x7da204621d50>]
<ast.Try object at 0x7da204623160>
return[constant[False]] | keyword[def] identifier[VerifySignature] ( identifier[message] , identifier[signature] , identifier[public_key] , identifier[unhex] = keyword[True] ):
literal[string]
keyword[if] identifier[type] ( identifier[public_key] ) keyword[is] identifier[EllipticCurve] . identifier[ECPoint] :
identifier[pubkey_x] = identifier[public_key] . identifier[x] . identifier[value] . identifier[to_bytes] ( literal[int] , literal[string] )
identifier[pubkey_y] = identifier[public_key] . identifier[y] . identifier[value] . identifier[to_bytes] ( literal[int] , literal[string] )
identifier[public_key] = identifier[pubkey_x] + identifier[pubkey_y]
keyword[if] identifier[unhex] :
keyword[try] :
identifier[message] = identifier[binascii] . identifier[unhexlify] ( identifier[message] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] % identifier[e] )
keyword[elif] identifier[isinstance] ( identifier[message] , identifier[str] ):
identifier[message] = identifier[message] . identifier[encode] ( literal[string] )
keyword[if] identifier[len] ( identifier[public_key] )== literal[int] :
identifier[public_key] = identifier[bitcoin] . identifier[decompress] ( identifier[public_key] )
identifier[public_key] = identifier[public_key] [ literal[int] :]
keyword[try] :
identifier[vk] = identifier[VerifyingKey] . identifier[from_string] ( identifier[public_key] , identifier[curve] = identifier[NIST256p] , identifier[hashfunc] = identifier[hashlib] . identifier[sha256] )
identifier[res] = identifier[vk] . identifier[verify] ( identifier[signature] , identifier[message] , identifier[hashfunc] = identifier[hashlib] . identifier[sha256] )
keyword[return] identifier[res]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[pass]
keyword[return] keyword[False] | def VerifySignature(message, signature, public_key, unhex=True):
"""
Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint|bytes): the public key to use for verifying the signature. If `public_key` is of type bytes then it should be raw bytes (i.e. b'ª»').
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise.
"""
if type(public_key) is EllipticCurve.ECPoint:
pubkey_x = public_key.x.value.to_bytes(32, 'big')
pubkey_y = public_key.y.value.to_bytes(32, 'big')
public_key = pubkey_x + pubkey_y # depends on [control=['if'], data=[]]
if unhex:
try:
message = binascii.unhexlify(message) # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('could not get m: %s' % e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
elif isinstance(message, str):
message = message.encode('utf-8') # depends on [control=['if'], data=[]]
if len(public_key) == 33:
public_key = bitcoin.decompress(public_key)
public_key = public_key[1:] # depends on [control=['if'], data=[]]
try:
vk = VerifyingKey.from_string(public_key, curve=NIST256p, hashfunc=hashlib.sha256)
res = vk.verify(signature, message, hashfunc=hashlib.sha256)
return res # depends on [control=['try'], data=[]]
except Exception as e:
pass # depends on [control=['except'], data=[]]
return False |
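
A round-trip sketch using the same ecdsa primitives the function relies on; the key-generation step is an assumption about how callers obtain keys, not part of the code above:

import binascii
import hashlib
from ecdsa import NIST256p, SigningKey

sk = SigningKey.generate(curve=NIST256p, hashfunc=hashlib.sha256)
message = b'transfer 10 tokens'
signature = sk.sign(message, hashfunc=hashlib.sha256)
pubkey = sk.get_verifying_key().to_string()  # 64 raw bytes: x || y

assert VerifySignature(binascii.hexlify(message), signature, pubkey)
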
def prompt_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
:param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc')
"type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = '\n'.join([' [{}] {}{}'
.format(i + 1,
x['name'] if isinstance(x, dict) and 'name' in x else x,
' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
for i, x in enumerate(a_list)])
allowed_vals = list(range(1, len(a_list) + 1))
while True:
val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
if val == '?' and help_string is not None:
print(help_string)
continue
if not val:
val = '{}'.format(default)
try:
ans = int(val)
if ans in allowed_vals:
# array index is 0-based, user input is 1-based
return ans - 1
raise ValueError
except ValueError:
logger.warning('Valid values are %s', allowed_vals) | def function[prompt_choice_list, parameter[msg, a_list, default, help_string]]:
constant[Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
:param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc')
"type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
]
call[name[verify_is_a_tty], parameter[]]
variable[options] assign[=] call[constant[
].join, parameter[<ast.ListComp object at 0x7da1b167cd90>]]
variable[allowed_vals] assign[=] call[name[list], parameter[call[name[range], parameter[constant[1], binary_operation[call[name[len], parameter[name[a_list]]] + constant[1]]]]]]
while constant[True] begin[:]
variable[val] assign[=] call[name[_input], parameter[call[constant[{}
{}
Please enter a choice [Default choice({})]: ].format, parameter[name[msg], name[options], name[default]]]]]
if <ast.BoolOp object at 0x7da1b167ffd0> begin[:]
call[name[print], parameter[name[help_string]]]
continue
if <ast.UnaryOp object at 0x7da1b180d210> begin[:]
variable[val] assign[=] call[constant[{}].format, parameter[name[default]]]
<ast.Try object at 0x7da1b180c550> | keyword[def] identifier[prompt_choice_list] ( identifier[msg] , identifier[a_list] , identifier[default] = literal[int] , identifier[help_string] = keyword[None] ):
literal[string]
identifier[verify_is_a_tty] ()
identifier[options] = literal[string] . identifier[join] ([ literal[string]
. identifier[format] ( identifier[i] + literal[int] ,
identifier[x] [ literal[string] ] keyword[if] identifier[isinstance] ( identifier[x] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[x] keyword[else] identifier[x] ,
literal[string] + identifier[x] [ literal[string] ] keyword[if] identifier[isinstance] ( identifier[x] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[x] keyword[else] literal[string] )
keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[a_list] )])
identifier[allowed_vals] = identifier[list] ( identifier[range] ( literal[int] , identifier[len] ( identifier[a_list] )+ literal[int] ))
keyword[while] keyword[True] :
identifier[val] = identifier[_input] ( literal[string] . identifier[format] ( identifier[msg] , identifier[options] , identifier[default] ))
keyword[if] identifier[val] == literal[string] keyword[and] identifier[help_string] keyword[is] keyword[not] keyword[None] :
identifier[print] ( identifier[help_string] )
keyword[continue]
keyword[if] keyword[not] identifier[val] :
identifier[val] = literal[string] . identifier[format] ( identifier[default] )
keyword[try] :
identifier[ans] = identifier[int] ( identifier[val] )
keyword[if] identifier[ans] keyword[in] identifier[allowed_vals] :
keyword[return] identifier[ans] - literal[int]
keyword[raise] identifier[ValueError]
keyword[except] identifier[ValueError] :
identifier[logger] . identifier[warning] ( literal[string] , identifier[allowed_vals] ) | def prompt_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
    :param msg: A message displayed to the user before the choice list
    :type msg: str
    :param a_list: The list of choices (list of strings or list of dicts with 'name' & 'desc')
    :type a_list: list
    :param default: The default option that should be chosen if user doesn't enter a choice
    :type default: int
    :returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = '\n'.join([' [{}] {}{}'.format(i + 1, x['name'] if isinstance(x, dict) and 'name' in x else x, ' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '') for (i, x) in enumerate(a_list)])
allowed_vals = list(range(1, len(a_list) + 1))
while True:
val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
if val == '?' and help_string is not None:
print(help_string)
continue # depends on [control=['if'], data=[]]
if not val:
val = '{}'.format(default) # depends on [control=['if'], data=[]]
try:
ans = int(val)
if ans in allowed_vals:
# array index is 0-based, user input is 1-based
return ans - 1 # depends on [control=['if'], data=['ans']]
raise ValueError # depends on [control=['try'], data=[]]
except ValueError:
logger.warning('Valid values are %s', allowed_vals) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
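Assuming prompt_choice_list is importable and stdin is a TTY, a usage sketch (the prompt text and choices are invented):

choices = ['small', {'name': 'medium', 'desc': 'recommended'}, 'large']
# Blocks on stdin; pressing Enter picks the default, '?' prints help_string.
index = prompt_choice_list('Select a size:', choices, default=2,
                           help_string='Pick one of the listed sizes.')
print('picked index', index)  # 0-based, e.g. 1 for 'medium'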
def find_first_stop_codon(nucleotide_sequence):
"""
    Given a sequence of codons (expected to have a length that is a multiple of three),
    return the index of the first stop codon, or -1 if none is in the sequence.
"""
n_mutant_codons = len(nucleotide_sequence) // 3
for i in range(n_mutant_codons):
codon = nucleotide_sequence[3 * i:3 * i + 3]
if codon in STOP_CODONS:
return i
return -1 | def function[find_first_stop_codon, parameter[nucleotide_sequence]]:
constant[
    Given a sequence of codons (expected to have a length that is a multiple of three),
    return the index of the first stop codon, or -1 if none is in the sequence.
]
variable[n_mutant_codons] assign[=] binary_operation[call[name[len], parameter[name[nucleotide_sequence]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3]]
for taget[name[i]] in starred[call[name[range], parameter[name[n_mutant_codons]]]] begin[:]
variable[codon] assign[=] call[name[nucleotide_sequence]][<ast.Slice object at 0x7da1b04db640>]
if compare[name[codon] in name[STOP_CODONS]] begin[:]
return[name[i]]
return[<ast.UnaryOp object at 0x7da1b04dbc70>] | keyword[def] identifier[find_first_stop_codon] ( identifier[nucleotide_sequence] ):
literal[string]
identifier[n_mutant_codons] = identifier[len] ( identifier[nucleotide_sequence] )// literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_mutant_codons] ):
identifier[codon] = identifier[nucleotide_sequence] [ literal[int] * identifier[i] : literal[int] * identifier[i] + literal[int] ]
keyword[if] identifier[codon] keyword[in] identifier[STOP_CODONS] :
keyword[return] identifier[i]
keyword[return] - literal[int] | def find_first_stop_codon(nucleotide_sequence):
"""
    Given a sequence of codons (expected to have a length that is a multiple of three),
    return the index of the first stop codon, or -1 if none is in the sequence.
"""
n_mutant_codons = len(nucleotide_sequence) // 3
for i in range(n_mutant_codons):
codon = nucleotide_sequence[3 * i:3 * i + 3]
if codon in STOP_CODONS:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return -1 |
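The row above depends on a module-level STOP_CODONS constant that is not shown. A self-contained copy with the standard DNA stop codons (TAA, TAG, TGA) filled in; the assertion inputs are invented:

STOP_CODONS = {'TAA', 'TAG', 'TGA'}  # assumed definition of the unshown constant

def find_first_stop_codon(nucleotide_sequence):
    n_codons = len(nucleotide_sequence) // 3
    for i in range(n_codons):
        if nucleotide_sequence[3 * i:3 * i + 3] in STOP_CODONS:
            return i
    return -1

assert find_first_stop_codon('ATGAAATAGGGC') == 2  # TAG is the third codon
assert find_first_stop_codon('ATGAAA') == -1       # no stop codon present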
def reconcile(constraint):
'''
Returns an assignment of type variable names to
types that makes this constraint satisfiable, or a Refutation
'''
if isinstance(constraint.subtype, NamedType):
if isinstance(constraint.supertype, NamedType):
if constraint.subtype.name == constraint.supertype.name:
return {}
else:
return Refutation('Cannot reconcile different atomic types: %s' % constraint)
elif isinstance(constraint.supertype, Variable):
return {constraint.supertype.name: constraint.subtype}
else:
return Refutation('Cannot reconcile atomic type with non-atomic type: %s' % constraint)
elif isinstance(constraint.supertype, NamedType):
if isinstance(constraint.subtype, NamedType):
if constraint.subtype.name == constraint.supertype.name:
return {}
else:
return Refutation('Cannot reconcile different atomic types: %s' % constraint)
elif isinstance(constraint.subtype, Variable):
return {constraint.subtype.name: constraint.supertype}
else:
return Refutation('Cannot reconcile non-atomic type with atomic type: %s' % constraint)
elif isinstance(constraint.supertype, Union):
# Lots of stuff could happen here; unsure if there's research to bring to bear
if constraint.subtype in constraint.supertype.types:
return {}
return Stumper(constraint) | def function[reconcile, parameter[constraint]]:
constant[
Returns an assignment of type variable names to
types that makes this constraint satisfiable, or a Refutation
]
if call[name[isinstance], parameter[name[constraint].subtype, name[NamedType]]] begin[:]
if call[name[isinstance], parameter[name[constraint].supertype, name[NamedType]]] begin[:]
if compare[name[constraint].subtype.name equal[==] name[constraint].supertype.name] begin[:]
return[dictionary[[], []]]
return[call[name[Stumper], parameter[name[constraint]]]] | keyword[def] identifier[reconcile] ( identifier[constraint] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[constraint] . identifier[subtype] , identifier[NamedType] ):
keyword[if] identifier[isinstance] ( identifier[constraint] . identifier[supertype] , identifier[NamedType] ):
keyword[if] identifier[constraint] . identifier[subtype] . identifier[name] == identifier[constraint] . identifier[supertype] . identifier[name] :
keyword[return] {}
keyword[else] :
keyword[return] identifier[Refutation] ( literal[string] % identifier[constraint] )
keyword[elif] identifier[isinstance] ( identifier[constraint] . identifier[supertype] , identifier[Variable] ):
keyword[return] { identifier[constraint] . identifier[supertype] . identifier[name] : identifier[constraint] . identifier[subtype] }
keyword[else] :
keyword[return] identifier[Refutation] ( literal[string] % identifier[constraint] )
keyword[elif] identifier[isinstance] ( identifier[constraint] . identifier[supertype] , identifier[NamedType] ):
keyword[if] identifier[isinstance] ( identifier[constraint] . identifier[subtype] , identifier[NamedType] ):
keyword[if] identifier[constraint] . identifier[subtype] . identifier[name] == identifier[constraint] . identifier[supertype] . identifier[name] :
keyword[return] {}
keyword[else] :
keyword[return] identifier[Refutation] ( literal[string] % identifier[constraint] )
keyword[elif] identifier[isinstance] ( identifier[constraint] . identifier[subtype] , identifier[Variable] ):
keyword[return] { identifier[constraint] . identifier[subtype] . identifier[name] : identifier[constraint] . identifier[supertype] }
keyword[else] :
keyword[return] identifier[Refutation] ( literal[string] % identifier[constraint] )
keyword[elif] identifier[isinstance] ( identifier[constraint] . identifier[supertype] , identifier[Union] ):
keyword[if] identifier[constraint] . identifier[subtype] keyword[in] identifier[constraint] . identifier[supertype] . identifier[types] :
keyword[return] {}
keyword[return] identifier[Stumper] ( identifier[constraint] ) | def reconcile(constraint):
"""
Returns an assignment of type variable names to
types that makes this constraint satisfiable, or a Refutation
"""
if isinstance(constraint.subtype, NamedType):
if isinstance(constraint.supertype, NamedType):
if constraint.subtype.name == constraint.supertype.name:
return {} # depends on [control=['if'], data=[]]
else:
return Refutation('Cannot reconcile different atomic types: %s' % constraint) # depends on [control=['if'], data=[]]
elif isinstance(constraint.supertype, Variable):
return {constraint.supertype.name: constraint.subtype} # depends on [control=['if'], data=[]]
else:
return Refutation('Cannot reconcile atomic type with non-atomic type: %s' % constraint) # depends on [control=['if'], data=[]]
elif isinstance(constraint.supertype, NamedType):
if isinstance(constraint.subtype, NamedType):
if constraint.subtype.name == constraint.supertype.name:
return {} # depends on [control=['if'], data=[]]
else:
return Refutation('Cannot reconcile different atomic types: %s' % constraint) # depends on [control=['if'], data=[]]
elif isinstance(constraint.subtype, Variable):
return {constraint.subtype.name: constraint.supertype} # depends on [control=['if'], data=[]]
else:
return Refutation('Cannot reconcile non-atomic type with atomic type: %s' % constraint) # depends on [control=['if'], data=[]]
elif isinstance(constraint.supertype, Union):
# Lots of stuff could happen here; unsure if there's research to bring to bear
if constraint.subtype in constraint.supertype.types:
return {} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return Stumper(constraint) |
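The NamedType, Variable, Union, Refutation, and Stumper classes reconcile relies on are not shown. Namedtuple stand-ins that satisfy the attribute accesses are enough to exercise the variable-binding branch, assuming they live in the same namespace as reconcile (a partial sketch, not the library's real classes):

from collections import namedtuple

NamedType = namedtuple('NamedType', 'name')            # assumed shape
Variable = namedtuple('Variable', 'name')              # assumed shape
Constraint = namedtuple('Constraint', 'subtype supertype')

# NamedType('int') <: Variable('T') binds T to int:
print(reconcile(Constraint(NamedType('int'), Variable('T'))))
# -> {'T': NamedType(name='int')}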
def _update_event_type(self_, watcher, event, triggered):
"""
Returns an updated Event object with the type field set appropriately.
"""
if triggered:
event_type = 'triggered'
else:
event_type = 'changed' if watcher.onlychanged else 'set'
return Event(what=event.what, name=event.name, obj=event.obj, cls=event.cls,
old=event.old, new=event.new, type=event_type) | def function[_update_event_type, parameter[self_, watcher, event, triggered]]:
constant[
Returns an updated Event object with the type field set appropriately.
]
if name[triggered] begin[:]
variable[event_type] assign[=] constant[triggered]
return[call[name[Event], parameter[]]] | keyword[def] identifier[_update_event_type] ( identifier[self_] , identifier[watcher] , identifier[event] , identifier[triggered] ):
literal[string]
keyword[if] identifier[triggered] :
identifier[event_type] = literal[string]
keyword[else] :
identifier[event_type] = literal[string] keyword[if] identifier[watcher] . identifier[onlychanged] keyword[else] literal[string]
keyword[return] identifier[Event] ( identifier[what] = identifier[event] . identifier[what] , identifier[name] = identifier[event] . identifier[name] , identifier[obj] = identifier[event] . identifier[obj] , identifier[cls] = identifier[event] . identifier[cls] ,
identifier[old] = identifier[event] . identifier[old] , identifier[new] = identifier[event] . identifier[new] , identifier[type] = identifier[event_type] ) | def _update_event_type(self_, watcher, event, triggered):
"""
Returns an updated Event object with the type field set appropriately.
"""
if triggered:
event_type = 'triggered' # depends on [control=['if'], data=[]]
else:
event_type = 'changed' if watcher.onlychanged else 'set'
return Event(what=event.what, name=event.name, obj=event.obj, cls=event.cls, old=event.old, new=event.new, type=event_type) |
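This helper reads six fields off the incoming event and rebuilds an Event with a new type; a namedtuple stand-in matching those fields shows the selection logic, assuming it is defined alongside _update_event_type (field names are inferred from the row, not confirmed against the real library):

from collections import namedtuple

Event = namedtuple('Event', 'what name obj cls old new type')
Watcher = namedtuple('Watcher', 'onlychanged')

ev = Event(what='value', name='x', obj=None, cls=None, old=0, new=1, type=None)
updated = _update_event_type(None, Watcher(onlychanged=True), ev, triggered=False)
print(updated.type)  # 'changed'; triggered=True gives 'triggered', else 'set'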
def store_integers(items, allow_zero=True):
"""Store integers from the given list in a storage.
This is an example function to show autodoc style.
Return :class:`Storage` instance with integers from the given list.
Examples::
>>> storage = store_integers([1, 'foo', 2, 'bar', 0])
>>> storage.items
[1, 2, 0]
>>> storage = store_integers([1, 'foo', 2, 'bar', 0], allow_zero=False)
>>> storage.items
[1, 2]
:param items:
List of objects of any type, only :class:`int` instances will be
stored.
:param allow_zero:
Boolean -- if ``False``, ``0`` integers will be skipped.
Defaults to ``True``.
"""
ints = [x for x in items if isinstance(x, int) and (allow_zero or x != 0)]
storage = Storage(ints)
return storage | def function[store_integers, parameter[items, allow_zero]]:
constant[Store integers from the given list in a storage.
This is an example function to show autodoc style.
Return :class:`Storage` instance with integers from the given list.
Examples::
>>> storage = store_integers([1, 'foo', 2, 'bar', 0])
>>> storage.items
[1, 2, 0]
>>> storage = store_integers([1, 'foo', 2, 'bar', 0], allow_zero=False)
>>> storage.items
[1, 2]
:param items:
List of objects of any type, only :class:`int` instances will be
stored.
:param allow_zero:
Boolean -- if ``False``, ``0`` integers will be skipped.
Defaults to ``True``.
]
variable[ints] assign[=] <ast.ListComp object at 0x7da1b0a2e200>
variable[storage] assign[=] call[name[Storage], parameter[name[ints]]]
return[name[storage]] | keyword[def] identifier[store_integers] ( identifier[items] , identifier[allow_zero] = keyword[True] ):
literal[string]
identifier[ints] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[items] keyword[if] identifier[isinstance] ( identifier[x] , identifier[int] ) keyword[and] ( identifier[allow_zero] keyword[or] identifier[x] != literal[int] )]
identifier[storage] = identifier[Storage] ( identifier[ints] )
keyword[return] identifier[storage] | def store_integers(items, allow_zero=True):
"""Store integers from the given list in a storage.
This is an example function to show autodoc style.
Return :class:`Storage` instance with integers from the given list.
Examples::
>>> storage = store_integers([1, 'foo', 2, 'bar', 0])
>>> storage.items
[1, 2, 0]
>>> storage = store_integers([1, 'foo', 2, 'bar', 0], allow_zero=False)
>>> storage.items
[1, 2]
:param items:
List of objects of any type, only :class:`int` instances will be
stored.
:param allow_zero:
Boolean -- if ``False``, ``0`` integers will be skipped.
Defaults to ``True``.
"""
ints = [x for x in items if isinstance(x, int) and (allow_zero or x != 0)]
storage = Storage(ints)
return storage |
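The Storage class referenced above is not shown; a minimal stand-in with an items attribute, defined in the same module as store_integers, is all the docstring examples need (assumed shape only):

class Storage:
    # Assumed minimal stand-in for the unshown Storage class.
    def __init__(self, items):
        self.items = items

storage = store_integers([1, 'foo', 2, 'bar', 0], allow_zero=False)
print(storage.items)  # [1, 2], matching the docstring example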
def change_column_name(
conn,
table,
old_column_name,
new_column_name,
schema=None
):
"""
Changes given `activity` jsonb data column key. This function is useful
when you want to reflect column name changes to activity table.
::
from alembic import op
from postgresql_audit import change_column_name
def upgrade():
op.alter_column(
'my_table',
'my_column',
new_column_name='some_column'
)
change_column_name(op, 'my_table', 'my_column', 'some_column')
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param table:
The table to run the column name changes against
:param old_column_name:
Name of the column to change
:param new_column_name:
        New column name
:param schema:
Optional name of schema to use.
"""
activity_table = get_activity_table(schema=schema)
query = (
activity_table
.update()
.values(
old_data=jsonb_change_key_name(
activity_table.c.old_data,
old_column_name,
new_column_name
),
changed_data=jsonb_change_key_name(
activity_table.c.changed_data,
old_column_name,
new_column_name
)
)
.where(activity_table.c.table_name == table)
)
return conn.execute(query) | def function[change_column_name, parameter[conn, table, old_column_name, new_column_name, schema]]:
constant[
Changes given `activity` jsonb data column key. This function is useful
when you want to reflect column name changes to activity table.
::
from alembic import op
from postgresql_audit import change_column_name
def upgrade():
op.alter_column(
'my_table',
'my_column',
new_column_name='some_column'
)
change_column_name(op, 'my_table', 'my_column', 'some_column')
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param table:
The table to run the column name changes against
:param old_column_name:
Name of the column to change
:param new_column_name:
        New column name
:param schema:
Optional name of schema to use.
]
variable[activity_table] assign[=] call[name[get_activity_table], parameter[]]
variable[query] assign[=] call[call[call[name[activity_table].update, parameter[]].values, parameter[]].where, parameter[compare[name[activity_table].c.table_name equal[==] name[table]]]]
return[call[name[conn].execute, parameter[name[query]]]] | keyword[def] identifier[change_column_name] (
identifier[conn] ,
identifier[table] ,
identifier[old_column_name] ,
identifier[new_column_name] ,
identifier[schema] = keyword[None]
):
literal[string]
identifier[activity_table] = identifier[get_activity_table] ( identifier[schema] = identifier[schema] )
identifier[query] =(
identifier[activity_table]
. identifier[update] ()
. identifier[values] (
identifier[old_data] = identifier[jsonb_change_key_name] (
identifier[activity_table] . identifier[c] . identifier[old_data] ,
identifier[old_column_name] ,
identifier[new_column_name]
),
identifier[changed_data] = identifier[jsonb_change_key_name] (
identifier[activity_table] . identifier[c] . identifier[changed_data] ,
identifier[old_column_name] ,
identifier[new_column_name]
)
)
. identifier[where] ( identifier[activity_table] . identifier[c] . identifier[table_name] == identifier[table] )
)
keyword[return] identifier[conn] . identifier[execute] ( identifier[query] ) | def change_column_name(conn, table, old_column_name, new_column_name, schema=None):
"""
Changes given `activity` jsonb data column key. This function is useful
when you want to reflect column name changes to activity table.
::
from alembic import op
from postgresql_audit import change_column_name
def upgrade():
op.alter_column(
'my_table',
'my_column',
new_column_name='some_column'
)
change_column_name(op, 'my_table', 'my_column', 'some_column')
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param table:
The table to run the column name changes against
:param old_column_name:
Name of the column to change
:param new_column_name:
        New column name
:param schema:
Optional name of schema to use.
"""
activity_table = get_activity_table(schema=schema)
query = activity_table.update().values(old_data=jsonb_change_key_name(activity_table.c.old_data, old_column_name, new_column_name), changed_data=jsonb_change_key_name(activity_table.c.changed_data, old_column_name, new_column_name)).where(activity_table.c.table_name == table)
return conn.execute(query) |
def dist_abs(self, src, tar, cost=(0, 1, 2), local=False):
"""Return the Editex distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
            A 3-tuple representing the cost of the three possible edits: match,
            same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> cmp = Editex()
>>> cmp.dist_abs('cat', 'hat')
2
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('aluminum', 'Catalan')
12
>>> cmp.dist_abs('ATCG', 'TAGC')
6
"""
match_cost, group_cost, mismatch_cost = cost
def r_cost(ch1, ch2):
"""Return r(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
r(a,b) according to Zobel & Dart's definition
"""
if ch1 == ch2:
return match_cost
if ch1 in self._all_letters and ch2 in self._all_letters:
for group in self._letter_groups:
if ch1 in group and ch2 in group:
return group_cost
return mismatch_cost
def d_cost(ch1, ch2):
"""Return d(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
d(a,b) according to Zobel & Dart's definition
"""
if ch1 != ch2 and (ch1 == 'H' or ch1 == 'W'):
return group_cost
return r_cost(ch1, ch2)
# convert both src & tar to NFKD normalized unicode
src = unicode_normalize('NFKD', text_type(src.upper()))
tar = unicode_normalize('NFKD', text_type(tar.upper()))
# convert ß to SS (for Python2)
src = src.replace('ß', 'SS')
tar = tar.replace('ß', 'SS')
if src == tar:
return 0.0
if not src:
return len(tar) * mismatch_cost
if not tar:
return len(src) * mismatch_cost
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)
lens = len(src)
lent = len(tar)
src = ' ' + src
tar = ' ' + tar
if not local:
for i in range(1, lens + 1):
d_mat[i, 0] = d_mat[i - 1, 0] + d_cost(src[i - 1], src[i])
for j in range(1, lent + 1):
d_mat[0, j] = d_mat[0, j - 1] + d_cost(tar[j - 1], tar[j])
for i in range(1, lens + 1):
for j in range(1, lent + 1):
d_mat[i, j] = min(
d_mat[i - 1, j] + d_cost(src[i - 1], src[i]),
d_mat[i, j - 1] + d_cost(tar[j - 1], tar[j]),
d_mat[i - 1, j - 1] + r_cost(src[i], tar[j]),
)
return d_mat[lens, lent] | def function[dist_abs, parameter[self, src, tar, cost, local]]:
constant[Return the Editex distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
            A 3-tuple representing the cost of the three possible edits: match,
            same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> cmp = Editex()
>>> cmp.dist_abs('cat', 'hat')
2
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('aluminum', 'Catalan')
12
>>> cmp.dist_abs('ATCG', 'TAGC')
6
]
<ast.Tuple object at 0x7da1b0193c70> assign[=] name[cost]
def function[r_cost, parameter[ch1, ch2]]:
constant[Return r(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
r(a,b) according to Zobel & Dart's definition
]
if compare[name[ch1] equal[==] name[ch2]] begin[:]
return[name[match_cost]]
if <ast.BoolOp object at 0x7da1b01938b0> begin[:]
for taget[name[group]] in starred[name[self]._letter_groups] begin[:]
if <ast.BoolOp object at 0x7da1b0193610> begin[:]
return[name[group_cost]]
return[name[mismatch_cost]]
def function[d_cost, parameter[ch1, ch2]]:
constant[Return d(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
d(a,b) according to Zobel & Dart's definition
]
if <ast.BoolOp object at 0x7da1b0193220> begin[:]
return[name[group_cost]]
return[call[name[r_cost], parameter[name[ch1], name[ch2]]]]
variable[src] assign[=] call[name[unicode_normalize], parameter[constant[NFKD], call[name[text_type], parameter[call[name[src].upper, parameter[]]]]]]
variable[tar] assign[=] call[name[unicode_normalize], parameter[constant[NFKD], call[name[text_type], parameter[call[name[tar].upper, parameter[]]]]]]
variable[src] assign[=] call[name[src].replace, parameter[constant[ß], constant[SS]]]
variable[tar] assign[=] call[name[tar].replace, parameter[constant[ß], constant[SS]]]
if compare[name[src] equal[==] name[tar]] begin[:]
return[constant[0.0]]
if <ast.UnaryOp object at 0x7da1b0191b10> begin[:]
return[binary_operation[call[name[len], parameter[name[tar]]] * name[mismatch_cost]]]
if <ast.UnaryOp object at 0x7da1b0191930> begin[:]
return[binary_operation[call[name[len], parameter[name[src]]] * name[mismatch_cost]]]
variable[d_mat] assign[=] call[name[np_zeros], parameter[tuple[[<ast.BinOp object at 0x7da1b0191690>, <ast.BinOp object at 0x7da1b01915a0>]]]]
variable[lens] assign[=] call[name[len], parameter[name[src]]]
variable[lent] assign[=] call[name[len], parameter[name[tar]]]
variable[src] assign[=] binary_operation[constant[ ] + name[src]]
variable[tar] assign[=] binary_operation[constant[ ] + name[tar]]
if <ast.UnaryOp object at 0x7da1b0191150> begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[name[lens] + constant[1]]]]] begin[:]
call[name[d_mat]][tuple[[<ast.Name object at 0x7da1b0190100>, <ast.Constant object at 0x7da1b0190130>]]] assign[=] binary_operation[call[name[d_mat]][tuple[[<ast.BinOp object at 0x7da1b0190f10>, <ast.Constant object at 0x7da1b0190e80>]]] + call[name[d_cost], parameter[call[name[src]][binary_operation[name[i] - constant[1]]], call[name[src]][name[i]]]]]
for taget[name[j]] in starred[call[name[range], parameter[constant[1], binary_operation[name[lent] + constant[1]]]]] begin[:]
call[name[d_mat]][tuple[[<ast.Constant object at 0x7da1b0190a30>, <ast.Name object at 0x7da1b0190a00>]]] assign[=] binary_operation[call[name[d_mat]][tuple[[<ast.Constant object at 0x7da1b0190910>, <ast.BinOp object at 0x7da1b01908e0>]]] + call[name[d_cost], parameter[call[name[tar]][binary_operation[name[j] - constant[1]]], call[name[tar]][name[j]]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[name[lens] + constant[1]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[constant[1], binary_operation[name[lent] + constant[1]]]]] begin[:]
call[name[d_mat]][tuple[[<ast.Name object at 0x7da1b01902b0>, <ast.Name object at 0x7da1b0190280>]]] assign[=] call[name[min], parameter[binary_operation[call[name[d_mat]][tuple[[<ast.BinOp object at 0x7da1b006a8c0>, <ast.Name object at 0x7da1b006b790>]]] + call[name[d_cost], parameter[call[name[src]][binary_operation[name[i] - constant[1]]], call[name[src]][name[i]]]]], binary_operation[call[name[d_mat]][tuple[[<ast.Name object at 0x7da1b006a680>, <ast.BinOp object at 0x7da1b006a830>]]] + call[name[d_cost], parameter[call[name[tar]][binary_operation[name[j] - constant[1]]], call[name[tar]][name[j]]]]], binary_operation[call[name[d_mat]][tuple[[<ast.BinOp object at 0x7da1b006ab60>, <ast.BinOp object at 0x7da1b00692a0>]]] + call[name[r_cost], parameter[call[name[src]][name[i]], call[name[tar]][name[j]]]]]]]
return[call[name[d_mat]][tuple[[<ast.Name object at 0x7da1b0069660>, <ast.Name object at 0x7da1b006ae30>]]]] | keyword[def] identifier[dist_abs] ( identifier[self] , identifier[src] , identifier[tar] , identifier[cost] =( literal[int] , literal[int] , literal[int] ), identifier[local] = keyword[False] ):
literal[string]
identifier[match_cost] , identifier[group_cost] , identifier[mismatch_cost] = identifier[cost]
keyword[def] identifier[r_cost] ( identifier[ch1] , identifier[ch2] ):
literal[string]
keyword[if] identifier[ch1] == identifier[ch2] :
keyword[return] identifier[match_cost]
keyword[if] identifier[ch1] keyword[in] identifier[self] . identifier[_all_letters] keyword[and] identifier[ch2] keyword[in] identifier[self] . identifier[_all_letters] :
keyword[for] identifier[group] keyword[in] identifier[self] . identifier[_letter_groups] :
keyword[if] identifier[ch1] keyword[in] identifier[group] keyword[and] identifier[ch2] keyword[in] identifier[group] :
keyword[return] identifier[group_cost]
keyword[return] identifier[mismatch_cost]
keyword[def] identifier[d_cost] ( identifier[ch1] , identifier[ch2] ):
literal[string]
keyword[if] identifier[ch1] != identifier[ch2] keyword[and] ( identifier[ch1] == literal[string] keyword[or] identifier[ch1] == literal[string] ):
keyword[return] identifier[group_cost]
keyword[return] identifier[r_cost] ( identifier[ch1] , identifier[ch2] )
identifier[src] = identifier[unicode_normalize] ( literal[string] , identifier[text_type] ( identifier[src] . identifier[upper] ()))
identifier[tar] = identifier[unicode_normalize] ( literal[string] , identifier[text_type] ( identifier[tar] . identifier[upper] ()))
identifier[src] = identifier[src] . identifier[replace] ( literal[string] , literal[string] )
identifier[tar] = identifier[tar] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[src] == identifier[tar] :
keyword[return] literal[int]
keyword[if] keyword[not] identifier[src] :
keyword[return] identifier[len] ( identifier[tar] )* identifier[mismatch_cost]
keyword[if] keyword[not] identifier[tar] :
keyword[return] identifier[len] ( identifier[src] )* identifier[mismatch_cost]
identifier[d_mat] = identifier[np_zeros] (( identifier[len] ( identifier[src] )+ literal[int] , identifier[len] ( identifier[tar] )+ literal[int] ), identifier[dtype] = identifier[np_int] )
identifier[lens] = identifier[len] ( identifier[src] )
identifier[lent] = identifier[len] ( identifier[tar] )
identifier[src] = literal[string] + identifier[src]
identifier[tar] = literal[string] + identifier[tar]
keyword[if] keyword[not] identifier[local] :
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[lens] + literal[int] ):
identifier[d_mat] [ identifier[i] , literal[int] ]= identifier[d_mat] [ identifier[i] - literal[int] , literal[int] ]+ identifier[d_cost] ( identifier[src] [ identifier[i] - literal[int] ], identifier[src] [ identifier[i] ])
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[lent] + literal[int] ):
identifier[d_mat] [ literal[int] , identifier[j] ]= identifier[d_mat] [ literal[int] , identifier[j] - literal[int] ]+ identifier[d_cost] ( identifier[tar] [ identifier[j] - literal[int] ], identifier[tar] [ identifier[j] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[lens] + literal[int] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[lent] + literal[int] ):
identifier[d_mat] [ identifier[i] , identifier[j] ]= identifier[min] (
identifier[d_mat] [ identifier[i] - literal[int] , identifier[j] ]+ identifier[d_cost] ( identifier[src] [ identifier[i] - literal[int] ], identifier[src] [ identifier[i] ]),
identifier[d_mat] [ identifier[i] , identifier[j] - literal[int] ]+ identifier[d_cost] ( identifier[tar] [ identifier[j] - literal[int] ], identifier[tar] [ identifier[j] ]),
identifier[d_mat] [ identifier[i] - literal[int] , identifier[j] - literal[int] ]+ identifier[r_cost] ( identifier[src] [ identifier[i] ], identifier[tar] [ identifier[j] ]),
)
keyword[return] identifier[d_mat] [ identifier[lens] , identifier[lent] ] | def dist_abs(self, src, tar, cost=(0, 1, 2), local=False):
"""Return the Editex distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
            A 3-tuple representing the cost of the three possible edits: match,
            same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> cmp = Editex()
>>> cmp.dist_abs('cat', 'hat')
2
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('aluminum', 'Catalan')
12
>>> cmp.dist_abs('ATCG', 'TAGC')
6
"""
(match_cost, group_cost, mismatch_cost) = cost
def r_cost(ch1, ch2):
"""Return r(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
r(a,b) according to Zobel & Dart's definition
"""
if ch1 == ch2:
return match_cost # depends on [control=['if'], data=[]]
if ch1 in self._all_letters and ch2 in self._all_letters:
for group in self._letter_groups:
if ch1 in group and ch2 in group:
return group_cost # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['group']] # depends on [control=['if'], data=[]]
return mismatch_cost
def d_cost(ch1, ch2):
"""Return d(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
d(a,b) according to Zobel & Dart's definition
"""
if ch1 != ch2 and (ch1 == 'H' or ch1 == 'W'):
return group_cost # depends on [control=['if'], data=[]]
return r_cost(ch1, ch2)
# convert both src & tar to NFKD normalized unicode
src = unicode_normalize('NFKD', text_type(src.upper()))
tar = unicode_normalize('NFKD', text_type(tar.upper()))
# convert ß to SS (for Python2)
src = src.replace('ß', 'SS')
tar = tar.replace('ß', 'SS')
if src == tar:
return 0.0 # depends on [control=['if'], data=[]]
if not src:
return len(tar) * mismatch_cost # depends on [control=['if'], data=[]]
if not tar:
return len(src) * mismatch_cost # depends on [control=['if'], data=[]]
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)
lens = len(src)
lent = len(tar)
src = ' ' + src
tar = ' ' + tar
if not local:
for i in range(1, lens + 1):
d_mat[i, 0] = d_mat[i - 1, 0] + d_cost(src[i - 1], src[i]) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
for j in range(1, lent + 1):
d_mat[0, j] = d_mat[0, j - 1] + d_cost(tar[j - 1], tar[j]) # depends on [control=['for'], data=['j']]
for i in range(1, lens + 1):
for j in range(1, lent + 1):
d_mat[i, j] = min(d_mat[i - 1, j] + d_cost(src[i - 1], src[i]), d_mat[i, j - 1] + d_cost(tar[j - 1], tar[j]), d_mat[i - 1, j - 1] + r_cost(src[i], tar[j])) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return d_mat[lens, lent] |
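The class's _letter_groups attribute is not shown in the row. With the letter groups published by Zobel & Dart substituted in, the r(a,b) cost can be illustrated standalone (a sketch of the inner helper, not the class itself):

# Editex letter groups as published by Zobel & Dart (1996).
LETTER_GROUPS = [set('AEIOUY'), set('BP'), set('CKQ'), set('DT'), set('LR'),
                 set('MN'), set('GJ'), set('FPV'), set('SXZ'), set('CSZ')]

def r_cost(ch1, ch2, cost=(0, 1, 2)):
    match_cost, group_cost, mismatch_cost = cost
    if ch1 == ch2:
        return match_cost
    if any(ch1 in g and ch2 in g for g in LETTER_GROUPS):
        return group_cost
    return mismatch_cost

assert r_cost('A', 'A') == 0  # exact match
assert r_cost('C', 'K') == 1  # same phonetic group
assert r_cost('C', 'H') == 2  # no shared group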
def set_sampling_strategies(self, filter, strategy_and_parms):
"""Set sampling strategies for filtered sensors - these sensors have to exsist"""
result_list = yield self.list_sensors(filter=filter)
sensor_dict = {}
for result in result_list:
sensor_name = result.object.normalised_name
resource_name = result.object.parent_name
if resource_name not in sensor_dict:
sensor_dict[resource_name] = {}
try:
resource_obj = self.children[resource_name]
yield resource_obj.set_sampling_strategy(sensor_name, strategy_and_parms)
sensor_dict[resource_name][sensor_name] = strategy_and_parms
self._logger.debug(
'Set sampling strategy on resource %s for %s'
% (resource_name, sensor_name))
except Exception as exc:
self._logger.error(
'Cannot set sampling strategy on resource %s for %s (%s)'
% (resource_name, sensor_name, exc))
sensor_dict[resource_name][sensor_name] = None
raise tornado.gen.Return(sensor_dict) | def function[set_sampling_strategies, parameter[self, filter, strategy_and_parms]]:
    constant[Set sampling strategies for filtered sensors - these sensors have to exist]
variable[result_list] assign[=] <ast.Yield object at 0x7da1b054a110>
variable[sensor_dict] assign[=] dictionary[[], []]
for taget[name[result]] in starred[name[result_list]] begin[:]
variable[sensor_name] assign[=] name[result].object.normalised_name
variable[resource_name] assign[=] name[result].object.parent_name
if compare[name[resource_name] <ast.NotIn object at 0x7da2590d7190> name[sensor_dict]] begin[:]
call[name[sensor_dict]][name[resource_name]] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b0549540>
<ast.Raise object at 0x7da1b0549ff0> | keyword[def] identifier[set_sampling_strategies] ( identifier[self] , identifier[filter] , identifier[strategy_and_parms] ):
literal[string]
identifier[result_list] = keyword[yield] identifier[self] . identifier[list_sensors] ( identifier[filter] = identifier[filter] )
identifier[sensor_dict] ={}
keyword[for] identifier[result] keyword[in] identifier[result_list] :
identifier[sensor_name] = identifier[result] . identifier[object] . identifier[normalised_name]
identifier[resource_name] = identifier[result] . identifier[object] . identifier[parent_name]
keyword[if] identifier[resource_name] keyword[not] keyword[in] identifier[sensor_dict] :
identifier[sensor_dict] [ identifier[resource_name] ]={}
keyword[try] :
identifier[resource_obj] = identifier[self] . identifier[children] [ identifier[resource_name] ]
keyword[yield] identifier[resource_obj] . identifier[set_sampling_strategy] ( identifier[sensor_name] , identifier[strategy_and_parms] )
identifier[sensor_dict] [ identifier[resource_name] ][ identifier[sensor_name] ]= identifier[strategy_and_parms]
identifier[self] . identifier[_logger] . identifier[debug] (
literal[string]
%( identifier[resource_name] , identifier[sensor_name] ))
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[self] . identifier[_logger] . identifier[error] (
literal[string]
%( identifier[resource_name] , identifier[sensor_name] , identifier[exc] ))
identifier[sensor_dict] [ identifier[resource_name] ][ identifier[sensor_name] ]= keyword[None]
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ( identifier[sensor_dict] ) | def set_sampling_strategies(self, filter, strategy_and_parms):
"""Set sampling strategies for filtered sensors - these sensors have to exsist"""
result_list = (yield self.list_sensors(filter=filter))
sensor_dict = {}
for result in result_list:
sensor_name = result.object.normalised_name
resource_name = result.object.parent_name
if resource_name not in sensor_dict:
sensor_dict[resource_name] = {} # depends on [control=['if'], data=['resource_name', 'sensor_dict']]
try:
resource_obj = self.children[resource_name]
yield resource_obj.set_sampling_strategy(sensor_name, strategy_and_parms)
sensor_dict[resource_name][sensor_name] = strategy_and_parms
self._logger.debug('Set sampling strategy on resource %s for %s' % (resource_name, sensor_name)) # depends on [control=['try'], data=[]]
except Exception as exc:
self._logger.error('Cannot set sampling strategy on resource %s for %s (%s)' % (resource_name, sensor_name, exc))
sensor_dict[resource_name][sensor_name] = None # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['result']]
raise tornado.gen.Return(sensor_dict) |
def html_print_file(self, catalog, destination):
"""
        Pretty-prints the given catalog as HTML.
        :param catalog: catalog of texts you wish to pretty print
        :param destination: path of the HTML file the output is written to
        :return: None; the HTML output is written to destination.
"""
        with open(destination, mode='w', encoding='utf8') as t_f:
for text in catalog:
pnum = catalog[text]['pnum']
edition = catalog[text]['edition']
metadata = '<br>\n'.join(catalog[text]['metadata'])
transliteration = '<br>\n'.join(catalog[text]['transliteration'])
normalization = '<br>\n'.join(catalog[text]['normalization'])
translation = '<br>\n'.join(catalog[text]['translation'])
self.html_file = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{edition}</title>
</head>
<body><table cellpadding="10"; border="1">
<tr><th>
<h2>{edition}<br>{pnum}</h2>
</th><th>
<h3>transliteration</h3>
</th><th>
<h3>normalization</h3>
</th><th>
<h3>translation</h3>
</tr><tr><td>
{metadata}</td><td>
<p>{trans}
</td><td>
<p>{norm}
</td><td>
<font size='2'>
{translation}
</font></td></tr>
</table>
<br>
</body>
</html>""".format(
pnum=pnum, edition=edition, metadata=metadata,
trans=transliteration, norm=normalization,
translation=translation)
t_f.write(self.html_file) | def function[html_print_file, parameter[self, catalog, destination]]:
constant[
    Pretty-prints the given catalog as HTML.
    :param catalog: catalog of texts you wish to pretty print
    :param destination: path of the HTML file the output is written to
    :return: None; the HTML output is written to destination.
]
with call[name[open], parameter[name[destination]]] begin[:]
for taget[name[text]] in starred[name[catalog]] begin[:]
variable[pnum] assign[=] call[call[name[catalog]][name[text]]][constant[pnum]]
variable[edition] assign[=] call[call[name[catalog]][name[text]]][constant[edition]]
variable[metadata] assign[=] call[constant[<br>
].join, parameter[call[call[name[catalog]][name[text]]][constant[metadata]]]]
variable[transliteration] assign[=] call[constant[<br>
].join, parameter[call[call[name[catalog]][name[text]]][constant[transliteration]]]]
variable[normalization] assign[=] call[constant[<br>
].join, parameter[call[call[name[catalog]][name[text]]][constant[normalization]]]]
variable[translation] assign[=] call[constant[<br>
].join, parameter[call[call[name[catalog]][name[text]]][constant[translation]]]]
name[self].html_file assign[=] call[constant[<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{edition}</title>
</head>
<body><table cellpadding="10"; border="1">
<tr><th>
<h2>{edition}<br>{pnum}</h2>
</th><th>
<h3>transliteration</h3>
</th><th>
<h3>normalization</h3>
</th><th>
<h3>translation</h3>
</tr><tr><td>
{metadata}</td><td>
<p>{trans}
</td><td>
<p>{norm}
</td><td>
<font size='2'>
{translation}
</font></td></tr>
</table>
<br>
</body>
</html>].format, parameter[]]
call[name[t_f].write, parameter[name[self].html_file]] | keyword[def] identifier[html_print_file] ( identifier[self] , identifier[catalog] , identifier[destination] ):
literal[string]
keyword[with] identifier[open] ( identifier[destination] , identifier[mode] = literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[t_f] :
keyword[for] identifier[text] keyword[in] identifier[catalog] :
identifier[pnum] = identifier[catalog] [ identifier[text] ][ literal[string] ]
identifier[edition] = identifier[catalog] [ identifier[text] ][ literal[string] ]
identifier[metadata] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[text] ][ literal[string] ])
identifier[transliteration] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[text] ][ literal[string] ])
identifier[normalization] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[text] ][ literal[string] ])
identifier[translation] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[text] ][ literal[string] ])
identifier[self] . identifier[html_file] = literal[string] . identifier[format] (
identifier[pnum] = identifier[pnum] , identifier[edition] = identifier[edition] , identifier[metadata] = identifier[metadata] ,
identifier[trans] = identifier[transliteration] , identifier[norm] = identifier[normalization] ,
identifier[translation] = identifier[translation] )
identifier[t_f] . identifier[write] ( identifier[self] . identifier[html_file] ) | def html_print_file(self, catalog, destination):
"""
    Pretty-prints the given catalog as HTML.
    :param catalog: catalog of texts you wish to pretty print
    :param destination: path of the HTML file the output is written to
    :return: None; the HTML output is written to destination.
"""
    with open(destination, mode='w', encoding='utf8') as t_f:
for text in catalog:
pnum = catalog[text]['pnum']
edition = catalog[text]['edition']
metadata = '<br>\n'.join(catalog[text]['metadata'])
transliteration = '<br>\n'.join(catalog[text]['transliteration'])
normalization = '<br>\n'.join(catalog[text]['normalization'])
translation = '<br>\n'.join(catalog[text]['translation'])
self.html_file = '<!DOCTYPE html>\n<html lang="en">\n<head>\n<meta charset="UTF-8">\n<title>{edition}</title>\n</head>\n<body><table cellpadding="10"; border="1">\n<tr><th>\n<h2>{edition}<br>{pnum}</h2>\n</th><th>\n<h3>transliteration</h3>\n</th><th>\n<h3>normalization</h3>\n</th><th>\n<h3>translation</h3>\n</tr><tr><td>\n{metadata}</td><td>\n<p>{trans}\n</td><td>\n<p>{norm}\n</td><td>\n<font size=\'2\'>\n{translation}\n</font></td></tr>\n\n</table>\n<br>\n</body>\n</html>'.format(pnum=pnum, edition=edition, metadata=metadata, trans=transliteration, norm=normalization, translation=translation)
t_f.write(self.html_file) # depends on [control=['for'], data=['text']] # depends on [control=['with'], data=['t_f']] |
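The key accesses above pin down the catalog shape html_print_file expects; a minimal entry looks like this (the identifiers and strings are invented for illustration):

catalog = {
    'text1': {
        'pnum': 'P254202',
        'edition': 'ARM 01, 001',
        'metadata': ['Genre: letter', 'Provenience: Mari'],
        'transliteration': ['a-na be-li2-ia', 'qi2-bi2-ma'],
        'normalization': ['ana belija', 'qibima'],
        'translation': ['To my lord', 'speak!'],
    }
}
# Each list is joined with '<br>\n' and substituted into the HTML template.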
def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
                raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
else:
                raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err)
constant[
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
]
call[name[self]._check_values, parameter[]]
variable[json_message] assign[=] call[name[self].to_json_message, parameter[]]
if compare[name[test] is constant[None]] begin[:]
<ast.Try object at 0x7da1b06262f0>
if name[test] begin[:]
call[name[print], parameter[binary_operation[constant[JSON message is:
%s] <ast.Mod object at 0x7da2590d6920> call[name[json].dumps, parameter[name[json_message]]]]]]
return[None]
if name[self].__template_id begin[:]
variable[endpoint_url] assign[=] binary_operation[name[__POSTMARK_URL__] + constant[email/withTemplate/]]
variable[req] assign[=] call[name[Request], parameter[name[endpoint_url], call[call[name[json].dumps, parameter[name[json_message]]].encode, parameter[constant[utf8]]], dictionary[[<ast.Constant object at 0x7da1b06262c0>, <ast.Constant object at 0x7da1b0625840>, <ast.Constant object at 0x7da1b0626f20>, <ast.Constant object at 0x7da1b0625a80>], [<ast.Constant object at 0x7da1b06263e0>, <ast.Constant object at 0x7da1b0625900>, <ast.Attribute object at 0x7da1b0626350>, <ast.Attribute object at 0x7da1b0625ff0>]]]]
<ast.Try object at 0x7da1b0626c50> | keyword[def] identifier[send] ( identifier[self] , identifier[test] = keyword[None] ):
literal[string]
identifier[self] . identifier[_check_values] ()
identifier[json_message] = identifier[self] . identifier[to_json_message] ()
keyword[if] identifier[test] keyword[is] keyword[None] :
keyword[try] :
keyword[from] identifier[django] . identifier[conf] keyword[import] identifier[settings] keyword[as] identifier[django_settings]
identifier[test] = identifier[getattr] ( identifier[django_settings] , literal[string] , keyword[None] )
keyword[except] identifier[ImportError] :
keyword[pass]
keyword[if] identifier[test] :
identifier[print] ( literal[string] % identifier[json] . identifier[dumps] ( identifier[json_message] , identifier[cls] = identifier[PMJSONEncoder] ))
keyword[return]
keyword[if] identifier[self] . identifier[__template_id] :
identifier[endpoint_url] = identifier[__POSTMARK_URL__] + literal[string]
keyword[else] :
identifier[endpoint_url] = identifier[__POSTMARK_URL__] + literal[string]
identifier[req] = identifier[Request] (
identifier[endpoint_url] ,
identifier[json] . identifier[dumps] ( identifier[json_message] , identifier[cls] = identifier[PMJSONEncoder] ). identifier[encode] ( literal[string] ),
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[__api_key] ,
literal[string] : identifier[self] . identifier[__user_agent]
}
)
keyword[try] :
identifier[result] = identifier[urlopen] ( identifier[req] )
identifier[jsontxt] = identifier[result] . identifier[read] (). identifier[decode] ( literal[string] )
identifier[result] . identifier[close] ()
keyword[if] identifier[result] . identifier[code] == literal[int] :
identifier[self] . identifier[message_id] = identifier[json] . identifier[loads] ( identifier[jsontxt] ). identifier[get] ( literal[string] , keyword[None] )
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[PMMailSendException] ( literal[string] %( identifier[result] . identifier[code] , identifier[result] . identifier[msg] ))
keyword[except] identifier[HTTPError] keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[code] == literal[int] :
keyword[raise] identifier[PMMailUnauthorizedException] ( literal[string] , identifier[err] )
keyword[elif] identifier[err] . identifier[code] == literal[int] :
keyword[try] :
identifier[jsontxt] = identifier[err] . identifier[read] (). identifier[decode] ( literal[string] )
identifier[jsonobj] = identifier[json] . identifier[loads] ( identifier[jsontxt] )
identifier[desc] = identifier[jsonobj] [ literal[string] ]
identifier[error_code] = identifier[jsonobj] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[PMMailUnprocessableEntityException] ( literal[string] )
keyword[if] identifier[error_code] == literal[int] :
keyword[raise] identifier[PMMailInactiveRecipientException] ( literal[string] )
keyword[raise] identifier[PMMailUnprocessableEntityException] ( literal[string] % identifier[desc] )
keyword[elif] identifier[err] . identifier[code] == literal[int] :
keyword[raise] identifier[PMMailServerErrorException] ( literal[string] , identifier[err] )
keyword[except] identifier[URLError] keyword[as] identifier[err] :
keyword[if] identifier[hasattr] ( identifier[err] , literal[string] ):
keyword[raise] identifier[PMMailURLException] ( literal[string] % identifier[err] . identifier[reason] , identifier[err] )
keyword[elif] identifier[hasattr] ( identifier[err] , literal[string] ):
keyword[raise] identifier[PMMailURLException] ( literal[string] % identifier[err] . identifier[code] , identifier[err] )
keyword[else] :
keyword[raise] identifier[PMMailURLException] ( literal[string] , identifier[err] ) | def send(self, test=None):
"""
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
"""
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, 'POSTMARK_TEST_MODE', None) # depends on [control=['try'], data=[]]
except ImportError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['test']]
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return # depends on [control=['if'], data=[]]
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/' # depends on [control=['if'], data=[]]
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(endpoint_url, json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'), {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Postmark-Server-Token': self.__api_key, 'User-agent': self.__user_agent})
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True # depends on [control=['if'], data=[]]
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg)) # depends on [control=['try'], data=[]]
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err) # depends on [control=['if'], data=[]]
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode'] # depends on [control=['try'], data=[]]
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given') # depends on [control=['except'], data=[]]
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.') # depends on [control=['if'], data=[]]
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc) # depends on [control=['if'], data=[]]
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err) # depends on [control=['if'], data=[]]
elif hasattr(err, 'code'):
            raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err) # depends on [control=['if'], data=[]]
else:
            raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err) # depends on [control=['except'], data=['err']]
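Assuming this method belongs to a python-postmark-style PMMail class (the constructor keywords below are assumptions, not confirmed against the library), the test path can be exercised without touching the API:

message = PMMail(api_key='POSTMARK_API_TEST',  # Postmark's documented test token
                 sender='from@example.com',
                 to='to@example.com',
                 subject='Hello',
                 text_body='Plain-text body')
message.send(test=True)  # prints the JSON payload instead of POSTing it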
def _count_relevant_tb_levels(tb):
"""Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``.
"""
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1
else:
contiguous_unittest_frames = 0
tb = tb.tb_next
return length - contiguous_unittest_frames | def function[_count_relevant_tb_levels, parameter[tb]]:
constant[Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``.
]
variable[length] assign[=] constant[0]
while name[tb] begin[:]
<ast.AugAssign object at 0x7da204960490>
if call[name[_is_unittest_frame], parameter[name[tb]]] begin[:]
<ast.AugAssign object at 0x7da204960070>
variable[tb] assign[=] name[tb].tb_next
return[binary_operation[name[length] - name[contiguous_unittest_frames]]] | keyword[def] identifier[_count_relevant_tb_levels] ( identifier[tb] ):
literal[string]
identifier[length] = identifier[contiguous_unittest_frames] = literal[int]
keyword[while] identifier[tb] :
identifier[length] += literal[int]
keyword[if] identifier[_is_unittest_frame] ( identifier[tb] ):
identifier[contiguous_unittest_frames] += literal[int]
keyword[else] :
identifier[contiguous_unittest_frames] = literal[int]
identifier[tb] = identifier[tb] . identifier[tb_next]
keyword[return] identifier[length] - identifier[contiguous_unittest_frames] | def _count_relevant_tb_levels(tb):
"""Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``.
"""
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1 # depends on [control=['if'], data=[]]
else:
contiguous_unittest_frames = 0
tb = tb.tb_next # depends on [control=['while'], data=[]]
return length - contiguous_unittest_frames |
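# A minimal usage sketch for _count_relevant_tb_levels. The helper
# _is_unittest_frame is defined elsewhere in the original; the version below
# is an assumption that checks for the `__unittest` marker the docstring
# mentions.
import sys

def _is_unittest_frame(tb):
    # unittest modules define `__unittest` in their globals
    return '__unittest' in tb.tb_frame.f_globals

try:
    raise ValueError("boom")
except ValueError:
    tb = sys.exc_info()[2]

# No unittest frames in this traceback, so the full length (1) is kept.
print(_count_relevant_tb_levels(tb))  # -> 1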
def _lazy_turbo_mapping(initial, pre_size):
'''
_lazy_turbo_mapping is a blatant copy of the pyrsistent._pmap._turbo_mapping function, except
it works for lazy maps; this seems like the only way to fully overload PMap.
'''
size = pre_size or (2 * len(initial)) or 8
buckets = size * [None]
if not isinstance(initial, colls.Mapping): initial = dict(initial)
for k, v in six.iteritems(initial):
h = hash(k)
index = h % size
bucket = buckets[index]
if bucket: bucket.append((k, v))
else: buckets[index] = [(k, v)]
return LazyPMap(len(initial), ps.pvector().extend(buckets)) | def function[_lazy_turbo_mapping, parameter[initial, pre_size]]:
constant[
_lazy_turbo_mapping is a blatant copy of the pyrsistent._pmap._turbo_mapping function, except
it works for lazy maps; this seems like the only way to fully overload PMap.
]
variable[size] assign[=] <ast.BoolOp object at 0x7da18bcc9210>
variable[buckets] assign[=] binary_operation[name[size] * list[[<ast.Constant object at 0x7da18bcca4a0>]]]
if <ast.UnaryOp object at 0x7da18bcc9c00> begin[:]
variable[initial] assign[=] call[name[dict], parameter[name[initial]]]
for taget[tuple[[<ast.Name object at 0x7da18bcc8910>, <ast.Name object at 0x7da18bcc9ae0>]]] in starred[call[name[six].iteritems, parameter[name[initial]]]] begin[:]
variable[h] assign[=] call[name[hash], parameter[name[k]]]
variable[index] assign[=] binary_operation[name[h] <ast.Mod object at 0x7da2590d6920> name[size]]
variable[bucket] assign[=] call[name[buckets]][name[index]]
if name[bucket] begin[:]
call[name[bucket].append, parameter[tuple[[<ast.Name object at 0x7da18bccad70>, <ast.Name object at 0x7da18bcca320>]]]]
return[call[name[LazyPMap], parameter[call[name[len], parameter[name[initial]]], call[call[name[ps].pvector, parameter[]].extend, parameter[name[buckets]]]]]] | keyword[def] identifier[_lazy_turbo_mapping] ( identifier[initial] , identifier[pre_size] ):
literal[string]
identifier[size] = identifier[pre_size] keyword[or] ( literal[int] * identifier[len] ( identifier[initial] )) keyword[or] literal[int]
identifier[buckets] = identifier[size] *[ keyword[None] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[initial] , identifier[colls] . identifier[Mapping] ): identifier[initial] = identifier[dict] ( identifier[initial] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[initial] ):
identifier[h] = identifier[hash] ( identifier[k] )
identifier[index] = identifier[h] % identifier[size]
identifier[bucket] = identifier[buckets] [ identifier[index] ]
keyword[if] identifier[bucket] : identifier[bucket] . identifier[append] (( identifier[k] , identifier[v] ))
keyword[else] : identifier[buckets] [ identifier[index] ]=[( identifier[k] , identifier[v] )]
keyword[return] identifier[LazyPMap] ( identifier[len] ( identifier[initial] ), identifier[ps] . identifier[pvector] (). identifier[extend] ( identifier[buckets] )) | def _lazy_turbo_mapping(initial, pre_size):
"""
_lazy_turbo_mapping is a blatant copy of the pyrsistent._pmap._turbo_mapping function, except
it works for lazy maps; this seems like the only way to fully overload PMap.
"""
size = pre_size or 2 * len(initial) or 8
buckets = size * [None]
if not isinstance(initial, colls.Mapping):
initial = dict(initial) # depends on [control=['if'], data=[]]
for (k, v) in six.iteritems(initial):
h = hash(k)
index = h % size
bucket = buckets[index]
if bucket:
bucket.append((k, v)) # depends on [control=['if'], data=[]]
else:
buckets[index] = [(k, v)] # depends on [control=['for'], data=[]]
return LazyPMap(len(initial), ps.pvector().extend(buckets)) |
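# A standalone sketch of the bucket layout _lazy_turbo_mapping builds, minus
# the pyrsistent wrapper (LazyPMap and ps.pvector are external dependencies
# of the original and are not reproduced here).
initial = {'a': 1, 'b': 2, 'c': 3}
size = 2 * len(initial)            # pre_size=0 falls through to 2*len(initial)
buckets = size * [None]
for k, v in initial.items():
    index = hash(k) % size         # same hash-modulo placement as above
    if buckets[index]:
        buckets[index].append((k, v))
    else:
        buckets[index] = [(k, v)]
print(buckets)                     # bucket contents vary with the hash seed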
def maxind_numba(block):
""" filter for indels """
## remove terminal edges
inds = 0
for row in xrange(block.shape[0]):
where = np.where(block[row] != 45)[0]
if len(where) == 0:
obs = 100
else:
left = np.min(where)
right = np.max(where)
obs = np.sum(block[row, left:right] == 45)
if obs > inds:
inds = obs
return inds | def function[maxind_numba, parameter[block]]:
    constant[ Filter for indels: return the maximum number of internal gap characters ('-', ASCII 45) found in any row, ignoring terminal gaps. ]
variable[inds] assign[=] constant[0]
for taget[name[row]] in starred[call[name[xrange], parameter[call[name[block].shape][constant[0]]]]] begin[:]
variable[where] assign[=] call[call[name[np].where, parameter[compare[call[name[block]][name[row]] not_equal[!=] constant[45]]]]][constant[0]]
if compare[call[name[len], parameter[name[where]]] equal[==] constant[0]] begin[:]
variable[obs] assign[=] constant[100]
if compare[name[obs] greater[>] name[inds]] begin[:]
variable[inds] assign[=] name[obs]
return[name[inds]] | keyword[def] identifier[maxind_numba] ( identifier[block] ):
literal[string]
identifier[inds] = literal[int]
keyword[for] identifier[row] keyword[in] identifier[xrange] ( identifier[block] . identifier[shape] [ literal[int] ]):
identifier[where] = identifier[np] . identifier[where] ( identifier[block] [ identifier[row] ]!= literal[int] )[ literal[int] ]
keyword[if] identifier[len] ( identifier[where] )== literal[int] :
identifier[obs] = literal[int]
keyword[else] :
identifier[left] = identifier[np] . identifier[min] ( identifier[where] )
identifier[right] = identifier[np] . identifier[max] ( identifier[where] )
identifier[obs] = identifier[np] . identifier[sum] ( identifier[block] [ identifier[row] , identifier[left] : identifier[right] ]== literal[int] )
keyword[if] identifier[obs] > identifier[inds] :
identifier[inds] = identifier[obs]
keyword[return] identifier[inds] | def maxind_numba(block):
""" filter for indels """
## remove terminal edges
inds = 0
for row in xrange(block.shape[0]):
where = np.where(block[row] != 45)[0]
if len(where) == 0:
obs = 100 # depends on [control=['if'], data=[]]
else:
left = np.min(where)
right = np.max(where)
obs = np.sum(block[row, left:right] == 45)
if obs > inds:
inds = obs # depends on [control=['if'], data=['obs', 'inds']] # depends on [control=['for'], data=['row']]
return inds |
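# A worked example for maxind_numba: 45 is ord('-'), so the function reports
# the largest count of internal gap characters in any row, after trimming
# terminal gaps. Assumes Python 3, where xrange must be aliased to range for
# the definition above to run un-jitted.
import numpy as np
xrange = range
block = np.array([list(b"--AC-GT--"),    # one internal '-'   -> 1
                  list(b"AACCGGTT-")],   # only a terminal '-' -> 0
                 dtype=np.uint8)
print(maxind_numba(block))  # -> 1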
def _call(self, method, *args, **kwargs):
"""Call the remote service and return the response data."""
assert self.session
if not kwargs.get('verify'):
kwargs['verify'] = self.SSL_VERIFY
response = self.session.request(method, *args, **kwargs)
response_json = response.text and response.json() or {}
if response.status_code < 200 or response.status_code >= 300:
message = response_json.get('error', response_json.get('message'))
raise HelpScoutRemoteException(response.status_code, message)
self.page_current = response_json.get(self.PAGE_CURRENT, 1)
self.page_total = response_json.get(self.PAGE_TOTAL, 1)
try:
return response_json[self.PAGE_DATA_MULTI]
except KeyError:
pass
try:
return [response_json[self.PAGE_DATA_SINGLE]]
except KeyError:
pass
return None | def function[_call, parameter[self, method]]:
constant[Call the remote service and return the response data.]
assert[name[self].session]
if <ast.UnaryOp object at 0x7da204622fe0> begin[:]
call[name[kwargs]][constant[verify]] assign[=] name[self].SSL_VERIFY
variable[response] assign[=] call[name[self].session.request, parameter[name[method], <ast.Starred object at 0x7da204623c40>]]
variable[response_json] assign[=] <ast.BoolOp object at 0x7da204620b80>
if <ast.BoolOp object at 0x7da204621d80> begin[:]
variable[message] assign[=] call[name[response_json].get, parameter[constant[error], call[name[response_json].get, parameter[constant[message]]]]]
<ast.Raise object at 0x7da204623e20>
name[self].page_current assign[=] call[name[response_json].get, parameter[name[self].PAGE_CURRENT, constant[1]]]
name[self].page_total assign[=] call[name[response_json].get, parameter[name[self].PAGE_TOTAL, constant[1]]]
<ast.Try object at 0x7da2046203a0>
<ast.Try object at 0x7da2046213f0>
return[constant[None]] | keyword[def] identifier[_call] ( identifier[self] , identifier[method] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[self] . identifier[session]
keyword[if] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[SSL_VERIFY]
identifier[response] = identifier[self] . identifier[session] . identifier[request] ( identifier[method] ,* identifier[args] ,** identifier[kwargs] )
identifier[response_json] = identifier[response] . identifier[text] keyword[and] identifier[response] . identifier[json] () keyword[or] {}
keyword[if] identifier[response] . identifier[status_code] < literal[int] keyword[or] identifier[response] . identifier[status_code] >= literal[int] :
identifier[message] = identifier[response_json] . identifier[get] ( literal[string] , identifier[response_json] . identifier[get] ( literal[string] ))
keyword[raise] identifier[HelpScoutRemoteException] ( identifier[response] . identifier[status_code] , identifier[message] )
identifier[self] . identifier[page_current] = identifier[response_json] . identifier[get] ( identifier[self] . identifier[PAGE_CURRENT] , literal[int] )
identifier[self] . identifier[page_total] = identifier[response_json] . identifier[get] ( identifier[self] . identifier[PAGE_TOTAL] , literal[int] )
keyword[try] :
keyword[return] identifier[response_json] [ identifier[self] . identifier[PAGE_DATA_MULTI] ]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[try] :
keyword[return] [ identifier[response_json] [ identifier[self] . identifier[PAGE_DATA_SINGLE] ]]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[return] keyword[None] | def _call(self, method, *args, **kwargs):
"""Call the remote service and return the response data."""
assert self.session
if not kwargs.get('verify'):
kwargs['verify'] = self.SSL_VERIFY # depends on [control=['if'], data=[]]
response = self.session.request(method, *args, **kwargs)
response_json = response.text and response.json() or {}
if response.status_code < 200 or response.status_code >= 300:
message = response_json.get('error', response_json.get('message'))
raise HelpScoutRemoteException(response.status_code, message) # depends on [control=['if'], data=[]]
self.page_current = response_json.get(self.PAGE_CURRENT, 1)
self.page_total = response_json.get(self.PAGE_TOTAL, 1)
try:
return response_json[self.PAGE_DATA_MULTI] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
try:
return [response_json[self.PAGE_DATA_SINGLE]] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
return None |
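# A sketch of how _call unpacks a paginated payload. The key names "items"
# and "item" are assumptions standing in for PAGE_DATA_MULTI and
# PAGE_DATA_SINGLE, whose real values live on the client class.
response_json = {"page": 2, "pages": 5, "items": [{"id": 1}, {"id": 2}]}
try:
    data = response_json["items"]      # multi-record key is tried first
except KeyError:
    data = [response_json["item"]]     # then the single-record key, wrapped
print(data)  # -> [{'id': 1}, {'id': 2}]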
def load_from_store(self):
"""Load index from store.
:return: Whether index was correctly loaded or not
:rtype: bool
:raise AttributeError: If no datastore is defined
"""
if not self._store:
raise AttributeError('No datastore defined!')
if self._store.has_blob('all_keys'):
data = Serializer.deserialize(self._store.get_blob('all_keys'))
self.load_from_data(data)
return True
elif self._store.has_blob('all_keys_with_undefined'):
blob = self._store.get_blob('all_keys_with_undefined')
data = Serializer.deserialize(blob)
self.load_from_data(data, with_undefined=True)
return True
else:
return False | def function[load_from_store, parameter[self]]:
constant[Load index from store.
:return: Whether index was correctly loaded or not
:rtype: bool
:raise AttributeError: If no datastore is defined
]
if <ast.UnaryOp object at 0x7da1b1834be0> begin[:]
<ast.Raise object at 0x7da1b1834ac0>
if call[name[self]._store.has_blob, parameter[constant[all_keys]]] begin[:]
variable[data] assign[=] call[name[Serializer].deserialize, parameter[call[name[self]._store.get_blob, parameter[constant[all_keys]]]]]
call[name[self].load_from_data, parameter[name[data]]]
return[constant[True]] | keyword[def] identifier[load_from_store] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_store] :
keyword[raise] identifier[AttributeError] ( literal[string] )
keyword[if] identifier[self] . identifier[_store] . identifier[has_blob] ( literal[string] ):
identifier[data] = identifier[Serializer] . identifier[deserialize] ( identifier[self] . identifier[_store] . identifier[get_blob] ( literal[string] ))
identifier[self] . identifier[load_from_data] ( identifier[data] )
keyword[return] keyword[True]
keyword[elif] identifier[self] . identifier[_store] . identifier[has_blob] ( literal[string] ):
identifier[blob] = identifier[self] . identifier[_store] . identifier[get_blob] ( literal[string] )
identifier[data] = identifier[Serializer] . identifier[deserialize] ( identifier[blob] )
identifier[self] . identifier[load_from_data] ( identifier[data] , identifier[with_undefined] = keyword[True] )
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def load_from_store(self):
"""Load index from store.
:return: Whether index was correctly loaded or not
:rtype: bool
:raise AttributeError: If no datastore is defined
"""
if not self._store:
raise AttributeError('No datastore defined!') # depends on [control=['if'], data=[]]
if self._store.has_blob('all_keys'):
data = Serializer.deserialize(self._store.get_blob('all_keys'))
self.load_from_data(data)
return True # depends on [control=['if'], data=[]]
elif self._store.has_blob('all_keys_with_undefined'):
blob = self._store.get_blob('all_keys_with_undefined')
data = Serializer.deserialize(blob)
self.load_from_data(data, with_undefined=True)
return True # depends on [control=['if'], data=[]]
else:
return False |
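# A minimal in-memory stand-in for the datastore interface load_from_store
# expects (has_blob/get_blob). Serializer and the index class itself are
# external; this only illustrates the protocol being called.
class MemoryStore:
    def __init__(self):
        self._blobs = {}

    def has_blob(self, name):
        return name in self._blobs

    def get_blob(self, name):
        return self._blobs[name]

store = MemoryStore()
store._blobs['all_keys'] = b'<serialized index>'  # placeholder payload
print(store.has_blob('all_keys'))                 # -> True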
def _request(self, method, identifier, key=None, value=None):
"""Perform request with identifier."""
params = {'id': identifier}
if key is not None and value is not None:
params[key] = value
result = yield from self._transact(method, params)
return result.get(key) | def function[_request, parameter[self, method, identifier, key, value]]:
constant[Perform request with identifier.]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b02e5f00>], [<ast.Name object at 0x7da1b02e73d0>]]
if <ast.BoolOp object at 0x7da1b02e7ca0> begin[:]
call[name[params]][name[key]] assign[=] name[value]
variable[result] assign[=] <ast.YieldFrom object at 0x7da1b02e6140>
return[call[name[result].get, parameter[name[key]]]] | keyword[def] identifier[_request] ( identifier[self] , identifier[method] , identifier[identifier] , identifier[key] = keyword[None] , identifier[value] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[identifier] }
keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[params] [ identifier[key] ]= identifier[value]
identifier[result] = keyword[yield] keyword[from] identifier[self] . identifier[_transact] ( identifier[method] , identifier[params] )
keyword[return] identifier[result] . identifier[get] ( identifier[key] ) | def _request(self, method, identifier, key=None, value=None):
"""Perform request with identifier."""
params = {'id': identifier}
if key is not None and value is not None:
params[key] = value # depends on [control=['if'], data=[]]
result = (yield from self._transact(method, params))
return result.get(key) |
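# The params dict _request builds, in isolation: the optional key/value pair
# is attached only when both are provided. The names below are made up for
# illustration.
identifier, key, value = 'device-1', 'brightness', 42
params = {'id': identifier}
if key is not None and value is not None:
    params[key] = value
print(params)  # -> {'id': 'device-1', 'brightness': 42}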
def update(self, force=[]):
"""Update the filters according to `self.rtdc_ds.config["filtering"]`
Parameters
----------
force : list
A list of feature names that must be refiltered with
min/max values.
"""
# These lists may help us become very fast in the future
newkeys = []
oldvals = []
newvals = []
cfg_cur = self.rtdc_ds.config["filtering"]
cfg_old = self._old_config
# Determine which data was updated
for skey in list(cfg_cur.keys()):
if skey not in cfg_old:
cfg_old[skey] = None
if cfg_cur[skey] != cfg_old[skey]:
newkeys.append(skey)
oldvals.append(cfg_old[skey])
newvals.append(cfg_cur[skey])
# 1. Filter all feature min/max values.
# This line gets the feature names that must be filtered.
col2filter = []
for k in newkeys:
# k[:-4] because we want to crop " min" and " max"
if k[:-4] in dfn.scalar_feature_names:
col2filter.append(k[:-4])
for f in force:
# Manually add forced features
if f in dfn.scalar_feature_names:
col2filter.append(f)
else:
# Make sure the feature name is valid.
raise ValueError("Unknown feature name {}".format(f))
col2filter = np.unique(col2filter)
for col in col2filter:
if col in self.rtdc_ds:
fstart = col + " min"
fend = col + " max"
# Get the current feature filter
col_filt = self[col]
# If min and max exist and if they are not identical:
if (fstart in cfg_cur and
fend in cfg_cur and
cfg_cur[fstart] != cfg_cur[fend]):
# TODO: speedup
# Here one could check for smaller values in the
# lists oldvals/newvals that we defined above.
# Be sure to check against force in that case!
ivalstart = cfg_cur[fstart]
ivalend = cfg_cur[fend]
if ivalstart > ivalend:
msg = "inverting filter: {} > {}".format(fstart, fend)
warnings.warn(msg)
ivalstart, ivalend = ivalend, ivalstart
data = self.rtdc_ds[col]
col_filt[:] = (ivalstart <= data)*(data <= ivalend)
else:
col_filt[:] = True
# 2. Filter with polygon filters
# check if something has changed
pf_id = "polygon filters"
if (
(pf_id in cfg_cur and pf_id not in cfg_old) or
(pf_id in cfg_cur and pf_id in cfg_old and
cfg_cur[pf_id] != cfg_old[pf_id])):
self.polygon[:] = True
# perform polygon filtering
for p in PolygonFilter.instances:
if p.unique_id in cfg_cur[pf_id]:
# update self.polygon
# iterate through axes
datax = self.rtdc_ds[p.axes[0]]
datay = self.rtdc_ds[p.axes[1]]
self.polygon *= p.filter(datax, datay)
# 3. Invalid filters
self.invalid[:] = True
if cfg_cur["remove invalid events"]:
for col in dfn.scalar_feature_names:
if col in self.rtdc_ds:
data = self.rtdc_ds[col]
invalid = np.isinf(data) | np.isnan(data)
self.invalid *= ~invalid
# 4. Finally combine all filters
# get a list of all filters
self.all[:] = True
if cfg_cur["enable filters"]:
for col in self._filters:
self.all[:] *= self._filters[col]
self.all[:] *= self.invalid
self.all[:] *= self.manual
self.all[:] *= self.polygon
# Filter with configuration keyword argument "limit events".
# This additional step limits the total number of events in
# self.all.
if cfg_cur["limit events"] > 0:
limit = cfg_cur["limit events"]
sub = self.all[self.all]
_f, idx = downsampling.downsample_rand(sub,
samples=limit,
ret_idx=True)
sub[~idx] = False
self.all[self.all] = sub
# Actual filtering is then done during plotting
self._old_config = self.rtdc_ds.config.copy()["filtering"] | def function[update, parameter[self, force]]:
constant[Update the filters according to `self.rtdc_ds.config["filtering"]`
Parameters
----------
force : list
A list of feature names that must be refiltered with
min/max values.
]
variable[newkeys] assign[=] list[[]]
variable[oldvals] assign[=] list[[]]
variable[newvals] assign[=] list[[]]
variable[cfg_cur] assign[=] call[name[self].rtdc_ds.config][constant[filtering]]
variable[cfg_old] assign[=] name[self]._old_config
for taget[name[skey]] in starred[call[name[list], parameter[call[name[cfg_cur].keys, parameter[]]]]] begin[:]
if compare[name[skey] <ast.NotIn object at 0x7da2590d7190> name[cfg_old]] begin[:]
call[name[cfg_old]][name[skey]] assign[=] constant[None]
if compare[call[name[cfg_cur]][name[skey]] not_equal[!=] call[name[cfg_old]][name[skey]]] begin[:]
call[name[newkeys].append, parameter[name[skey]]]
call[name[oldvals].append, parameter[call[name[cfg_old]][name[skey]]]]
call[name[newvals].append, parameter[call[name[cfg_cur]][name[skey]]]]
variable[col2filter] assign[=] list[[]]
for taget[name[k]] in starred[name[newkeys]] begin[:]
if compare[call[name[k]][<ast.Slice object at 0x7da1b1a3dae0>] in name[dfn].scalar_feature_names] begin[:]
call[name[col2filter].append, parameter[call[name[k]][<ast.Slice object at 0x7da1b1a3f040>]]]
for taget[name[f]] in starred[name[force]] begin[:]
if compare[name[f] in name[dfn].scalar_feature_names] begin[:]
call[name[col2filter].append, parameter[name[f]]]
variable[col2filter] assign[=] call[name[np].unique, parameter[name[col2filter]]]
for taget[name[col]] in starred[name[col2filter]] begin[:]
if compare[name[col] in name[self].rtdc_ds] begin[:]
variable[fstart] assign[=] binary_operation[name[col] + constant[ min]]
variable[fend] assign[=] binary_operation[name[col] + constant[ max]]
variable[col_filt] assign[=] call[name[self]][name[col]]
if <ast.BoolOp object at 0x7da1b197c490> begin[:]
variable[ivalstart] assign[=] call[name[cfg_cur]][name[fstart]]
variable[ivalend] assign[=] call[name[cfg_cur]][name[fend]]
if compare[name[ivalstart] greater[>] name[ivalend]] begin[:]
variable[msg] assign[=] call[constant[inverting filter: {} > {}].format, parameter[name[fstart], name[fend]]]
call[name[warnings].warn, parameter[name[msg]]]
<ast.Tuple object at 0x7da1b197dc60> assign[=] tuple[[<ast.Name object at 0x7da1b197fa90>, <ast.Name object at 0x7da1b197d930>]]
variable[data] assign[=] call[name[self].rtdc_ds][name[col]]
call[name[col_filt]][<ast.Slice object at 0x7da1b197d8d0>] assign[=] binary_operation[compare[name[ivalstart] less_or_equal[<=] name[data]] * compare[name[data] less_or_equal[<=] name[ivalend]]]
variable[pf_id] assign[=] constant[polygon filters]
if <ast.BoolOp object at 0x7da1b197f160> begin[:]
call[name[self].polygon][<ast.Slice object at 0x7da1b197dde0>] assign[=] constant[True]
for taget[name[p]] in starred[name[PolygonFilter].instances] begin[:]
if compare[name[p].unique_id in call[name[cfg_cur]][name[pf_id]]] begin[:]
variable[datax] assign[=] call[name[self].rtdc_ds][call[name[p].axes][constant[0]]]
variable[datay] assign[=] call[name[self].rtdc_ds][call[name[p].axes][constant[1]]]
<ast.AugAssign object at 0x7da1b197f340>
call[name[self].invalid][<ast.Slice object at 0x7da1b197fa00>] assign[=] constant[True]
if call[name[cfg_cur]][constant[remove invalid events]] begin[:]
for taget[name[col]] in starred[name[dfn].scalar_feature_names] begin[:]
if compare[name[col] in name[self].rtdc_ds] begin[:]
variable[data] assign[=] call[name[self].rtdc_ds][name[col]]
variable[invalid] assign[=] binary_operation[call[name[np].isinf, parameter[name[data]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[np].isnan, parameter[name[data]]]]
<ast.AugAssign object at 0x7da1b197ee90>
call[name[self].all][<ast.Slice object at 0x7da1b197eaa0>] assign[=] constant[True]
if call[name[cfg_cur]][constant[enable filters]] begin[:]
for taget[name[col]] in starred[name[self]._filters] begin[:]
<ast.AugAssign object at 0x7da1b197d420>
<ast.AugAssign object at 0x7da1b197c700>
<ast.AugAssign object at 0x7da1b1835360>
<ast.AugAssign object at 0x7da1b1836230>
if compare[call[name[cfg_cur]][constant[limit events]] greater[>] constant[0]] begin[:]
variable[limit] assign[=] call[name[cfg_cur]][constant[limit events]]
variable[sub] assign[=] call[name[self].all][name[self].all]
<ast.Tuple object at 0x7da1b1835390> assign[=] call[name[downsampling].downsample_rand, parameter[name[sub]]]
call[name[sub]][<ast.UnaryOp object at 0x7da1b1836860>] assign[=] constant[False]
call[name[self].all][name[self].all] assign[=] name[sub]
name[self]._old_config assign[=] call[call[name[self].rtdc_ds.config.copy, parameter[]]][constant[filtering]] | keyword[def] identifier[update] ( identifier[self] , identifier[force] =[]):
literal[string]
identifier[newkeys] =[]
identifier[oldvals] =[]
identifier[newvals] =[]
identifier[cfg_cur] = identifier[self] . identifier[rtdc_ds] . identifier[config] [ literal[string] ]
identifier[cfg_old] = identifier[self] . identifier[_old_config]
keyword[for] identifier[skey] keyword[in] identifier[list] ( identifier[cfg_cur] . identifier[keys] ()):
keyword[if] identifier[skey] keyword[not] keyword[in] identifier[cfg_old] :
identifier[cfg_old] [ identifier[skey] ]= keyword[None]
keyword[if] identifier[cfg_cur] [ identifier[skey] ]!= identifier[cfg_old] [ identifier[skey] ]:
identifier[newkeys] . identifier[append] ( identifier[skey] )
identifier[oldvals] . identifier[append] ( identifier[cfg_old] [ identifier[skey] ])
identifier[newvals] . identifier[append] ( identifier[cfg_cur] [ identifier[skey] ])
identifier[col2filter] =[]
keyword[for] identifier[k] keyword[in] identifier[newkeys] :
keyword[if] identifier[k] [:- literal[int] ] keyword[in] identifier[dfn] . identifier[scalar_feature_names] :
identifier[col2filter] . identifier[append] ( identifier[k] [:- literal[int] ])
keyword[for] identifier[f] keyword[in] identifier[force] :
keyword[if] identifier[f] keyword[in] identifier[dfn] . identifier[scalar_feature_names] :
identifier[col2filter] . identifier[append] ( identifier[f] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[f] ))
identifier[col2filter] = identifier[np] . identifier[unique] ( identifier[col2filter] )
keyword[for] identifier[col] keyword[in] identifier[col2filter] :
keyword[if] identifier[col] keyword[in] identifier[self] . identifier[rtdc_ds] :
identifier[fstart] = identifier[col] + literal[string]
identifier[fend] = identifier[col] + literal[string]
identifier[col_filt] = identifier[self] [ identifier[col] ]
keyword[if] ( identifier[fstart] keyword[in] identifier[cfg_cur] keyword[and]
identifier[fend] keyword[in] identifier[cfg_cur] keyword[and]
identifier[cfg_cur] [ identifier[fstart] ]!= identifier[cfg_cur] [ identifier[fend] ]):
identifier[ivalstart] = identifier[cfg_cur] [ identifier[fstart] ]
identifier[ivalend] = identifier[cfg_cur] [ identifier[fend] ]
keyword[if] identifier[ivalstart] > identifier[ivalend] :
identifier[msg] = literal[string] . identifier[format] ( identifier[fstart] , identifier[fend] )
identifier[warnings] . identifier[warn] ( identifier[msg] )
identifier[ivalstart] , identifier[ivalend] = identifier[ivalend] , identifier[ivalstart]
identifier[data] = identifier[self] . identifier[rtdc_ds] [ identifier[col] ]
identifier[col_filt] [:]=( identifier[ivalstart] <= identifier[data] )*( identifier[data] <= identifier[ivalend] )
keyword[else] :
identifier[col_filt] [:]= keyword[True]
identifier[pf_id] = literal[string]
keyword[if] (
( identifier[pf_id] keyword[in] identifier[cfg_cur] keyword[and] identifier[pf_id] keyword[not] keyword[in] identifier[cfg_old] ) keyword[or]
( identifier[pf_id] keyword[in] identifier[cfg_cur] keyword[and] identifier[pf_id] keyword[in] identifier[cfg_old] keyword[and]
identifier[cfg_cur] [ identifier[pf_id] ]!= identifier[cfg_old] [ identifier[pf_id] ])):
identifier[self] . identifier[polygon] [:]= keyword[True]
keyword[for] identifier[p] keyword[in] identifier[PolygonFilter] . identifier[instances] :
keyword[if] identifier[p] . identifier[unique_id] keyword[in] identifier[cfg_cur] [ identifier[pf_id] ]:
identifier[datax] = identifier[self] . identifier[rtdc_ds] [ identifier[p] . identifier[axes] [ literal[int] ]]
identifier[datay] = identifier[self] . identifier[rtdc_ds] [ identifier[p] . identifier[axes] [ literal[int] ]]
identifier[self] . identifier[polygon] *= identifier[p] . identifier[filter] ( identifier[datax] , identifier[datay] )
identifier[self] . identifier[invalid] [:]= keyword[True]
keyword[if] identifier[cfg_cur] [ literal[string] ]:
keyword[for] identifier[col] keyword[in] identifier[dfn] . identifier[scalar_feature_names] :
keyword[if] identifier[col] keyword[in] identifier[self] . identifier[rtdc_ds] :
identifier[data] = identifier[self] . identifier[rtdc_ds] [ identifier[col] ]
identifier[invalid] = identifier[np] . identifier[isinf] ( identifier[data] )| identifier[np] . identifier[isnan] ( identifier[data] )
identifier[self] . identifier[invalid] *=~ identifier[invalid]
identifier[self] . identifier[all] [:]= keyword[True]
keyword[if] identifier[cfg_cur] [ literal[string] ]:
keyword[for] identifier[col] keyword[in] identifier[self] . identifier[_filters] :
identifier[self] . identifier[all] [:]*= identifier[self] . identifier[_filters] [ identifier[col] ]
identifier[self] . identifier[all] [:]*= identifier[self] . identifier[invalid]
identifier[self] . identifier[all] [:]*= identifier[self] . identifier[manual]
identifier[self] . identifier[all] [:]*= identifier[self] . identifier[polygon]
keyword[if] identifier[cfg_cur] [ literal[string] ]> literal[int] :
identifier[limit] = identifier[cfg_cur] [ literal[string] ]
identifier[sub] = identifier[self] . identifier[all] [ identifier[self] . identifier[all] ]
identifier[_f] , identifier[idx] = identifier[downsampling] . identifier[downsample_rand] ( identifier[sub] ,
identifier[samples] = identifier[limit] ,
identifier[ret_idx] = keyword[True] )
identifier[sub] [~ identifier[idx] ]= keyword[False]
identifier[self] . identifier[all] [ identifier[self] . identifier[all] ]= identifier[sub]
identifier[self] . identifier[_old_config] = identifier[self] . identifier[rtdc_ds] . identifier[config] . identifier[copy] ()[ literal[string] ] | def update(self, force=[]):
"""Update the filters according to `self.rtdc_ds.config["filtering"]`
Parameters
----------
force : list
A list of feature names that must be refiltered with
min/max values.
"""
# These lists may help us become very fast in the future
newkeys = []
oldvals = []
newvals = []
cfg_cur = self.rtdc_ds.config['filtering']
cfg_old = self._old_config
# Determine which data was updated
for skey in list(cfg_cur.keys()):
if skey not in cfg_old:
cfg_old[skey] = None # depends on [control=['if'], data=['skey', 'cfg_old']]
if cfg_cur[skey] != cfg_old[skey]:
newkeys.append(skey)
oldvals.append(cfg_old[skey])
newvals.append(cfg_cur[skey]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['skey']]
# 1. Filter all feature min/max values.
# This line gets the feature names that must be filtered.
col2filter = []
for k in newkeys:
# k[:-4] because we want to crop " min" and " max"
if k[:-4] in dfn.scalar_feature_names:
col2filter.append(k[:-4]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
for f in force:
# Manually add forced features
if f in dfn.scalar_feature_names:
col2filter.append(f) # depends on [control=['if'], data=['f']]
else:
# Make sure the feature name is valid.
raise ValueError('Unknown feature name {}'.format(f)) # depends on [control=['for'], data=['f']]
col2filter = np.unique(col2filter)
for col in col2filter:
if col in self.rtdc_ds:
fstart = col + ' min'
fend = col + ' max'
# Get the current feature filter
col_filt = self[col]
# If min and max exist and if they are not identical:
if fstart in cfg_cur and fend in cfg_cur and (cfg_cur[fstart] != cfg_cur[fend]):
# TODO: speedup
# Here one could check for smaller values in the
# lists oldvals/newvals that we defined above.
# Be sure to check against force in that case!
ivalstart = cfg_cur[fstart]
ivalend = cfg_cur[fend]
if ivalstart > ivalend:
msg = 'inverting filter: {} > {}'.format(fstart, fend)
warnings.warn(msg)
(ivalstart, ivalend) = (ivalend, ivalstart) # depends on [control=['if'], data=['ivalstart', 'ivalend']]
data = self.rtdc_ds[col]
col_filt[:] = (ivalstart <= data) * (data <= ivalend) # depends on [control=['if'], data=[]]
else:
col_filt[:] = True # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=['col']]
# 2. Filter with polygon filters
# check if something has changed
pf_id = 'polygon filters'
if pf_id in cfg_cur and pf_id not in cfg_old or (pf_id in cfg_cur and pf_id in cfg_old and (cfg_cur[pf_id] != cfg_old[pf_id])):
self.polygon[:] = True
# perform polygon filtering
for p in PolygonFilter.instances:
if p.unique_id in cfg_cur[pf_id]:
# update self.polygon
# iterate through axes
datax = self.rtdc_ds[p.axes[0]]
datay = self.rtdc_ds[p.axes[1]]
self.polygon *= p.filter(datax, datay) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
# 3. Invalid filters
self.invalid[:] = True
if cfg_cur['remove invalid events']:
for col in dfn.scalar_feature_names:
if col in self.rtdc_ds:
data = self.rtdc_ds[col]
invalid = np.isinf(data) | np.isnan(data)
self.invalid *= ~invalid # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=['col']] # depends on [control=['if'], data=[]]
# 4. Finally combine all filters
# get a list of all filters
self.all[:] = True
if cfg_cur['enable filters']:
for col in self._filters:
self.all[:] *= self._filters[col] # depends on [control=['for'], data=['col']]
self.all[:] *= self.invalid
self.all[:] *= self.manual
self.all[:] *= self.polygon
# Filter with configuration keyword argument "limit events".
# This additional step limits the total number of events in
# self.all.
if cfg_cur['limit events'] > 0:
limit = cfg_cur['limit events']
sub = self.all[self.all]
(_f, idx) = downsampling.downsample_rand(sub, samples=limit, ret_idx=True)
sub[~idx] = False
self.all[self.all] = sub # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Actual filtering is then done during plotting
self._old_config = self.rtdc_ds.config.copy()['filtering'] |
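# The core of step 1 of update() in isolation: inverted min/max bounds are
# swapped with a warning, then the boolean mask is the product of the two
# comparisons, exactly as in the method above.
import warnings
import numpy as np

data = np.array([0.5, 1.2, 3.4, 2.2])
ivalstart, ivalend = 3.0, 1.0                  # deliberately inverted
if ivalstart > ivalend:
    warnings.warn("inverting filter: min > max")
    ivalstart, ivalend = ivalend, ivalstart
mask = (ivalstart <= data) * (data <= ivalend)
print(mask)  # -> [False  True False  True]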
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value | def function[type, parameter[self]]:
constant[
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
]
variable[value] assign[=] call[name[self]._schema.get, parameter[constant[type], constant[any]]]
if <ast.UnaryOp object at 0x7da2054a7c10> begin[:]
<ast.Raise object at 0x7da2054a7880>
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
variable[type_list] assign[=] name[value]
if compare[call[name[len], parameter[name[type_list]]] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da2054a69b0>
variable[seen] assign[=] call[name[set], parameter[]]
for taget[name[js_type]] in starred[name[type_list]] begin[:]
if call[name[isinstance], parameter[name[js_type], name[dict]]] begin[:]
pass
return[name[value]] | keyword[def] identifier[type] ( identifier[self] ):
literal[string]
identifier[value] = identifier[self] . identifier[_schema] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] ,( identifier[basestring] , identifier[dict] , identifier[list] )):
keyword[raise] identifier[SchemaError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[type_list] = identifier[value]
keyword[if] identifier[len] ( identifier[type_list] )< literal[int] :
keyword[raise] identifier[SchemaError] (
literal[string] . identifier[format] ( identifier[value] ))
keyword[else] :
identifier[type_list] =[ identifier[value] ]
identifier[seen] = identifier[set] ()
keyword[for] identifier[js_type] keyword[in] identifier[type_list] :
keyword[if] identifier[isinstance] ( identifier[js_type] , identifier[dict] ):
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[js_type] , identifier[list] ):
keyword[pass]
keyword[else] :
keyword[if] identifier[js_type] keyword[in] identifier[seen] :
keyword[raise] identifier[SchemaError] (
( literal[string]
literal[string] ). identifier[format] ( identifier[value] , identifier[js_type] ))
keyword[else] :
identifier[seen] . identifier[add] ( identifier[js_type] )
keyword[if] identifier[js_type] keyword[not] keyword[in] (
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[SchemaError] (
literal[string]
literal[string] . identifier[format] ( identifier[js_type] ))
keyword[return] identifier[value] | def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get('type', 'any')
if not isinstance(value, (basestring, dict, list)):
raise SchemaError('type value {0!r} is not a simple type name, nested schema nor a list of those'.format(value)) # depends on [control=['if'], data=[]]
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError('union type {0!r} is too short'.format(value)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass # depends on [control=['if'], data=[]]
elif isinstance(js_type, list):
# no nested validation here
pass # depends on [control=['if'], data=[]]
else:
if js_type in seen:
raise SchemaError('type value {0!r} contains duplicate element {1!r}'.format(value, js_type)) # depends on [control=['if'], data=['js_type']]
else:
seen.add(js_type)
if js_type not in ('string', 'number', 'integer', 'boolean', 'object', 'array', 'null', 'any'):
raise SchemaError('type value {0!r} is not a simple type name'.format(js_type)) # depends on [control=['if'], data=['js_type']] # depends on [control=['for'], data=['js_type']]
return value |
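# The duplicate check from the type property, run on a hand-written union
# type value. SchemaError is assumed here to be a plain Exception subclass.
class SchemaError(Exception):
    pass

value = ["string", "number", "string"]         # duplicate on purpose
seen = set()
try:
    for js_type in value:
        if js_type in seen:
            raise SchemaError(
                "type value {0!r} contains duplicate element"
                " {1!r}".format(value, js_type))
        seen.add(js_type)
except SchemaError as err:
    print(err)  # -> type value [...] contains duplicate element 'string'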
def update_file(self, value, relativePath, description=False,
dump=False, pull=False, raiseError=True, ntrials=3):
"""
        Update the value of a file that is already in the Repository.\n
        If the file is not registered in the repository, an error will be thrown.\n
        If the file is missing from the system, it will be regenerated when the
        dump method is called.
        Unlike dump_file, update_file won't block the whole repository but only
        the file being updated.
        :Parameters:
            #. value (object): The value of the file to update.
            #. relativePath (str): The path, relative to the repository, of the
               file to be updated.
            #. description (False, string): Any description of the file.
               If False is given, the description info won't be updated,
               otherwise it will be updated to the given description value.
            #. dump (False, string): The new dump method. If False is given,
               the old one will be used.
            #. pull (False, string): The new pull method. If False is given,
               the old one will be used.
            #. raiseError (boolean): Whether to raise an encountered error instead
               of returning failure.
            #. ntrials (int): After acquiring all locks, ntrials is the maximum
               number of trials allowed before failing.
               In rare cases, when multiple processes
               are accessing the same repository components, different processes
               can alter repository components between successive lock releases
               of some other process. A bigger number of trials lowers the
               likelihood of failure due to simultaneous alteration by
               multiple processes.
        :Returns:
            #. success (boolean): Whether updating the file was successful.
            #. message (None, string): Some explanatory message or error reason
               why the file was not updated.
"""
# check arguments
assert isinstance(raiseError, bool), "raiseError must be boolean"
assert description is False or description is None or isinstance(description, basestring), "description must be False, None or a string"
assert dump is False or dump is None or isinstance(dump, basestring), "dump must be False, None or a string"
assert pull is False or pull is None or isinstance(pull, basestring), "pull must be False, None or a string"
assert isinstance(ntrials, int), "ntrials must be integer"
assert ntrials>0, "ntrials must be >0"
# get name and path
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
savePath = os.path.join(self.__path,relativePath)
fPath, fName = os.path.split(savePath)
# get locker
LF = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))
acquired, code = LF.acquire_lock()
if not acquired:
error = "Code %s. Unable to aquire the lock to update '%s'"%(code,relativePath)
assert not raiseError, error
return False, error
# update file
for _trial in range(ntrials):
message = []
updated = False
try:
# check file in repository
isRepoFile, fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)
assert isRepoFile, "file '%s' is not registered in repository, no update can be performed."%(relativePath,)
# get file info
if not fileOnDisk:
                    assert description is not False, "file '%s' is not found on disk, description must be provided"%(relativePath,)
                    assert dump is not False, "file '%s' is not found on disk, dump must be provided"%(relativePath,)
                    assert pull is not False, "file '%s' is not found on disk, pull must be provided"%(relativePath,)
info = {}
info['repository_unique_name'] = self.__repo['repository_unique_name']
info['create_utctime'] = info['last_update_utctime'] = time.time()
else:
with open(os.path.join(fPath,self.__fileInfo%fName), 'rb') as fd:
info = pickle.load(fd)
info['last_update_utctime'] = time.time()
if not fileOnDisk:
message.append("file %s is registered in repository but it was found on disk prior to updating"%relativePath)
if not infoOnDisk:
message.append("%s is not found on disk prior to updating"%self.__fileInfo%fName)
if not classOnDisk:
message.append("%s is not found on disk prior to updating"%self.__fileClass%fName)
# get dump and pull
if (description is False) or (dump is False) or (pull is False):
if description is False:
description = info['description']
elif description is None:
description = ''
if dump is False:
dump = info['dump']
elif dump is None:
dump = get_dump_method(dump, protocol=self._DEFAULT_PICKLE_PROTOCOL)
if pull is False:
pull = info['pull']
elif pull is None:
pull = get_pull_method(pull)
# update dump, pull and description
info['dump'] = dump
info['pull'] = pull
info['description'] = description
# dump file
my_exec( dump.replace("$FILE_PATH", str(savePath)), locals=locals(), globals=globals(), description='update' )
#exec( dump.replace("$FILE_PATH", str(savePath)) )
                # path of the info file
                _path = os.path.join(fPath,self.__fileInfo%fName)
# update info
with open(_path, 'wb') as fd:
pickle.dump( info,fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )
fd.flush()
os.fsync(fd.fileno())
# update class file
fileClassPath = os.path.join(self.__path,os.path.dirname(relativePath),self.__fileClass%fName)
with open(fileClassPath, 'wb') as fd:
if value is None:
klass = None
else:
klass = value.__class__
pickle.dump(klass , fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )
fd.flush()
os.fsync(fd.fileno())
except Exception as err:
message.append(str(err))
updated = False
try:
if 'pickle.dump(' in dump:
mi = get_pickling_errors(value)
if mi is not None:
message.append('more info: %s'%str(mi))
except:
pass
if self.DEBUG_PRINT_FAILED_TRIALS: print("Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute"%(_trial, inspect.stack()[1][3], '\n'.join(message)))
else:
updated = True
break
# release lock
LF.release_lock()
# check and return
assert updated or not raiseError, "Unable to update file '%s' (%s)"%(relativePath, '\n'.join(message),)
return updated, '\n'.join(message) | def function[update_file, parameter[self, value, relativePath, description, dump, pull, raiseError, ntrials]]:
constant[
Update the value of a file that is already in the Repository.
If the file is not registered in the repository, an error will be thrown.
If the file is missing from the system, it will be regenerated when the
dump method is called.
Unlike dump_file, update_file won't block the whole repository but only
the file being updated.
:Parameters:
#. value (object): The value of the file to update.
#. relativePath (str): The path, relative to the repository, of the
file to be updated.
#. description (False, string): Any description of the file.
If False is given, the description info won't be updated,
otherwise it will be updated to the given description value.
#. dump (False, string): The new dump method. If False is given,
the old one will be used.
#. pull (False, string): The new pull method. If False is given,
the old one will be used.
#. raiseError (boolean): Whether to raise an encountered error instead
of returning failure.
#. ntrials (int): After acquiring all locks, ntrials is the maximum
number of trials allowed before failing.
In rare cases, when multiple processes
are accessing the same repository components, different processes
can alter repository components between successive lock releases
of some other process. A bigger number of trials lowers the
likelihood of failure due to simultaneous alteration by
multiple processes.
:Returns:
#. success (boolean): Whether updating the file was successful.
#. message (None, string): Some explanatory message or error reason
why the file was not updated.
]
assert[call[name[isinstance], parameter[name[raiseError], name[bool]]]]
assert[<ast.BoolOp object at 0x7da2054a5f90>]
assert[<ast.BoolOp object at 0x7da2054a61d0>]
assert[<ast.BoolOp object at 0x7da2054a6470>]
assert[call[name[isinstance], parameter[name[ntrials], name[int]]]]
assert[compare[name[ntrials] greater[>] constant[0]]]
variable[relativePath] assign[=] call[name[self].to_repo_relative_path, parameter[]]
variable[savePath] assign[=] call[name[os].path.join, parameter[name[self].__path, name[relativePath]]]
<ast.Tuple object at 0x7da2054a4ee0> assign[=] call[name[os].path.split, parameter[name[savePath]]]
variable[LF] assign[=] call[name[Locker], parameter[]]
<ast.Tuple object at 0x7da2054a5fc0> assign[=] call[name[LF].acquire_lock, parameter[]]
if <ast.UnaryOp object at 0x7da2054a4040> begin[:]
variable[error] assign[=] binary_operation[constant[Code %s. Unable to aquire the lock to update '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a5960>, <ast.Name object at 0x7da2054a4340>]]]
assert[<ast.UnaryOp object at 0x7da2054a55a0>]
return[tuple[[<ast.Constant object at 0x7da2054a5b40>, <ast.Name object at 0x7da2054a6bc0>]]]
for taget[name[_trial]] in starred[call[name[range], parameter[name[ntrials]]]] begin[:]
variable[message] assign[=] list[[]]
variable[updated] assign[=] constant[False]
<ast.Try object at 0x7da2054a68f0>
call[name[LF].release_lock, parameter[]]
assert[<ast.BoolOp object at 0x7da2047e8190>]
return[tuple[[<ast.Name object at 0x7da2047e8400>, <ast.Call object at 0x7da2047e8c10>]]] | keyword[def] identifier[update_file] ( identifier[self] , identifier[value] , identifier[relativePath] , identifier[description] = keyword[False] ,
identifier[dump] = keyword[False] , identifier[pull] = keyword[False] , identifier[raiseError] = keyword[True] , identifier[ntrials] = literal[int] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[raiseError] , identifier[bool] ), literal[string]
keyword[assert] identifier[description] keyword[is] keyword[False] keyword[or] identifier[description] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[description] , identifier[basestring] ), literal[string]
keyword[assert] identifier[dump] keyword[is] keyword[False] keyword[or] identifier[dump] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[dump] , identifier[basestring] ), literal[string]
keyword[assert] identifier[pull] keyword[is] keyword[False] keyword[or] identifier[pull] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[pull] , identifier[basestring] ), literal[string]
keyword[assert] identifier[isinstance] ( identifier[ntrials] , identifier[int] ), literal[string]
keyword[assert] identifier[ntrials] > literal[int] , literal[string]
identifier[relativePath] = identifier[self] . identifier[to_repo_relative_path] ( identifier[path] = identifier[relativePath] , identifier[split] = keyword[False] )
identifier[savePath] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[__path] , identifier[relativePath] )
identifier[fPath] , identifier[fName] = identifier[os] . identifier[path] . identifier[split] ( identifier[savePath] )
identifier[LF] = identifier[Locker] ( identifier[filePath] = keyword[None] , identifier[lockPass] = identifier[str] ( identifier[uuid] . identifier[uuid1] ()), identifier[lockPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileLock] % identifier[fName] ))
identifier[acquired] , identifier[code] = identifier[LF] . identifier[acquire_lock] ()
keyword[if] keyword[not] identifier[acquired] :
identifier[error] = literal[string] %( identifier[code] , identifier[relativePath] )
keyword[assert] keyword[not] identifier[raiseError] , identifier[error]
keyword[return] keyword[False] , identifier[error]
keyword[for] identifier[_trial] keyword[in] identifier[range] ( identifier[ntrials] ):
identifier[message] =[]
identifier[updated] = keyword[False]
keyword[try] :
identifier[isRepoFile] , identifier[fileOnDisk] , identifier[infoOnDisk] , identifier[classOnDisk] = identifier[self] . identifier[is_repository_file] ( identifier[relativePath] )
keyword[assert] identifier[isRepoFile] , literal[string] %( identifier[relativePath] ,)
keyword[if] keyword[not] identifier[fileOnDisk] :
keyword[assert] identifier[description] keyword[is] keyword[not] keyword[False] , literal[string] %( identifier[relativePath] ,)
keyword[assert] identifier[dump] keyword[is] keyword[not] keyword[False] , literal[string] %( identifier[relativePath] ,)
keyword[assert] identifier[pull] keyword[is] keyword[not] keyword[False] , literal[string] %( identifier[relativePath] ,)
identifier[info] ={}
identifier[info] [ literal[string] ]= identifier[self] . identifier[__repo] [ literal[string] ]
identifier[info] [ literal[string] ]= identifier[info] [ literal[string] ]= identifier[time] . identifier[time] ()
keyword[else] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileInfo] % identifier[fName] ), literal[string] ) keyword[as] identifier[fd] :
identifier[info] = identifier[pickle] . identifier[load] ( identifier[fd] )
identifier[info] [ literal[string] ]= identifier[time] . identifier[time] ()
keyword[if] keyword[not] identifier[fileOnDisk] :
identifier[message] . identifier[append] ( literal[string] % identifier[relativePath] )
keyword[if] keyword[not] identifier[infoOnDisk] :
identifier[message] . identifier[append] ( literal[string] % identifier[self] . identifier[__fileInfo] % identifier[fName] )
keyword[if] keyword[not] identifier[classOnDisk] :
identifier[message] . identifier[append] ( literal[string] % identifier[self] . identifier[__fileClass] % identifier[fName] )
keyword[if] ( identifier[description] keyword[is] keyword[False] ) keyword[or] ( identifier[dump] keyword[is] keyword[False] ) keyword[or] ( identifier[pull] keyword[is] keyword[False] ):
keyword[if] identifier[description] keyword[is] keyword[False] :
identifier[description] = identifier[info] [ literal[string] ]
keyword[elif] identifier[description] keyword[is] keyword[None] :
identifier[description] = literal[string]
keyword[if] identifier[dump] keyword[is] keyword[False] :
identifier[dump] = identifier[info] [ literal[string] ]
keyword[elif] identifier[dump] keyword[is] keyword[None] :
identifier[dump] = identifier[get_dump_method] ( identifier[dump] , identifier[protocol] = identifier[self] . identifier[_DEFAULT_PICKLE_PROTOCOL] )
keyword[if] identifier[pull] keyword[is] keyword[False] :
identifier[pull] = identifier[info] [ literal[string] ]
keyword[elif] identifier[pull] keyword[is] keyword[None] :
identifier[pull] = identifier[get_pull_method] ( identifier[pull] )
identifier[info] [ literal[string] ]= identifier[dump]
identifier[info] [ literal[string] ]= identifier[pull]
identifier[info] [ literal[string] ]= identifier[description]
identifier[my_exec] ( identifier[dump] . identifier[replace] ( literal[string] , identifier[str] ( identifier[savePath] )), identifier[locals] = identifier[locals] (), identifier[globals] = identifier[globals] (), identifier[description] = literal[string] )
identifier[_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileInfo] % identifier[fName] )
keyword[with] identifier[open] ( identifier[_path] , literal[string] ) keyword[as] identifier[fd] :
identifier[pickle] . identifier[dump] ( identifier[info] , identifier[fd] , identifier[protocol] = identifier[self] . identifier[_DEFAULT_PICKLE_PROTOCOL] )
identifier[fd] . identifier[flush] ()
identifier[os] . identifier[fsync] ( identifier[fd] . identifier[fileno] ())
identifier[fileClassPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[__path] , identifier[os] . identifier[path] . identifier[dirname] ( identifier[relativePath] ), identifier[self] . identifier[__fileClass] % identifier[fName] )
keyword[with] identifier[open] ( identifier[fileClassPath] , literal[string] ) keyword[as] identifier[fd] :
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[klass] = keyword[None]
keyword[else] :
identifier[klass] = identifier[value] . identifier[__class__]
identifier[pickle] . identifier[dump] ( identifier[klass] , identifier[fd] , identifier[protocol] = identifier[self] . identifier[_DEFAULT_PICKLE_PROTOCOL] )
identifier[fd] . identifier[flush] ()
identifier[os] . identifier[fsync] ( identifier[fd] . identifier[fileno] ())
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[message] . identifier[append] ( identifier[str] ( identifier[err] ))
identifier[updated] = keyword[False]
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[dump] :
identifier[mi] = identifier[get_pickling_errors] ( identifier[value] )
keyword[if] identifier[mi] keyword[is] keyword[not] keyword[None] :
identifier[message] . identifier[append] ( literal[string] % identifier[str] ( identifier[mi] ))
keyword[except] :
keyword[pass]
keyword[if] identifier[self] . identifier[DEBUG_PRINT_FAILED_TRIALS] : identifier[print] ( literal[string] %( identifier[_trial] , identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ], literal[string] . identifier[join] ( identifier[message] )))
keyword[else] :
identifier[updated] = keyword[True]
keyword[break]
identifier[LF] . identifier[release_lock] ()
keyword[assert] identifier[updated] keyword[or] keyword[not] identifier[raiseError] , literal[string] %( identifier[relativePath] , literal[string] . identifier[join] ( identifier[message] ),)
keyword[return] identifier[updated] , literal[string] . identifier[join] ( identifier[message] ) | def update_file(self, value, relativePath, description=False, dump=False, pull=False, raiseError=True, ntrials=3):
"""
        Update the value of a file that is already in the Repository.
        If the file is not registered in the repository, an error will be thrown.
        If the file is missing from the system, it will be regenerated when the
        dump method is called.
        Unlike dump_file, update_file won't block the whole repository but only
        the file being updated.
        :Parameters:
            #. value (object): The value of the file to update.
            #. relativePath (str): The path, relative to the repository, of the
               file to be updated.
            #. description (False, string): Any description of the file.
               If False is given, the description info won't be updated,
               otherwise it will be updated to the given description value.
            #. dump (False, string): The new dump method. If False is given,
               the old one will be used.
            #. pull (False, string): The new pull method. If False is given,
               the old one will be used.
            #. raiseError (boolean): Whether to raise an encountered error instead
               of returning failure.
            #. ntrials (int): After acquiring all locks, ntrials is the maximum
               number of trials allowed before failing.
               In rare cases, when multiple processes
               are accessing the same repository components, different processes
               can alter repository components between successive lock releases
               of some other process. A bigger number of trials lowers the
               likelihood of failure due to simultaneous alteration by
               multiple processes.
        :Returns:
            #. success (boolean): Whether updating the file was successful.
            #. message (None, string): Some explanatory message or error reason
               why the file was not updated.
"""
# check arguments
assert isinstance(raiseError, bool), 'raiseError must be boolean'
assert description is False or description is None or isinstance(description, basestring), 'description must be False, None or a string'
assert dump is False or dump is None or isinstance(dump, basestring), 'dump must be False, None or a string'
assert pull is False or pull is None or isinstance(pull, basestring), 'pull must be False, None or a string'
assert isinstance(ntrials, int), 'ntrials must be integer'
assert ntrials > 0, 'ntrials must be >0'
# get name and path
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
savePath = os.path.join(self.__path, relativePath)
(fPath, fName) = os.path.split(savePath)
# get locker
LF = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath, self.__fileLock % fName))
(acquired, code) = LF.acquire_lock()
if not acquired:
error = "Code %s. Unable to acquire the lock to update '%s'" % (code, relativePath)
assert not raiseError, error
return (False, error) # depends on [control=['if'], data=[]]
# update file
for _trial in range(ntrials):
message = []
updated = False
try:
# check file in repository
(isRepoFile, fileOnDisk, infoOnDisk, classOnDisk) = self.is_repository_file(relativePath)
assert isRepoFile, "file '%s' is not registered in repository, no update can be performed." % (relativePath,)
# get file info
if not fileOnDisk:
assert description is not False, "file '%s' is not found on disk, description must be provided" % (relativePath,)
assert dump is not False, "file '%s' is not found on disk, dump must be provided" % (relativePath,)
assert pull is not False, "file '%s' is not found on disk, pull must be provided" % (relativePath,)
info = {}
info['repository_unique_name'] = self.__repo['repository_unique_name']
info['create_utctime'] = info['last_update_utctime'] = time.time() # depends on [control=['if'], data=[]]
else:
with open(os.path.join(fPath, self.__fileInfo % fName), 'rb') as fd:
info = pickle.load(fd)
info['last_update_utctime'] = time.time() # depends on [control=['with'], data=['fd']]
if not fileOnDisk:
message.append('file %s is registered in repository but it was not found on disk prior to updating' % relativePath) # depends on [control=['if'], data=[]]
if not infoOnDisk:
message.append('%s is not found on disk prior to updating' % self.__fileInfo % fName) # depends on [control=['if'], data=[]]
if not classOnDisk:
message.append('%s is not found on disk prior to updating' % self.__fileClass % fName) # depends on [control=['if'], data=[]]
# get dump and pull
if description is False or dump is False or pull is False:
if description is False:
description = info['description'] # depends on [control=['if'], data=['description']]
elif description is None:
description = '' # depends on [control=['if'], data=['description']]
if dump is False:
dump = info['dump'] # depends on [control=['if'], data=['dump']]
elif dump is None:
dump = get_dump_method(dump, protocol=self._DEFAULT_PICKLE_PROTOCOL) # depends on [control=['if'], data=['dump']]
if pull is False:
pull = info['pull'] # depends on [control=['if'], data=['pull']]
elif pull is None:
pull = get_pull_method(pull) # depends on [control=['if'], data=['pull']] # depends on [control=['if'], data=[]]
# update dump, pull and description
info['dump'] = dump
info['pull'] = pull
info['description'] = description
# dump file
my_exec(dump.replace('$FILE_PATH', str(savePath)), locals=locals(), globals=globals(), description='update')
#exec( dump.replace("$FILE_PATH", str(savePath)) )
# path of the existing info file
_path = os.path.join(fPath, self.__fileInfo % fName)
# update info
with open(_path, 'wb') as fd:
pickle.dump(info, fd, protocol=self._DEFAULT_PICKLE_PROTOCOL)
fd.flush()
os.fsync(fd.fileno()) # depends on [control=['with'], data=['fd']]
# update class file
fileClassPath = os.path.join(self.__path, os.path.dirname(relativePath), self.__fileClass % fName)
with open(fileClassPath, 'wb') as fd:
if value is None:
klass = None # depends on [control=['if'], data=[]]
else:
klass = value.__class__
pickle.dump(klass, fd, protocol=self._DEFAULT_PICKLE_PROTOCOL)
fd.flush()
os.fsync(fd.fileno()) # depends on [control=['with'], data=['fd']] # depends on [control=['try'], data=[]]
except Exception as err:
message.append(str(err))
updated = False
try:
if 'pickle.dump(' in dump:
mi = get_pickling_errors(value)
if mi is not None:
message.append('more info: %s' % str(mi)) # depends on [control=['if'], data=['mi']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
if self.DEBUG_PRINT_FAILED_TRIALS:
print('Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute' % (_trial, inspect.stack()[1][3], '\n'.join(message))) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
else:
updated = True
break # depends on [control=['for'], data=['_trial']]
# release lock
LF.release_lock()
# check and return
assert updated or not raiseError, "Unable to update file '%s' (%s)" % (relativePath, '\n'.join(message))
return (updated, '\n'.join(message)) |
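A minimal, standard-library sketch of the acquire-lock-then-retry pattern the row above implements (the Repository class, its Locker, and its on-disk info/class files are not reproduced; the names here are illustrative):

import threading

_file_lock = threading.Lock()  # stands in for the per-file Locker used above

def update_with_retries(do_update, ntrials=3):
    # Acquire the per-file lock first; report failure rather than raise.
    if not _file_lock.acquire(timeout=1):
        return False, "unable to acquire lock"
    try:
        messages = []
        for _trial in range(ntrials):
            try:
                do_update()  # e.g. dump the value, rewrite the info and class files
            except Exception as err:
                messages.append(str(err))
            else:
                return True, ""
        return False, "\n".join(messages)
    finally:
        _file_lock.release()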
def _get_flag(which, flags):
""" Find 'which' entry in 'flags'. """
res = [this for this in flags if this.is_equal(which)]
if len(res) == 0:
return None
if len(res) == 1:
return res[0]
assert() | def function[_get_flag, parameter[which, flags]]:
constant[ Find 'which' entry in 'flags'. ]
variable[res] assign[=] <ast.ListComp object at 0x7da1b0833e80>
if compare[call[name[len], parameter[name[res]]] equal[==] constant[0]] begin[:]
return[constant[None]]
if compare[call[name[len], parameter[name[res]]] equal[==] constant[1]] begin[:]
return[call[name[res]][constant[0]]]
assert[tuple[[]]] | keyword[def] identifier[_get_flag] ( identifier[which] , identifier[flags] ):
literal[string]
identifier[res] =[ identifier[this] keyword[for] identifier[this] keyword[in] identifier[flags] keyword[if] identifier[this] . identifier[is_equal] ( identifier[which] )]
keyword[if] identifier[len] ( identifier[res] )== literal[int] :
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[res] )== literal[int] :
keyword[return] identifier[res] [ literal[int] ]
keyword[assert] () | def _get_flag(which, flags):
""" Find 'which' entry in 'flags'. """
res = [this for this in flags if this.is_equal(which)]
if len(res) == 0:
return None # depends on [control=['if'], data=[]]
if len(res) == 1:
return res[0] # depends on [control=['if'], data=[]]
assert () |
def select_subreddit(self):
"""
Store the selected subreddit and return to the subreddit page
"""
name = self.get_selected_item()['name']
self.selected_page = self.open_subreddit_page(name) | def function[select_subreddit, parameter[self]]:
constant[
Store the selected subreddit and return to the subreddit page
]
variable[name] assign[=] call[call[name[self].get_selected_item, parameter[]]][constant[name]]
name[self].selected_page assign[=] call[name[self].open_subreddit_page, parameter[name[name]]] | keyword[def] identifier[select_subreddit] ( identifier[self] ):
literal[string]
identifier[name] = identifier[self] . identifier[get_selected_item] ()[ literal[string] ]
identifier[self] . identifier[selected_page] = identifier[self] . identifier[open_subreddit_page] ( identifier[name] ) | def select_subreddit(self):
"""
Store the selected subreddit and return to the subreddit page
"""
name = self.get_selected_item()['name']
self.selected_page = self.open_subreddit_page(name) |
def format(self):
"""PixelFormat: The raw format of the texture. The actual format may differ, but pixel transfers will use this
format.
"""
fmt = ffi.new('Uint32 *')
check_int_err(lib.SDL_QueryTexture(self._ptr, fmt, ffi.NULL, ffi.NULL, ffi.NULL))
return PixelFormat(fmt[0]) | def function[format, parameter[self]]:
constant[PixelFormat: The raw format of the texture. The actual format may differ, but pixel transfers will use this
format.
]
variable[fmt] assign[=] call[name[ffi].new, parameter[constant[Uint32 *]]]
call[name[check_int_err], parameter[call[name[lib].SDL_QueryTexture, parameter[name[self]._ptr, name[fmt], name[ffi].NULL, name[ffi].NULL, name[ffi].NULL]]]]
return[call[name[PixelFormat], parameter[call[name[fmt]][constant[0]]]]] | keyword[def] identifier[format] ( identifier[self] ):
literal[string]
identifier[fmt] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[check_int_err] ( identifier[lib] . identifier[SDL_QueryTexture] ( identifier[self] . identifier[_ptr] , identifier[fmt] , identifier[ffi] . identifier[NULL] , identifier[ffi] . identifier[NULL] , identifier[ffi] . identifier[NULL] ))
keyword[return] identifier[PixelFormat] ( identifier[fmt] [ literal[int] ]) | def format(self):
"""PixelFormat: The raw format of the texture. The actual format may differ, but pixel transfers will use this
format.
"""
fmt = ffi.new('Uint32 *')
check_int_err(lib.SDL_QueryTexture(self._ptr, fmt, ffi.NULL, ffi.NULL, ffi.NULL))
return PixelFormat(fmt[0]) |
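The cffi out-parameter idiom the row above relies on, in isolation (a sketch assuming cffi is installed; the Uint32 typedef is declared by hand here, whereas SDL bindings ship their own cdefs):

from cffi import FFI

ffi = FFI()
ffi.cdef("typedef uint32_t Uint32;")
fmt = ffi.new("Uint32 *")  # allocate one Uint32 for C code to write through
fmt[0] = 372645892         # a call like SDL_QueryTexture would fill this in
print(fmt[0])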
def _escaped_token_to_subtoken_strings(self, escaped_token):
""" Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in xrange(min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._all_subtoken_strings:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret | def function[_escaped_token_to_subtoken_strings, parameter[self, escaped_token]]:
constant[ Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
]
variable[ret] assign[=] list[[]]
variable[start] assign[=] constant[0]
variable[token_len] assign[=] call[name[len], parameter[name[escaped_token]]]
while compare[name[start] less[<] name[token_len]] begin[:]
for taget[name[end]] in starred[call[name[xrange], parameter[call[name[min], parameter[name[token_len], binary_operation[name[start] + name[self]._max_subtoken_len]]], name[start], <ast.UnaryOp object at 0x7da18dc04760>]]] begin[:]
variable[subtoken] assign[=] call[name[escaped_token]][<ast.Slice object at 0x7da18dc06a40>]
if compare[name[subtoken] in name[self]._all_subtoken_strings] begin[:]
call[name[ret].append, parameter[name[subtoken]]]
variable[start] assign[=] name[end]
break
return[name[ret]] | keyword[def] identifier[_escaped_token_to_subtoken_strings] ( identifier[self] , identifier[escaped_token] ):
literal[string]
identifier[ret] =[]
identifier[start] = literal[int]
identifier[token_len] = identifier[len] ( identifier[escaped_token] )
keyword[while] identifier[start] < identifier[token_len] :
keyword[for] identifier[end] keyword[in] identifier[xrange] ( identifier[min] ( identifier[token_len] , identifier[start] + identifier[self] . identifier[_max_subtoken_len] ), identifier[start] ,- literal[int] ):
identifier[subtoken] = identifier[escaped_token] [ identifier[start] : identifier[end] ]
keyword[if] identifier[subtoken] keyword[in] identifier[self] . identifier[_all_subtoken_strings] :
identifier[ret] . identifier[append] ( identifier[subtoken] )
identifier[start] = identifier[end]
keyword[break]
keyword[else] :
keyword[assert] keyword[False] , literal[string]
keyword[return] identifier[ret] | def _escaped_token_to_subtoken_strings(self, escaped_token):
""" Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in xrange(min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._all_subtoken_strings:
ret.append(subtoken)
start = end
break # depends on [control=['if'], data=['subtoken']] # depends on [control=['for'], data=['end']]
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, 'Token substring not found in subtoken vocabulary.' # depends on [control=['while'], data=['start', 'token_len']]
return ret |
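The greedy longest-match loop above, re-implemented standalone with a toy vocabulary (the encoder's _all_subtoken_strings and _max_subtoken_len fields become plain arguments):

def greedy_subtokens(token, vocab, max_subtoken_len):
    ret, start = [], 0
    while start < len(token):
        for end in range(min(len(token), start + max_subtoken_len), start, -1):
            piece = token[start:end]
            if piece in vocab:
                ret.append(piece)
                start = end
                break
        else:  # no break: some character is outside the vocabulary
            raise AssertionError("Token substring not found in subtoken vocabulary.")
    return ret

print(greedy_subtokens("unhappy", {"un", "happy", "h", "a", "p", "y"}, 5))  # ['un', 'happy']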
def ingest_from_dataframe(self, df, ingestion_properties):
        """Enqueue an ingest command from a local pandas DataFrame.
:param pandas.DataFrame df: input dataframe to ingest.
:param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
"""
from pandas import DataFrame
if not isinstance(df, DataFrame):
raise ValueError("Expected DataFrame instance, found {}".format(type(df)))
file_name = "df_{timestamp}_{pid}.csv.gz".format(timestamp=int(time.time()), pid=os.getpid())
temp_file_path = os.path.join(tempfile.gettempdir(), file_name)
df.to_csv(temp_file_path, index=False, encoding="utf-8", header=False, compression="gzip")
fd = FileDescriptor(temp_file_path)
blob_name = "{db}__{table}__{guid}__{file}".format(
db=ingestion_properties.database, table=ingestion_properties.table, guid=uuid.uuid4(), file=file_name
)
containers = self._resource_manager.get_containers()
container_details = random.choice(containers)
storage_client = CloudStorageAccount(container_details.storage_account_name, sas_token=container_details.sas)
blob_service = storage_client.create_block_blob_service()
blob_service.create_blob_from_path(
container_name=container_details.object_name, blob_name=blob_name, file_path=temp_file_path
)
url = blob_service.make_blob_url(container_details.object_name, blob_name, sas_token=container_details.sas)
self.ingest_from_blob(BlobDescriptor(url, fd.size), ingestion_properties=ingestion_properties)
fd.delete_files()
os.unlink(temp_file_path) | def function[ingest_from_dataframe, parameter[self, df, ingestion_properties]]:
constant[Enqueue an ingest command from a local pandas DataFrame.
:param pandas.DataFrame df: input dataframe to ingest.
:param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
]
from relative_module[pandas] import module[DataFrame]
if <ast.UnaryOp object at 0x7da1b17bb1f0> begin[:]
<ast.Raise object at 0x7da1b17bbca0>
variable[file_name] assign[=] call[constant[df_{timestamp}_{pid}.csv.gz].format, parameter[]]
variable[temp_file_path] assign[=] call[name[os].path.join, parameter[call[name[tempfile].gettempdir, parameter[]], name[file_name]]]
call[name[df].to_csv, parameter[name[temp_file_path]]]
variable[fd] assign[=] call[name[FileDescriptor], parameter[name[temp_file_path]]]
variable[blob_name] assign[=] call[constant[{db}__{table}__{guid}__{file}].format, parameter[]]
variable[containers] assign[=] call[name[self]._resource_manager.get_containers, parameter[]]
variable[container_details] assign[=] call[name[random].choice, parameter[name[containers]]]
variable[storage_client] assign[=] call[name[CloudStorageAccount], parameter[name[container_details].storage_account_name]]
variable[blob_service] assign[=] call[name[storage_client].create_block_blob_service, parameter[]]
call[name[blob_service].create_blob_from_path, parameter[]]
variable[url] assign[=] call[name[blob_service].make_blob_url, parameter[name[container_details].object_name, name[blob_name]]]
call[name[self].ingest_from_blob, parameter[call[name[BlobDescriptor], parameter[name[url], name[fd].size]]]]
call[name[fd].delete_files, parameter[]]
call[name[os].unlink, parameter[name[temp_file_path]]] | keyword[def] identifier[ingest_from_dataframe] ( identifier[self] , identifier[df] , identifier[ingestion_properties] ):
literal[string]
keyword[from] identifier[pandas] keyword[import] identifier[DataFrame]
keyword[if] keyword[not] identifier[isinstance] ( identifier[df] , identifier[DataFrame] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[df] )))
identifier[file_name] = literal[string] . identifier[format] ( identifier[timestamp] = identifier[int] ( identifier[time] . identifier[time] ()), identifier[pid] = identifier[os] . identifier[getpid] ())
identifier[temp_file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[tempfile] . identifier[gettempdir] (), identifier[file_name] )
identifier[df] . identifier[to_csv] ( identifier[temp_file_path] , identifier[index] = keyword[False] , identifier[encoding] = literal[string] , identifier[header] = keyword[False] , identifier[compression] = literal[string] )
identifier[fd] = identifier[FileDescriptor] ( identifier[temp_file_path] )
identifier[blob_name] = literal[string] . identifier[format] (
identifier[db] = identifier[ingestion_properties] . identifier[database] , identifier[table] = identifier[ingestion_properties] . identifier[table] , identifier[guid] = identifier[uuid] . identifier[uuid4] (), identifier[file] = identifier[file_name]
)
identifier[containers] = identifier[self] . identifier[_resource_manager] . identifier[get_containers] ()
identifier[container_details] = identifier[random] . identifier[choice] ( identifier[containers] )
identifier[storage_client] = identifier[CloudStorageAccount] ( identifier[container_details] . identifier[storage_account_name] , identifier[sas_token] = identifier[container_details] . identifier[sas] )
identifier[blob_service] = identifier[storage_client] . identifier[create_block_blob_service] ()
identifier[blob_service] . identifier[create_blob_from_path] (
identifier[container_name] = identifier[container_details] . identifier[object_name] , identifier[blob_name] = identifier[blob_name] , identifier[file_path] = identifier[temp_file_path]
)
identifier[url] = identifier[blob_service] . identifier[make_blob_url] ( identifier[container_details] . identifier[object_name] , identifier[blob_name] , identifier[sas_token] = identifier[container_details] . identifier[sas] )
identifier[self] . identifier[ingest_from_blob] ( identifier[BlobDescriptor] ( identifier[url] , identifier[fd] . identifier[size] ), identifier[ingestion_properties] = identifier[ingestion_properties] )
identifier[fd] . identifier[delete_files] ()
identifier[os] . identifier[unlink] ( identifier[temp_file_path] ) | def ingest_from_dataframe(self, df, ingestion_properties):
"""Enqueuing an ingest command from local files.
:param pandas.DataFrame df: input dataframe to ingest.
:param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
"""
from pandas import DataFrame
if not isinstance(df, DataFrame):
raise ValueError('Expected DataFrame instance, found {}'.format(type(df))) # depends on [control=['if'], data=[]]
file_name = 'df_{timestamp}_{pid}.csv.gz'.format(timestamp=int(time.time()), pid=os.getpid())
temp_file_path = os.path.join(tempfile.gettempdir(), file_name)
df.to_csv(temp_file_path, index=False, encoding='utf-8', header=False, compression='gzip')
fd = FileDescriptor(temp_file_path)
blob_name = '{db}__{table}__{guid}__{file}'.format(db=ingestion_properties.database, table=ingestion_properties.table, guid=uuid.uuid4(), file=file_name)
containers = self._resource_manager.get_containers()
container_details = random.choice(containers)
storage_client = CloudStorageAccount(container_details.storage_account_name, sas_token=container_details.sas)
blob_service = storage_client.create_block_blob_service()
blob_service.create_blob_from_path(container_name=container_details.object_name, blob_name=blob_name, file_path=temp_file_path)
url = blob_service.make_blob_url(container_details.object_name, blob_name, sas_token=container_details.sas)
self.ingest_from_blob(BlobDescriptor(url, fd.size), ingestion_properties=ingestion_properties)
fd.delete_files()
os.unlink(temp_file_path) |
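The staging step above in isolation: serialize a DataFrame to a gzipped, header-less CSV in the temp directory (pandas only; the blob upload and queueing are omitted):

import os
import tempfile
import time

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
file_name = "df_{timestamp}_{pid}.csv.gz".format(timestamp=int(time.time()), pid=os.getpid())
temp_file_path = os.path.join(tempfile.gettempdir(), file_name)
df.to_csv(temp_file_path, index=False, encoding="utf-8", header=False, compression="gzip")
print(temp_file_path)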
def urijoin(base, ref, strict=False):
"""Convert a URI reference relative to a base URI to its target URI
string.
"""
if isinstance(base, type(ref)):
return urisplit(base).transform(ref, strict).geturi()
elif isinstance(base, bytes):
return urisplit(base.decode()).transform(ref, strict).geturi()
else:
return urisplit(base).transform(ref.decode(), strict).geturi() | def function[urijoin, parameter[base, ref, strict]]:
constant[Convert a URI reference relative to a base URI to its target URI
string.
]
if call[name[isinstance], parameter[name[base], call[name[type], parameter[name[ref]]]]] begin[:]
return[call[call[call[name[urisplit], parameter[name[base]]].transform, parameter[name[ref], name[strict]]].geturi, parameter[]]] | keyword[def] identifier[urijoin] ( identifier[base] , identifier[ref] , identifier[strict] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[base] , identifier[type] ( identifier[ref] )):
keyword[return] identifier[urisplit] ( identifier[base] ). identifier[transform] ( identifier[ref] , identifier[strict] ). identifier[geturi] ()
keyword[elif] identifier[isinstance] ( identifier[base] , identifier[bytes] ):
keyword[return] identifier[urisplit] ( identifier[base] . identifier[decode] ()). identifier[transform] ( identifier[ref] , identifier[strict] ). identifier[geturi] ()
keyword[else] :
keyword[return] identifier[urisplit] ( identifier[base] ). identifier[transform] ( identifier[ref] . identifier[decode] (), identifier[strict] ). identifier[geturi] () | def urijoin(base, ref, strict=False):
"""Convert a URI reference relative to a base URI to its target URI
string.
"""
if isinstance(base, type(ref)):
return urisplit(base).transform(ref, strict).geturi() # depends on [control=['if'], data=[]]
elif isinstance(base, bytes):
return urisplit(base.decode()).transform(ref, strict).geturi() # depends on [control=['if'], data=[]]
else:
return urisplit(base).transform(ref.decode(), strict).geturi() |
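As a sanity check on the resolution semantics, the standard library applies the same RFC 3986 algorithm:

from urllib.parse import urljoin

print(urljoin("http://example.com/a/b", "../c"))  # http://example.com/c
print(urljoin("http://example.com/a/b", "?q=1"))  # http://example.com/a/b?q=1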
def _GetIdentifierFromPath(self, parser_mediator):
"""Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
"""
file_entry = parser_mediator.GetFileEntry()
path = file_entry.path_spec.location
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(path)
return path_segments[-2] | def function[_GetIdentifierFromPath, parameter[self, parser_mediator]]:
constant[Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
]
variable[file_entry] assign[=] call[name[parser_mediator].GetFileEntry, parameter[]]
variable[path] assign[=] name[file_entry].path_spec.location
variable[file_system] assign[=] call[name[file_entry].GetFileSystem, parameter[]]
variable[path_segments] assign[=] call[name[file_system].SplitPath, parameter[name[path]]]
return[call[name[path_segments]][<ast.UnaryOp object at 0x7da1b26af100>]] | keyword[def] identifier[_GetIdentifierFromPath] ( identifier[self] , identifier[parser_mediator] ):
literal[string]
identifier[file_entry] = identifier[parser_mediator] . identifier[GetFileEntry] ()
identifier[path] = identifier[file_entry] . identifier[path_spec] . identifier[location]
identifier[file_system] = identifier[file_entry] . identifier[GetFileSystem] ()
identifier[path_segments] = identifier[file_system] . identifier[SplitPath] ( identifier[path] )
keyword[return] identifier[path_segments] [- literal[int] ] | def _GetIdentifierFromPath(self, parser_mediator):
"""Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
"""
file_entry = parser_mediator.GetFileEntry()
path = file_entry.path_spec.location
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(path)
return path_segments[-2] |
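The second-to-last path segment idea, shown on a plain string (dfVFS's file-system object is replaced by str.split; the container id is a made-up placeholder):

path = "/var/lib/docker/containers/f37f26.../config.json"
path_segments = path.split("/")
print(path_segments[-2])  # f37f26...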
def userpass(self, dir="ppcoin"):
"""Reads config file for username/password"""
source = os.path.expanduser("~/.{0}/{0}.conf").format(dir)
dest = open(source, "r")
with dest as conf:
for line in conf:
if line.startswith("rpcuser"):
username = line.split("=")[1].strip()
if line.startswith("rpcpassword"):
password = line.split("=")[1].strip()
return username, password | def function[userpass, parameter[self, dir]]:
constant[Reads config file for username/password]
variable[source] assign[=] call[call[name[os].path.expanduser, parameter[constant[~/.{0}/{0}.conf]]].format, parameter[name[dir]]]
variable[dest] assign[=] call[name[open], parameter[name[source], constant[r]]]
with name[dest] begin[:]
for taget[name[line]] in starred[name[conf]] begin[:]
if call[name[line].startswith, parameter[constant[rpcuser]]] begin[:]
variable[username] assign[=] call[call[call[name[line].split, parameter[constant[=]]]][constant[1]].strip, parameter[]]
if call[name[line].startswith, parameter[constant[rpcpassword]]] begin[:]
variable[password] assign[=] call[call[call[name[line].split, parameter[constant[=]]]][constant[1]].strip, parameter[]]
return[tuple[[<ast.Name object at 0x7da2043467d0>, <ast.Name object at 0x7da204347d60>]]] | keyword[def] identifier[userpass] ( identifier[self] , identifier[dir] = literal[string] ):
literal[string]
identifier[source] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ). identifier[format] ( identifier[dir] )
identifier[dest] = identifier[open] ( identifier[source] , literal[string] )
keyword[with] identifier[dest] keyword[as] identifier[conf] :
keyword[for] identifier[line] keyword[in] identifier[conf] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[username] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[password] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[return] identifier[username] , identifier[password] | def userpass(self, dir='ppcoin'):
"""Reads config file for username/password"""
source = os.path.expanduser('~/.{0}/{0}.conf').format(dir)
dest = open(source, 'r')
with dest as conf:
for line in conf:
if line.startswith('rpcuser'):
username = line.split('=')[1].strip() # depends on [control=['if'], data=[]]
if line.startswith('rpcpassword'):
password = line.split('=')[1].strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['conf']]
return (username, password) |
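The same key=value scan run against an in-memory config instead of ~/.ppcoin/ppcoin.conf (credentials are made up):

conf = ["rpcuser=alice", "rpcpassword=s3cret"]
username = password = None
for line in conf:
    if line.startswith("rpcuser"):
        username = line.split("=")[1].strip()
    if line.startswith("rpcpassword"):
        password = line.split("=")[1].strip()
print(username, password)  # alice s3cret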
def uniformly_refine_triangulation(self, faces=False, trisect=False):
"""
return points defining a refined triangulation obtained by bisection of all edges
in the triangulation
"""
if faces:
x_v1, y_v1 = self._add_face_centroids()
else:
if not trisect:
x_v1, y_v1 = self._add_midpoints()
else:
x_v1, y_v1 = self._add_tripoints(ratio=0.333333)
return x_v1, y_v1 | def function[uniformly_refine_triangulation, parameter[self, faces, trisect]]:
constant[
return points defining a refined triangulation obtained by bisection of all edges
in the triangulation
]
if name[faces] begin[:]
<ast.Tuple object at 0x7da1b246c850> assign[=] call[name[self]._add_face_centroids, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b1c38c10>, <ast.Name object at 0x7da1b1c38760>]]] | keyword[def] identifier[uniformly_refine_triangulation] ( identifier[self] , identifier[faces] = keyword[False] , identifier[trisect] = keyword[False] ):
literal[string]
keyword[if] identifier[faces] :
identifier[x_v1] , identifier[y_v1] = identifier[self] . identifier[_add_face_centroids] ()
keyword[else] :
keyword[if] keyword[not] identifier[trisect] :
identifier[x_v1] , identifier[y_v1] = identifier[self] . identifier[_add_midpoints] ()
keyword[else] :
identifier[x_v1] , identifier[y_v1] = identifier[self] . identifier[_add_tripoints] ( identifier[ratio] = literal[int] )
keyword[return] identifier[x_v1] , identifier[y_v1] | def uniformly_refine_triangulation(self, faces=False, trisect=False):
"""
return points defining a refined triangulation obtained by bisection of all edges
in the triangulation
"""
if faces:
(x_v1, y_v1) = self._add_face_centroids() # depends on [control=['if'], data=[]]
elif not trisect:
(x_v1, y_v1) = self._add_midpoints() # depends on [control=['if'], data=[]]
else:
(x_v1, y_v1) = self._add_tripoints(ratio=0.333333)
return (x_v1, y_v1) |
def capture(self, tunnel_addr, tunnel_port, filename=None,
bucket=None, destination=None):
"""
Captures memory based on the provided OutputDestination
:type tunnel_addr: str
:param tunnel_addr: ssh tunnel hostname or ip
:type tunnel_port: int
:param tunnel_port: ssh tunnel port
:type filename: str
:param filename: memory dump output filename
:type bucket: str
:param bucket: output s3 bucket
:type destination: :py:class:`margaritashotgun.memory.OutputDestinations`
:param destination: OutputDestinations member
"""
if filename is None:
raise MemoryCaptureAttributeMissingError('filename')
if destination == OutputDestinations.local:
logger.info("{0}: dumping to file://{1}".format(self.remote_addr,
filename))
result = self.to_file(filename, tunnel_addr, tunnel_port)
elif destination == OutputDestinations.s3:
if bucket is None:
raise MemoryCaptureAttributeMissingError('bucket')
logger.info(("{0}: dumping memory to s3://{1}/"
"{2}".format(self.remote_addr, bucket, filename)))
result = self.to_s3(bucket, filename, tunnel_addr, tunnel_port)
else:
raise MemoryCaptureOutputMissingError(self.remote_addr)
return result | def function[capture, parameter[self, tunnel_addr, tunnel_port, filename, bucket, destination]]:
constant[
Captures memory based on the provided OutputDestination
:type tunnel_addr: str
:param tunnel_addr: ssh tunnel hostname or ip
:type tunnel_port: int
:param tunnel_port: ssh tunnel port
:type filename: str
:param filename: memory dump output filename
:type bucket: str
:param bucket: output s3 bucket
:type destination: :py:class:`margaritashotgun.memory.OutputDestinations`
:param destination: OutputDestinations member
]
if compare[name[filename] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f09d3c0>
if compare[name[destination] equal[==] name[OutputDestinations].local] begin[:]
call[name[logger].info, parameter[call[constant[{0}: dumping to file://{1}].format, parameter[name[self].remote_addr, name[filename]]]]]
variable[result] assign[=] call[name[self].to_file, parameter[name[filename], name[tunnel_addr], name[tunnel_port]]]
return[name[result]] | keyword[def] identifier[capture] ( identifier[self] , identifier[tunnel_addr] , identifier[tunnel_port] , identifier[filename] = keyword[None] ,
identifier[bucket] = keyword[None] , identifier[destination] = keyword[None] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
keyword[raise] identifier[MemoryCaptureAttributeMissingError] ( literal[string] )
keyword[if] identifier[destination] == identifier[OutputDestinations] . identifier[local] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[remote_addr] ,
identifier[filename] ))
identifier[result] = identifier[self] . identifier[to_file] ( identifier[filename] , identifier[tunnel_addr] , identifier[tunnel_port] )
keyword[elif] identifier[destination] == identifier[OutputDestinations] . identifier[s3] :
keyword[if] identifier[bucket] keyword[is] keyword[None] :
keyword[raise] identifier[MemoryCaptureAttributeMissingError] ( literal[string] )
identifier[logger] . identifier[info] (( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[remote_addr] , identifier[bucket] , identifier[filename] )))
identifier[result] = identifier[self] . identifier[to_s3] ( identifier[bucket] , identifier[filename] , identifier[tunnel_addr] , identifier[tunnel_port] )
keyword[else] :
keyword[raise] identifier[MemoryCaptureOutputMissingError] ( identifier[self] . identifier[remote_addr] )
keyword[return] identifier[result] | def capture(self, tunnel_addr, tunnel_port, filename=None, bucket=None, destination=None):
"""
Captures memory based on the provided OutputDestination
:type tunnel_addr: str
:param tunnel_addr: ssh tunnel hostname or ip
:type tunnel_port: int
:param tunnel_port: ssh tunnel port
:type filename: str
:param filename: memory dump output filename
:type bucket: str
:param bucket: output s3 bucket
:type destination: :py:class:`margaritashotgun.memory.OutputDestinations`
:param destination: OutputDestinations member
"""
if filename is None:
raise MemoryCaptureAttributeMissingError('filename') # depends on [control=['if'], data=[]]
if destination == OutputDestinations.local:
logger.info('{0}: dumping to file://{1}'.format(self.remote_addr, filename))
result = self.to_file(filename, tunnel_addr, tunnel_port) # depends on [control=['if'], data=[]]
elif destination == OutputDestinations.s3:
if bucket is None:
raise MemoryCaptureAttributeMissingError('bucket') # depends on [control=['if'], data=[]]
logger.info('{0}: dumping memory to s3://{1}/{2}'.format(self.remote_addr, bucket, filename))
result = self.to_s3(bucket, filename, tunnel_addr, tunnel_port) # depends on [control=['if'], data=[]]
else:
raise MemoryCaptureOutputMissingError(self.remote_addr)
return result |
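The destination-dispatch shape of the method above, reduced to a standalone sketch (the enum members mirror the code, but the URL strings and function are illustrative, not margaritashotgun's actual API):

import enum

class OutputDestinations(enum.Enum):
    local = "local"
    s3 = "s3"

def route(filename, destination, bucket=None):
    if filename is None:
        raise ValueError("filename is required")
    if destination is OutputDestinations.local:
        return "file://" + filename
    if destination is OutputDestinations.s3:
        if bucket is None:
            raise ValueError("bucket is required for s3")
        return "s3://{0}/{1}".format(bucket, filename)
    raise ValueError("no output destination given")

print(route("host.lime", OutputDestinations.s3, bucket="dumps"))  # s3://dumps/host.lime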
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : boolean array or None
array indicating which elements to exclude from checking
b_mask : boolean array or boolean or None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
# is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
else:
to_raise = (((np.iinfo(np.int64).max -
b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or
((np.iinfo(np.int64).min -
b2[mask2] > arr[mask2]) & not_nan[mask2]).any())
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b | def function[checked_add_with_arr, parameter[arr, b, arr_mask, b_mask]]:
constant[
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : boolean array or None
array indicating which elements to exclude from checking
b_mask : boolean array or boolean or None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
]
variable[b2] assign[=] call[name[np].broadcast_to, parameter[name[b], name[arr].shape]]
if compare[name[b_mask] is_not constant[None]] begin[:]
variable[b2_mask] assign[=] call[name[np].broadcast_to, parameter[name[b_mask], name[arr].shape]]
if <ast.BoolOp object at 0x7da1b1eb4bb0> begin[:]
variable[not_nan] assign[=] call[name[np].logical_not, parameter[binary_operation[name[arr_mask] <ast.BitOr object at 0x7da2590d6aa0> name[b2_mask]]]]
variable[mask1] assign[=] compare[name[b2] greater[>] constant[0]]
variable[mask2] assign[=] compare[name[b2] less[<] constant[0]]
if <ast.UnaryOp object at 0x7da1b1d6dc00> begin[:]
variable[to_raise] assign[=] call[binary_operation[compare[binary_operation[call[name[np].iinfo, parameter[name[np].int64]].min - name[b2]] greater[>] name[arr]] <ast.BitAnd object at 0x7da2590d6b60> name[not_nan]].any, parameter[]]
if name[to_raise] begin[:]
<ast.Raise object at 0x7da1b1d6fdf0>
return[binary_operation[name[arr] + name[b]]] | keyword[def] identifier[checked_add_with_arr] ( identifier[arr] , identifier[b] , identifier[arr_mask] = keyword[None] , identifier[b_mask] = keyword[None] ):
literal[string]
identifier[b2] = identifier[np] . identifier[broadcast_to] ( identifier[b] , identifier[arr] . identifier[shape] )
keyword[if] identifier[b_mask] keyword[is] keyword[not] keyword[None] :
identifier[b2_mask] = identifier[np] . identifier[broadcast_to] ( identifier[b_mask] , identifier[arr] . identifier[shape] )
keyword[else] :
identifier[b2_mask] = keyword[None]
keyword[if] identifier[arr_mask] keyword[is] keyword[not] keyword[None] keyword[and] identifier[b2_mask] keyword[is] keyword[not] keyword[None] :
identifier[not_nan] = identifier[np] . identifier[logical_not] ( identifier[arr_mask] | identifier[b2_mask] )
keyword[elif] identifier[arr_mask] keyword[is] keyword[not] keyword[None] :
identifier[not_nan] = identifier[np] . identifier[logical_not] ( identifier[arr_mask] )
keyword[elif] identifier[b_mask] keyword[is] keyword[not] keyword[None] :
identifier[not_nan] = identifier[np] . identifier[logical_not] ( identifier[b2_mask] )
keyword[else] :
identifier[not_nan] = identifier[np] . identifier[empty] ( identifier[arr] . identifier[shape] , identifier[dtype] = identifier[bool] )
identifier[not_nan] . identifier[fill] ( keyword[True] )
identifier[mask1] = identifier[b2] > literal[int]
identifier[mask2] = identifier[b2] < literal[int]
keyword[if] keyword[not] identifier[mask1] . identifier[any] ():
identifier[to_raise] =(( identifier[np] . identifier[iinfo] ( identifier[np] . identifier[int64] ). identifier[min] - identifier[b2] > identifier[arr] )& identifier[not_nan] ). identifier[any] ()
keyword[elif] keyword[not] identifier[mask2] . identifier[any] ():
identifier[to_raise] =(( identifier[np] . identifier[iinfo] ( identifier[np] . identifier[int64] ). identifier[max] - identifier[b2] < identifier[arr] )& identifier[not_nan] ). identifier[any] ()
keyword[else] :
identifier[to_raise] =((( identifier[np] . identifier[iinfo] ( identifier[np] . identifier[int64] ). identifier[max] -
identifier[b2] [ identifier[mask1] ]< identifier[arr] [ identifier[mask1] ])& identifier[not_nan] [ identifier[mask1] ]). identifier[any] () keyword[or]
(( identifier[np] . identifier[iinfo] ( identifier[np] . identifier[int64] ). identifier[min] -
identifier[b2] [ identifier[mask2] ]> identifier[arr] [ identifier[mask2] ])& identifier[not_nan] [ identifier[mask2] ]). identifier[any] ())
keyword[if] identifier[to_raise] :
keyword[raise] identifier[OverflowError] ( literal[string] )
keyword[return] identifier[arr] + identifier[b] | def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : boolean array or None
array indicating which elements to exclude from checking
b_mask : boolean array or boolean or None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape) # depends on [control=['if'], data=['b_mask']]
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask) # depends on [control=['if'], data=[]]
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask) # depends on [control=['if'], data=['arr_mask']]
elif b_mask is not None:
not_nan = np.logical_not(b2_mask) # depends on [control=['if'], data=[]]
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
# is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any() # depends on [control=['if'], data=[]]
elif not mask2.any():
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any() # depends on [control=['if'], data=[]]
else:
to_raise = ((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or ((np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]).any()
if to_raise:
raise OverflowError('Overflow in int64 addition') # depends on [control=['if'], data=[]]
return arr + b |
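A quick exercise of the overflow guard above (numpy required; assumes checked_add_with_arr is in scope as defined):

import numpy as np

arr = np.array([1, np.iinfo(np.int64).max], dtype=np.int64)
try:
    checked_add_with_arr(arr, 1)
except OverflowError as err:
    print(err)  # Overflow in int64 addition
# Masked (NaT-like) slots are exempt from the check:
checked_add_with_arr(arr, 1, arr_mask=np.array([False, True]))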
def create_policy(name, policy_name, policy_type, policy, region=None,
key=None, keyid=None, profile=None):
'''
Create an ELB policy.
.. versionadded:: 2016.3.0
CLI example:
.. code-block:: bash
salt myminion boto_elb.create_policy myelb mypolicy LBCookieStickinessPolicyType '{"CookieExpirationPeriod": 3600}'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
return False
try:
success = conn.create_lb_policy(name, policy_name, policy_type, policy)
if success:
log.info('Created policy %s on ELB %s', policy_name, name)
return True
else:
log.error('Failed to create policy %s on ELB %s', policy_name, name)
return False
except boto.exception.BotoServerError as e:
log.error('Failed to create policy %s on ELB %s: %s',
policy_name, name, e.message,
exc_info_on_loglevel=logging.DEBUG)
return False | def function[create_policy, parameter[name, policy_name, policy_type, policy, region, key, keyid, profile]]:
constant[
Create an ELB policy.
.. versionadded:: 2016.3.0
CLI example:
.. code-block:: bash
salt myminion boto_elb.create_policy myelb mypolicy LBCookieStickinessPolicyType '{"CookieExpirationPeriod": 3600}'
]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
if <ast.UnaryOp object at 0x7da1b2044790> begin[:]
return[constant[False]]
<ast.Try object at 0x7da1b2047130> | keyword[def] identifier[create_policy] ( identifier[name] , identifier[policy_name] , identifier[policy_type] , identifier[policy] , identifier[region] = keyword[None] ,
identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[exists] ( identifier[name] , identifier[region] , identifier[key] , identifier[keyid] , identifier[profile] ):
keyword[return] keyword[False]
keyword[try] :
identifier[success] = identifier[conn] . identifier[create_lb_policy] ( identifier[name] , identifier[policy_name] , identifier[policy_type] , identifier[policy] )
keyword[if] identifier[success] :
identifier[log] . identifier[info] ( literal[string] , identifier[policy_name] , identifier[name] )
keyword[return] keyword[True]
keyword[else] :
identifier[log] . identifier[error] ( literal[string] , identifier[policy_name] , identifier[name] )
keyword[return] keyword[False]
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( literal[string] ,
identifier[policy_name] , identifier[name] , identifier[e] . identifier[message] ,
identifier[exc_info_on_loglevel] = identifier[logging] . identifier[DEBUG] )
keyword[return] keyword[False] | def create_policy(name, policy_name, policy_type, policy, region=None, key=None, keyid=None, profile=None):
"""
Create an ELB policy.
.. versionadded:: 2016.3.0
CLI example:
.. code-block:: bash
salt myminion boto_elb.create_policy myelb mypolicy LBCookieStickinessPolicyType '{"CookieExpirationPeriod": 3600}'
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
return False # depends on [control=['if'], data=[]]
try:
success = conn.create_lb_policy(name, policy_name, policy_type, policy)
if success:
log.info('Created policy %s on ELB %s', policy_name, name)
return True # depends on [control=['if'], data=[]]
else:
log.error('Failed to create policy %s on ELB %s', policy_name, name)
return False # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as e:
log.error('Failed to create policy %s on ELB %s: %s', policy_name, name, e.message, exc_info_on_loglevel=logging.DEBUG)
return False # depends on [control=['except'], data=['e']] |
def _canonicalize(self, filename):
"""Use .collection as extension unless provided"""
path, ext = os.path.splitext(filename)
if not ext:
ext = ".collection"
return path + ext | def function[_canonicalize, parameter[self, filename]]:
constant[Use .collection as extension unless provided]
<ast.Tuple object at 0x7da207f99a80> assign[=] call[name[os].path.splitext, parameter[name[filename]]]
if <ast.UnaryOp object at 0x7da18bc72260> begin[:]
variable[ext] assign[=] constant[.collection]
return[binary_operation[name[path] + name[ext]]] | keyword[def] identifier[_canonicalize] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[path] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )
keyword[if] keyword[not] identifier[ext] :
identifier[ext] = literal[string]
keyword[return] identifier[path] + identifier[ext] | def _canonicalize(self, filename):
"""Use .collection as extension unless provided"""
(path, ext) = os.path.splitext(filename)
if not ext:
ext = '.collection' # depends on [control=['if'], data=[]]
return path + ext |
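The os.path.splitext behavior the helper relies on:

import os

print(os.path.splitext("runs/demo"))             # ('runs/demo', '')
print(os.path.splitext("runs/demo.collection"))  # ('runs/demo', '.collection')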
def read(self, identifier, path=None):
""" Read a text object given an identifier and a path
:param identifier: Identifier of the text
:param path: Path of the text files
:return: Text
"""
if self.CACHE_FULL_TEI is True:
o = self.cache.get(_cache_key(self.texts_parsed_cache_key, identifier))
if o is not None:
return o
else:
with open(path) as f:
o = Text(urn=identifier, resource=self.xmlparse(f))
self.cache.set(_cache_key(self.texts_parsed_cache_key, identifier), o)
else:
with open(path) as f:
o = Text(urn=identifier, resource=self.xmlparse(f))
return o | def function[read, parameter[self, identifier, path]]:
constant[ Read a text object given an identifier and a path
:param identifier: Identifier of the text
:param path: Path of the text files
:return: Text
]
if compare[name[self].CACHE_FULL_TEI is constant[True]] begin[:]
variable[o] assign[=] call[name[self].cache.get, parameter[call[name[_cache_key], parameter[name[self].texts_parsed_cache_key, name[identifier]]]]]
if compare[name[o] is_not constant[None]] begin[:]
return[name[o]]
return[name[o]] | keyword[def] identifier[read] ( identifier[self] , identifier[identifier] , identifier[path] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[CACHE_FULL_TEI] keyword[is] keyword[True] :
identifier[o] = identifier[self] . identifier[cache] . identifier[get] ( identifier[_cache_key] ( identifier[self] . identifier[texts_parsed_cache_key] , identifier[identifier] ))
keyword[if] identifier[o] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[o]
keyword[else] :
keyword[with] identifier[open] ( identifier[path] ) keyword[as] identifier[f] :
identifier[o] = identifier[Text] ( identifier[urn] = identifier[identifier] , identifier[resource] = identifier[self] . identifier[xmlparse] ( identifier[f] ))
identifier[self] . identifier[cache] . identifier[set] ( identifier[_cache_key] ( identifier[self] . identifier[texts_parsed_cache_key] , identifier[identifier] ), identifier[o] )
keyword[else] :
keyword[with] identifier[open] ( identifier[path] ) keyword[as] identifier[f] :
identifier[o] = identifier[Text] ( identifier[urn] = identifier[identifier] , identifier[resource] = identifier[self] . identifier[xmlparse] ( identifier[f] ))
keyword[return] identifier[o] | def read(self, identifier, path=None):
""" Read a text object given an identifier and a path
:param identifier: Identifier of the text
:param path: Path of the text files
:return: Text
"""
if self.CACHE_FULL_TEI is True:
o = self.cache.get(_cache_key(self.texts_parsed_cache_key, identifier))
if o is not None:
return o # depends on [control=['if'], data=['o']]
else:
with open(path) as f:
o = Text(urn=identifier, resource=self.xmlparse(f))
self.cache.set(_cache_key(self.texts_parsed_cache_key, identifier), o) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
with open(path) as f:
o = Text(urn=identifier, resource=self.xmlparse(f)) # depends on [control=['with'], data=['f']]
return o |
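The cache-aside pattern above, reduced to a dict-backed sketch (the cache backend, key hashing, and TEI parsing are stubbed out):

_cache = {}

def read_cached(key, load):
    obj = _cache.get(key)
    if obj is None:       # miss: parse once, then memoize
        obj = load()
        _cache[key] = obj
    return obj

text = read_cached("urn:demo", lambda: "<parsed TEI object>")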
def _brentq_cdf(self, value):
"""Helper function to compute percent_point.
As scipy.stats.gaussian_kde doesn't provide this functionality out of the box, we need
        to take a numerical approach:
        - First we scalarize and bound cumulative_distribution.
        - Then we define a function `f(x) = cdf(x) - value`, where value is the given argument.
        - As this is called from ppf, we can assume value = cdf(z) for some z, which is the
          value we are searching for. Therefore the zeros of the function will be x such that:
          cdf(x) - cdf(z) = 0 => (because cdf is monotonic and continuous) x = z
Args:
value(float): cdf value, that is, in [0,1]
Returns:
callable: function whose zero is the ppf of value.
"""
# The decorator expects an instance method, but methods are usually decorated before being bound
bound_cdf = partial(scalarize(GaussianKDE.cumulative_distribution), self)
def f(x):
return bound_cdf(x) - value
return f | def function[_brentq_cdf, parameter[self, value]]:
constant[Helper function to compute percent_point.
As scipy.stats.gaussian_kde doesn't provide this functionality out of the box, we need
to take a numerical approach:
- First we scalarize and bound cumulative_distribution.
- Then we define a function `f(x) = cdf(x) - value`, where value is the given argument.
- As this is called from ppf, we can assume value = cdf(z) for some z, which is the
value we are searching for. Therefore the zeros of the function will be x such that:
cdf(x) - cdf(z) = 0 => (because cdf is monotonic and continuous) x = z
Args:
value(float): cdf value, that is, in [0,1]
Returns:
callable: function whose zero is the ppf of value.
]
variable[bound_cdf] assign[=] call[name[partial], parameter[call[name[scalarize], parameter[name[GaussianKDE].cumulative_distribution]], name[self]]]
def function[f, parameter[x]]:
return[binary_operation[call[name[bound_cdf], parameter[name[x]]] - name[value]]]
return[name[f]] | keyword[def] identifier[_brentq_cdf] ( identifier[self] , identifier[value] ):
literal[string]
identifier[bound_cdf] = identifier[partial] ( identifier[scalarize] ( identifier[GaussianKDE] . identifier[cumulative_distribution] ), identifier[self] )
keyword[def] identifier[f] ( identifier[x] ):
keyword[return] identifier[bound_cdf] ( identifier[x] )- identifier[value]
keyword[return] identifier[f] | def _brentq_cdf(self, value):
"""Helper function to compute percent_point.
As scipy.stats.gaussian_kde doesn't provide this functionality out of the box, we need
to take a numerical approach:
- First we scalarize and bound cumulative_distribution.
- Then we define a function `f(x) = cdf(x) - value`, where value is the given argument.
- As this is called from ppf, we can assume value = cdf(z) for some z, which is the
value we are searching for. Therefore the zeros of the function will be x such that:
cdf(x) - cdf(z) = 0 => (because cdf is monotonic and continuous) x = z
Args:
value(float): cdf value, that is, in [0,1]
Returns:
callable: function whose zero is the ppf of value.
"""
# The decorator expects an instance method, but methods are usually decorated before being bound
bound_cdf = partial(scalarize(GaussianKDE.cumulative_distribution), self)
def f(x):
return bound_cdf(x) - value
return f |
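The numerical inversion this helper feeds into, sketched standalone (scipy required; a normal CDF stands in for the fitted KDE):

from scipy.optimize import brentq
from scipy.stats import norm

value = 0.8
f = lambda x: norm.cdf(x) - value  # its zero is ppf(value)
print(brentq(f, -10, 10))          # ~0.8416, i.e. norm.ppf(0.8)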
def minimize(bed_file):
"""
strip a BED file down to its three necessary columns: chrom start end
"""
if not bed_file:
return bed_file
else:
sorted_bed = bt.BedTool(bed_file).cut(range(3)).sort()
if not sorted_bed.fn.endswith(".bed"):
return sorted_bed.moveto(sorted_bed.fn + ".bed")
else:
return sorted_bed | def function[minimize, parameter[bed_file]]:
constant[
strip a BED file down to its three necessary columns: chrom start end
]
if <ast.UnaryOp object at 0x7da1b1844280> begin[:]
return[name[bed_file]] | keyword[def] identifier[minimize] ( identifier[bed_file] ):
literal[string]
keyword[if] keyword[not] identifier[bed_file] :
keyword[return] identifier[bed_file]
keyword[else] :
identifier[sorted_bed] = identifier[bt] . identifier[BedTool] ( identifier[bed_file] ). identifier[cut] ( identifier[range] ( literal[int] )). identifier[sort] ()
keyword[if] keyword[not] identifier[sorted_bed] . identifier[fn] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[sorted_bed] . identifier[moveto] ( identifier[sorted_bed] . identifier[fn] + literal[string] )
keyword[else] :
keyword[return] identifier[sorted_bed] | def minimize(bed_file):
"""
strip a BED file down to its three necessary columns: chrom start end
"""
if not bed_file:
return bed_file # depends on [control=['if'], data=[]]
else:
sorted_bed = bt.BedTool(bed_file).cut(range(3)).sort()
if not sorted_bed.fn.endswith('.bed'):
return sorted_bed.moveto(sorted_bed.fn + '.bed') # depends on [control=['if'], data=[]]
else:
return sorted_bed |
def do_mv(self, subcmd, opts, message, folder):
"""${cmd_name}: move the specified message to the specified folder
${cmd_usage}
"""
client = MdClient(self.maildir, filesystem=self.filesystem)
client.move(message, folder) | def function[do_mv, parameter[self, subcmd, opts, message, folder]]:
constant[${cmd_name}: move the specified message to the specified folder
${cmd_usage}
]
variable[client] assign[=] call[name[MdClient], parameter[name[self].maildir]]
call[name[client].move, parameter[name[message], name[folder]]] | keyword[def] identifier[do_mv] ( identifier[self] , identifier[subcmd] , identifier[opts] , identifier[message] , identifier[folder] ):
literal[string]
identifier[client] = identifier[MdClient] ( identifier[self] . identifier[maildir] , identifier[filesystem] = identifier[self] . identifier[filesystem] )
identifier[client] . identifier[move] ( identifier[message] , identifier[folder] ) | def do_mv(self, subcmd, opts, message, folder):
"""${cmd_name}: move the specified message to the specified folder
${cmd_usage}
"""
client = MdClient(self.maildir, filesystem=self.filesystem)
client.move(message, folder) |
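The do_* naming follows the cmd/cmdln dispatch convention, where the shell maps a subcommand name onto a method. A toy dispatcher, not the real base class, sketches the pattern:

class MiniShell:
    def do_mv(self, message, folder):
        print("moving %s -> %s" % (message, folder))

    def run(self, argv):
        # map the first token to a do_<name> handler, pass the rest as args
        handler = getattr(self, "do_" + argv[0])
        handler(*argv[1:])

MiniShell().run(["mv", "abc123", ".Archive"])   # moving abc123 -> .Archive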
def write_run_output(run, **kwargs):
"""Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
    * [root]_phys_live.txt and [root]_phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
    run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means.
"""
write_dead = kwargs.pop('write_dead', True)
write_stats = kwargs.pop('write_stats', True)
posteriors = kwargs.pop('posteriors', False)
equals = kwargs.pop('equals', False)
stats_means_errs = kwargs.pop('stats_means_errs', True)
fmt = kwargs.pop('fmt', '% .14E')
n_simulate = kwargs.pop('n_simulate', 100)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run['output'], key + ' not in run["output"]'
root = os.path.join(run['output']['base_dir'], run['output']['file_root'])
if write_dead:
samples = run_dead_birth_array(run)
np.savetxt(root + '_dead-birth.txt', samples, fmt=fmt)
np.savetxt(root + '_dead.txt', samples[:, :-1], fmt=fmt)
if equals or posteriors:
w_rel = nestcheck.ns_run_utils.get_w_rel(run)
post_arr = np.zeros((run['theta'].shape[0], run['theta'].shape[1] + 2))
post_arr[:, 0] = w_rel
post_arr[:, 1] = -2 * run['logl']
post_arr[:, 2:] = run['theta']
if posteriors:
np.savetxt(root + '.txt', post_arr, fmt=fmt)
run['output']['nposterior'] = post_arr.shape[0]
else:
run['output']['nposterior'] = 0
if equals:
inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]
np.savetxt(root + '_equal_weights.txt', post_arr[inds, 1:],
fmt=fmt)
run['output']['nequals'] = inds.shape[0]
else:
run['output']['nequals'] = 0
if write_stats:
run['output']['ndead'] = run['logl'].shape[0]
if stats_means_errs:
# Get logZ and param estimates and errors
estimators = [e.logz]
for i in range(run['theta'].shape[1]):
estimators.append(functools.partial(e.param_mean, param_ind=i))
values = nestcheck.ns_run_utils.run_estimators(run, estimators)
stds = nestcheck.error_analysis.run_std_bootstrap(
run, estimators, n_simulate=n_simulate)
run['output']['logZ'] = values[0]
run['output']['logZerr'] = stds[0]
run['output']['param_means'] = list(values[1:])
run['output']['param_mean_errs'] = list(stds[1:])
write_stats_file(run['output']) | def function[write_run_output, parameter[run]]:
constant[Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
    * [root]_phys_live.txt and [root]_phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
    run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means.
]
variable[write_dead] assign[=] call[name[kwargs].pop, parameter[constant[write_dead], constant[True]]]
variable[write_stats] assign[=] call[name[kwargs].pop, parameter[constant[write_stats], constant[True]]]
variable[posteriors] assign[=] call[name[kwargs].pop, parameter[constant[posteriors], constant[False]]]
variable[equals] assign[=] call[name[kwargs].pop, parameter[constant[equals], constant[False]]]
variable[stats_means_errs] assign[=] call[name[kwargs].pop, parameter[constant[stats_means_errs], constant[True]]]
variable[fmt] assign[=] call[name[kwargs].pop, parameter[constant[fmt], constant[% .14E]]]
variable[n_simulate] assign[=] call[name[kwargs].pop, parameter[constant[n_simulate], constant[100]]]
if name[kwargs] begin[:]
<ast.Raise object at 0x7da18dc9ba60>
variable[mandatory_keys] assign[=] list[[<ast.Constant object at 0x7da18dc9a0b0>, <ast.Constant object at 0x7da18dc9b460>]]
for taget[name[key]] in starred[name[mandatory_keys]] begin[:]
assert[compare[name[key] in call[name[run]][constant[output]]]]
variable[root] assign[=] call[name[os].path.join, parameter[call[call[name[run]][constant[output]]][constant[base_dir]], call[call[name[run]][constant[output]]][constant[file_root]]]]
if name[write_dead] begin[:]
variable[samples] assign[=] call[name[run_dead_birth_array], parameter[name[run]]]
call[name[np].savetxt, parameter[binary_operation[name[root] + constant[_dead-birth.txt]], name[samples]]]
call[name[np].savetxt, parameter[binary_operation[name[root] + constant[_dead.txt]], call[name[samples]][tuple[[<ast.Slice object at 0x7da18dc9acb0>, <ast.Slice object at 0x7da18dc9a920>]]]]]
if <ast.BoolOp object at 0x7da18dc9a770> begin[:]
variable[w_rel] assign[=] call[name[nestcheck].ns_run_utils.get_w_rel, parameter[name[run]]]
variable[post_arr] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da18dc9bbe0>, <ast.BinOp object at 0x7da18dc9b4f0>]]]]
call[name[post_arr]][tuple[[<ast.Slice object at 0x7da18dc9a950>, <ast.Constant object at 0x7da18dc99960>]]] assign[=] name[w_rel]
call[name[post_arr]][tuple[[<ast.Slice object at 0x7da18dc9b130>, <ast.Constant object at 0x7da18dc991e0>]]] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18dc9a8f0> * call[name[run]][constant[logl]]]
call[name[post_arr]][tuple[[<ast.Slice object at 0x7da18dc9a890>, <ast.Slice object at 0x7da18dc989a0>]]] assign[=] call[name[run]][constant[theta]]
if name[posteriors] begin[:]
call[name[np].savetxt, parameter[binary_operation[name[root] + constant[.txt]], name[post_arr]]]
call[call[name[run]][constant[output]]][constant[nposterior]] assign[=] call[name[post_arr].shape][constant[0]]
if name[equals] begin[:]
variable[inds] assign[=] call[call[name[np].where, parameter[compare[name[w_rel] greater[>] call[name[np].random.random, parameter[call[name[w_rel].shape][constant[0]]]]]]]][constant[0]]
call[name[np].savetxt, parameter[binary_operation[name[root] + constant[_equal_weights.txt]], call[name[post_arr]][tuple[[<ast.Name object at 0x7da18dc9a440>, <ast.Slice object at 0x7da18dc9b970>]]]]]
call[call[name[run]][constant[output]]][constant[nequals]] assign[=] call[name[inds].shape][constant[0]]
if name[write_stats] begin[:]
call[call[name[run]][constant[output]]][constant[ndead]] assign[=] call[call[name[run]][constant[logl]].shape][constant[0]]
if name[stats_means_errs] begin[:]
variable[estimators] assign[=] list[[<ast.Attribute object at 0x7da18f09dd50>]]
for taget[name[i]] in starred[call[name[range], parameter[call[call[name[run]][constant[theta]].shape][constant[1]]]]] begin[:]
call[name[estimators].append, parameter[call[name[functools].partial, parameter[name[e].param_mean]]]]
variable[values] assign[=] call[name[nestcheck].ns_run_utils.run_estimators, parameter[name[run], name[estimators]]]
variable[stds] assign[=] call[name[nestcheck].error_analysis.run_std_bootstrap, parameter[name[run], name[estimators]]]
call[call[name[run]][constant[output]]][constant[logZ]] assign[=] call[name[values]][constant[0]]
call[call[name[run]][constant[output]]][constant[logZerr]] assign[=] call[name[stds]][constant[0]]
call[call[name[run]][constant[output]]][constant[param_means]] assign[=] call[name[list], parameter[call[name[values]][<ast.Slice object at 0x7da18f09cf40>]]]
call[call[name[run]][constant[output]]][constant[param_mean_errs]] assign[=] call[name[list], parameter[call[name[stds]][<ast.Slice object at 0x7da18f09d6f0>]]]
call[name[write_stats_file], parameter[call[name[run]][constant[output]]]] | keyword[def] identifier[write_run_output] ( identifier[run] ,** identifier[kwargs] ):
literal[string]
identifier[write_dead] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
identifier[write_stats] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
identifier[posteriors] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[equals] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[stats_means_errs] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
identifier[fmt] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[n_simulate] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] )
keyword[if] identifier[kwargs] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[kwargs] ))
identifier[mandatory_keys] =[ literal[string] , literal[string] ]
keyword[for] identifier[key] keyword[in] identifier[mandatory_keys] :
keyword[assert] identifier[key] keyword[in] identifier[run] [ literal[string] ], identifier[key] + literal[string]
identifier[root] = identifier[os] . identifier[path] . identifier[join] ( identifier[run] [ literal[string] ][ literal[string] ], identifier[run] [ literal[string] ][ literal[string] ])
keyword[if] identifier[write_dead] :
identifier[samples] = identifier[run_dead_birth_array] ( identifier[run] )
identifier[np] . identifier[savetxt] ( identifier[root] + literal[string] , identifier[samples] , identifier[fmt] = identifier[fmt] )
identifier[np] . identifier[savetxt] ( identifier[root] + literal[string] , identifier[samples] [:,:- literal[int] ], identifier[fmt] = identifier[fmt] )
keyword[if] identifier[equals] keyword[or] identifier[posteriors] :
identifier[w_rel] = identifier[nestcheck] . identifier[ns_run_utils] . identifier[get_w_rel] ( identifier[run] )
identifier[post_arr] = identifier[np] . identifier[zeros] (( identifier[run] [ literal[string] ]. identifier[shape] [ literal[int] ], identifier[run] [ literal[string] ]. identifier[shape] [ literal[int] ]+ literal[int] ))
identifier[post_arr] [:, literal[int] ]= identifier[w_rel]
identifier[post_arr] [:, literal[int] ]=- literal[int] * identifier[run] [ literal[string] ]
identifier[post_arr] [:, literal[int] :]= identifier[run] [ literal[string] ]
keyword[if] identifier[posteriors] :
identifier[np] . identifier[savetxt] ( identifier[root] + literal[string] , identifier[post_arr] , identifier[fmt] = identifier[fmt] )
identifier[run] [ literal[string] ][ literal[string] ]= identifier[post_arr] . identifier[shape] [ literal[int] ]
keyword[else] :
identifier[run] [ literal[string] ][ literal[string] ]= literal[int]
keyword[if] identifier[equals] :
identifier[inds] = identifier[np] . identifier[where] ( identifier[w_rel] > identifier[np] . identifier[random] . identifier[random] ( identifier[w_rel] . identifier[shape] [ literal[int] ]))[ literal[int] ]
identifier[np] . identifier[savetxt] ( identifier[root] + literal[string] , identifier[post_arr] [ identifier[inds] , literal[int] :],
identifier[fmt] = identifier[fmt] )
identifier[run] [ literal[string] ][ literal[string] ]= identifier[inds] . identifier[shape] [ literal[int] ]
keyword[else] :
identifier[run] [ literal[string] ][ literal[string] ]= literal[int]
keyword[if] identifier[write_stats] :
identifier[run] [ literal[string] ][ literal[string] ]= identifier[run] [ literal[string] ]. identifier[shape] [ literal[int] ]
keyword[if] identifier[stats_means_errs] :
identifier[estimators] =[ identifier[e] . identifier[logz] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[run] [ literal[string] ]. identifier[shape] [ literal[int] ]):
identifier[estimators] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[e] . identifier[param_mean] , identifier[param_ind] = identifier[i] ))
identifier[values] = identifier[nestcheck] . identifier[ns_run_utils] . identifier[run_estimators] ( identifier[run] , identifier[estimators] )
identifier[stds] = identifier[nestcheck] . identifier[error_analysis] . identifier[run_std_bootstrap] (
identifier[run] , identifier[estimators] , identifier[n_simulate] = identifier[n_simulate] )
identifier[run] [ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ]
identifier[run] [ literal[string] ][ literal[string] ]= identifier[stds] [ literal[int] ]
identifier[run] [ literal[string] ][ literal[string] ]= identifier[list] ( identifier[values] [ literal[int] :])
identifier[run] [ literal[string] ][ literal[string] ]= identifier[list] ( identifier[stds] [ literal[int] :])
identifier[write_stats_file] ( identifier[run] [ literal[string] ]) | def write_run_output(run, **kwargs):
"""Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
    * [root]_phys_live.txt and [root]_phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
    run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\\log \\mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means.
"""
write_dead = kwargs.pop('write_dead', True)
write_stats = kwargs.pop('write_stats', True)
posteriors = kwargs.pop('posteriors', False)
equals = kwargs.pop('equals', False)
stats_means_errs = kwargs.pop('stats_means_errs', True)
fmt = kwargs.pop('fmt', '% .14E')
n_simulate = kwargs.pop('n_simulate', 100)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) # depends on [control=['if'], data=[]]
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run['output'], key + ' not in run["output"]' # depends on [control=['for'], data=['key']]
root = os.path.join(run['output']['base_dir'], run['output']['file_root'])
if write_dead:
samples = run_dead_birth_array(run)
np.savetxt(root + '_dead-birth.txt', samples, fmt=fmt)
np.savetxt(root + '_dead.txt', samples[:, :-1], fmt=fmt) # depends on [control=['if'], data=[]]
if equals or posteriors:
w_rel = nestcheck.ns_run_utils.get_w_rel(run)
post_arr = np.zeros((run['theta'].shape[0], run['theta'].shape[1] + 2))
post_arr[:, 0] = w_rel
post_arr[:, 1] = -2 * run['logl']
post_arr[:, 2:] = run['theta'] # depends on [control=['if'], data=[]]
if posteriors:
np.savetxt(root + '.txt', post_arr, fmt=fmt)
run['output']['nposterior'] = post_arr.shape[0] # depends on [control=['if'], data=[]]
else:
run['output']['nposterior'] = 0
if equals:
inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]
np.savetxt(root + '_equal_weights.txt', post_arr[inds, 1:], fmt=fmt)
run['output']['nequals'] = inds.shape[0] # depends on [control=['if'], data=[]]
else:
run['output']['nequals'] = 0
if write_stats:
run['output']['ndead'] = run['logl'].shape[0]
if stats_means_errs:
# Get logZ and param estimates and errors
estimators = [e.logz]
for i in range(run['theta'].shape[1]):
estimators.append(functools.partial(e.param_mean, param_ind=i)) # depends on [control=['for'], data=['i']]
values = nestcheck.ns_run_utils.run_estimators(run, estimators)
stds = nestcheck.error_analysis.run_std_bootstrap(run, estimators, n_simulate=n_simulate)
run['output']['logZ'] = values[0]
run['output']['logZerr'] = stds[0]
run['output']['param_means'] = list(values[1:])
run['output']['param_mean_errs'] = list(stds[1:]) # depends on [control=['if'], data=[]]
write_stats_file(run['output']) # depends on [control=['if'], data=[]] |
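The equals branch above thins weighted samples into an equally weighted set by keeping each sample with probability equal to its relative weight; a standalone sketch with stand-in weights:

import numpy as np

np.random.seed(0)
logl = np.linspace(-10.0, -0.1, 50)
w_rel = np.exp(logl - logl.max())   # stand-in relative posterior weights in (0, 1]
# keep sample i with probability w_rel[i], as in the equal-weights branch
inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]
print(inds.shape[0], "of", w_rel.shape[0], "samples kept")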
def attention_bias_same_segment(query_segment_id, memory_segment_id):
"""Create an bias tensor to be added to attention logits.
Positions with the same segment_ids can see each other.
Args:
query_segment_id: a float `Tensor` with shape [batch, query_length].
memory_segment_id: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, query_length, memory_length].
"""
ret = (tf.to_float(
tf.not_equal(
tf.expand_dims(query_segment_id, 2),
tf.expand_dims(memory_segment_id, 1))) *
large_compatible_negative(memory_segment_id.dtype))
return tf.expand_dims(ret, axis=1) | def function[attention_bias_same_segment, parameter[query_segment_id, memory_segment_id]]:
    constant[Create a bias tensor to be added to attention logits.
Positions with the same segment_ids can see each other.
Args:
query_segment_id: a float `Tensor` with shape [batch, query_length].
memory_segment_id: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, query_length, memory_length].
]
variable[ret] assign[=] binary_operation[call[name[tf].to_float, parameter[call[name[tf].not_equal, parameter[call[name[tf].expand_dims, parameter[name[query_segment_id], constant[2]]], call[name[tf].expand_dims, parameter[name[memory_segment_id], constant[1]]]]]]] * call[name[large_compatible_negative], parameter[name[memory_segment_id].dtype]]]
return[call[name[tf].expand_dims, parameter[name[ret]]]] | keyword[def] identifier[attention_bias_same_segment] ( identifier[query_segment_id] , identifier[memory_segment_id] ):
literal[string]
identifier[ret] =( identifier[tf] . identifier[to_float] (
identifier[tf] . identifier[not_equal] (
identifier[tf] . identifier[expand_dims] ( identifier[query_segment_id] , literal[int] ),
identifier[tf] . identifier[expand_dims] ( identifier[memory_segment_id] , literal[int] )))*
identifier[large_compatible_negative] ( identifier[memory_segment_id] . identifier[dtype] ))
keyword[return] identifier[tf] . identifier[expand_dims] ( identifier[ret] , identifier[axis] = literal[int] ) | def attention_bias_same_segment(query_segment_id, memory_segment_id):
"""Create an bias tensor to be added to attention logits.
Positions with the same segment_ids can see each other.
Args:
query_segment_id: a float `Tensor` with shape [batch, query_length].
memory_segment_id: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, query_length, memory_length].
"""
ret = tf.to_float(tf.not_equal(tf.expand_dims(query_segment_id, 2), tf.expand_dims(memory_segment_id, 1))) * large_compatible_negative(memory_segment_id.dtype)
return tf.expand_dims(ret, axis=1) |
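The same broadcasting re-expressed in numpy for inspection; -1e9 stands in for large_compatible_negative and is an assumption:

import numpy as np

query_seg = np.array([[0, 0, 1]], dtype=np.float32)    # [batch, query_length]
memory_seg = np.array([[0, 1, 1]], dtype=np.float32)   # [batch, memory_length]
mask = (query_seg[:, :, None] != memory_seg[:, None, :]).astype(np.float32)
bias = mask[:, None, :, :] * -1e9   # [batch, 1, query_length, memory_length]
print(bias[0, 0])                   # 0 where segments match, -1e9 where they differ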
def cume_dist(expr, sort=None, ascending=True):
"""
Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _rank_op(expr, CumeDist, types.float64, sort=sort, ascending=ascending) | def function[cume_dist, parameter[expr, sort, ascending]]:
constant[
Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
]
return[call[name[_rank_op], parameter[name[expr], name[CumeDist], name[types].float64]]] | keyword[def] identifier[cume_dist] ( identifier[expr] , identifier[sort] = keyword[None] , identifier[ascending] = keyword[True] ):
literal[string]
keyword[return] identifier[_rank_op] ( identifier[expr] , identifier[CumeDist] , identifier[types] . identifier[float64] , identifier[sort] = identifier[sort] , identifier[ascending] = identifier[ascending] ) | def cume_dist(expr, sort=None, ascending=True):
"""
Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _rank_op(expr, CumeDist, types.float64, sort=sort, ascending=ascending) |
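cume_dist follows the usual SQL window-function definition: the number of rows ordered at or before the current row, divided by the partition size. A pandas equivalent for intuition, not the PyODPS API itself:

import pandas as pd

df = pd.DataFrame({'grp': ['a', 'a', 'a', 'b', 'b'], 'val': [1, 2, 2, 5, 7]})
# rank(method='max', pct=True) == count(rows <= current) / partition size
df['cume'] = df.groupby('grp')['val'].rank(method='max', pct=True)
print(df)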
def get_archives(self, title_id, language_code):
"""Get the archive list from a given `title_id` and `language_code`.
:param int title_id: title id.
:param int language_code: language code.
:return: the archives.
:rtype: list of :class:`LegendasTVArchive`
"""
logger.info('Getting archives for title %d and language %d', title_id, language_code)
archives = []
page = 1
while True:
# get the archive page
url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format(
title=title_id, language=language_code, page=page)
r = self.session.get(url)
r.raise_for_status()
# parse the results
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
for archive_soup in soup.select('div.list_element > article > div'):
# create archive
archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text,
'pack' in archive_soup['class'], 'destaque' in archive_soup['class'],
self.server_url + archive_soup.a['href'][1:])
# extract text containing downloads, rating and timestamp
data_text = archive_soup.find('p', class_='data').text
# match downloads
archive.downloads = int(downloads_re.search(data_text).group('downloads'))
# match rating
match = rating_re.search(data_text)
if match:
archive.rating = int(match.group('rating'))
# match timestamp and validate it
time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()}
archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data))
if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc):
raise ProviderError('Archive timestamp is in the future')
# add archive
archives.append(archive)
# stop on last page
if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None:
break
# increment page count
page += 1
logger.debug('Found %d archives', len(archives))
return archives | def function[get_archives, parameter[self, title_id, language_code]]:
constant[Get the archive list from a given `title_id` and `language_code`.
:param int title_id: title id.
:param int language_code: language code.
:return: the archives.
:rtype: list of :class:`LegendasTVArchive`
]
call[name[logger].info, parameter[constant[Getting archives for title %d and language %d], name[title_id], name[language_code]]]
variable[archives] assign[=] list[[]]
variable[page] assign[=] constant[1]
while constant[True] begin[:]
variable[url] assign[=] binary_operation[name[self].server_url + call[constant[util/carrega_legendas_busca_filme/{title}/{language}/-/{page}].format, parameter[]]]
variable[r] assign[=] call[name[self].session.get, parameter[name[url]]]
call[name[r].raise_for_status, parameter[]]
variable[soup] assign[=] call[name[ParserBeautifulSoup], parameter[name[r].content, list[[<ast.Constant object at 0x7da1b1ecebc0>, <ast.Constant object at 0x7da1b1ecc850>]]]]
for taget[name[archive_soup]] in starred[call[name[soup].select, parameter[constant[div.list_element > article > div]]]] begin[:]
variable[archive] assign[=] call[name[LegendasTVArchive], parameter[call[call[call[name[archive_soup].a][constant[href]].split, parameter[constant[/]]]][constant[2]], name[archive_soup].a.text, compare[constant[pack] in call[name[archive_soup]][constant[class]]], compare[constant[destaque] in call[name[archive_soup]][constant[class]]], binary_operation[name[self].server_url + call[call[name[archive_soup].a][constant[href]]][<ast.Slice object at 0x7da1b1ece410>]]]]
variable[data_text] assign[=] call[name[archive_soup].find, parameter[constant[p]]].text
name[archive].downloads assign[=] call[name[int], parameter[call[call[name[downloads_re].search, parameter[name[data_text]]].group, parameter[constant[downloads]]]]]
variable[match] assign[=] call[name[rating_re].search, parameter[name[data_text]]]
if name[match] begin[:]
name[archive].rating assign[=] call[name[int], parameter[call[name[match].group, parameter[constant[rating]]]]]
variable[time_data] assign[=] <ast.DictComp object at 0x7da1b1ecd8a0>
name[archive].timestamp assign[=] call[call[name[pytz].timezone, parameter[constant[America/Sao_Paulo]]].localize, parameter[call[name[datetime], parameter[]]]]
if compare[name[archive].timestamp greater[>] call[call[name[datetime].utcnow, parameter[]].replace, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b1eced10>
call[name[archives].append, parameter[name[archive]]]
if compare[call[name[soup].find, parameter[constant[a]]] is constant[None]] begin[:]
break
<ast.AugAssign object at 0x7da1b1eccbe0>
call[name[logger].debug, parameter[constant[Found %d archives], call[name[len], parameter[name[archives]]]]]
return[name[archives]] | keyword[def] identifier[get_archives] ( identifier[self] , identifier[title_id] , identifier[language_code] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[title_id] , identifier[language_code] )
identifier[archives] =[]
identifier[page] = literal[int]
keyword[while] keyword[True] :
identifier[url] = identifier[self] . identifier[server_url] + literal[string] . identifier[format] (
identifier[title] = identifier[title_id] , identifier[language] = identifier[language_code] , identifier[page] = identifier[page] )
identifier[r] = identifier[self] . identifier[session] . identifier[get] ( identifier[url] )
identifier[r] . identifier[raise_for_status] ()
identifier[soup] = identifier[ParserBeautifulSoup] ( identifier[r] . identifier[content] ,[ literal[string] , literal[string] ])
keyword[for] identifier[archive_soup] keyword[in] identifier[soup] . identifier[select] ( literal[string] ):
identifier[archive] = identifier[LegendasTVArchive] ( identifier[archive_soup] . identifier[a] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ], identifier[archive_soup] . identifier[a] . identifier[text] ,
literal[string] keyword[in] identifier[archive_soup] [ literal[string] ], literal[string] keyword[in] identifier[archive_soup] [ literal[string] ],
identifier[self] . identifier[server_url] + identifier[archive_soup] . identifier[a] [ literal[string] ][ literal[int] :])
identifier[data_text] = identifier[archive_soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ). identifier[text]
identifier[archive] . identifier[downloads] = identifier[int] ( identifier[downloads_re] . identifier[search] ( identifier[data_text] ). identifier[group] ( literal[string] ))
identifier[match] = identifier[rating_re] . identifier[search] ( identifier[data_text] )
keyword[if] identifier[match] :
identifier[archive] . identifier[rating] = identifier[int] ( identifier[match] . identifier[group] ( literal[string] ))
identifier[time_data] ={ identifier[k] : identifier[int] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[timestamp_re] . identifier[search] ( identifier[data_text] ). identifier[groupdict] (). identifier[items] ()}
identifier[archive] . identifier[timestamp] = identifier[pytz] . identifier[timezone] ( literal[string] ). identifier[localize] ( identifier[datetime] (** identifier[time_data] ))
keyword[if] identifier[archive] . identifier[timestamp] > identifier[datetime] . identifier[utcnow] (). identifier[replace] ( identifier[tzinfo] = identifier[pytz] . identifier[utc] ):
keyword[raise] identifier[ProviderError] ( literal[string] )
identifier[archives] . identifier[append] ( identifier[archive] )
keyword[if] identifier[soup] . identifier[find] ( literal[string] , identifier[attrs] ={ literal[string] : literal[string] }, identifier[string] = literal[string] ) keyword[is] keyword[None] :
keyword[break]
identifier[page] += literal[int]
identifier[logger] . identifier[debug] ( literal[string] , identifier[len] ( identifier[archives] ))
keyword[return] identifier[archives] | def get_archives(self, title_id, language_code):
"""Get the archive list from a given `title_id` and `language_code`.
:param int title_id: title id.
:param int language_code: language code.
:return: the archives.
:rtype: list of :class:`LegendasTVArchive`
"""
logger.info('Getting archives for title %d and language %d', title_id, language_code)
archives = []
page = 1
while True:
# get the archive page
url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format(title=title_id, language=language_code, page=page)
r = self.session.get(url)
r.raise_for_status()
# parse the results
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
for archive_soup in soup.select('div.list_element > article > div'):
# create archive
archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text, 'pack' in archive_soup['class'], 'destaque' in archive_soup['class'], self.server_url + archive_soup.a['href'][1:])
# extract text containing downloads, rating and timestamp
data_text = archive_soup.find('p', class_='data').text
# match downloads
archive.downloads = int(downloads_re.search(data_text).group('downloads'))
# match rating
match = rating_re.search(data_text)
if match:
archive.rating = int(match.group('rating')) # depends on [control=['if'], data=[]]
# match timestamp and validate it
time_data = {k: int(v) for (k, v) in timestamp_re.search(data_text).groupdict().items()}
archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data))
if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc):
raise ProviderError('Archive timestamp is in the future') # depends on [control=['if'], data=[]]
# add archive
archives.append(archive) # depends on [control=['for'], data=['archive_soup']]
# stop on last page
if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None:
break # depends on [control=['if'], data=[]]
# increment page count
page += 1 # depends on [control=['while'], data=[]]
logger.debug('Found %d archives', len(archives))
return archives |
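downloads_re, rating_re and timestamp_re are module-level patterns not shown in this snippet; the stand-ins below are assumptions that only illustrate how a data line is parsed into a download count and a timestamp.

import re
from datetime import datetime

downloads_re = re.compile(r'(?P<downloads>\d+) downloads')   # stand-in pattern
timestamp_re = re.compile(r'(?P<day>\d+)/(?P<month>\d+)/(?P<year>\d+) - '
                          r'(?P<hour>\d+):(?P<minute>\d+)')  # stand-in pattern

data_text = '1234 downloads em 02/03/2018 - 14:05'
downloads = int(downloads_re.search(data_text).group('downloads'))
time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()}
print(downloads, datetime(**time_data))   # 1234 2018-03-02 14:05:00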
def run_wagtail_migration_before_core_34(apps, schema_editor):
"""
    Migration 34 needs migration 0040 from wagtail core,
    so this migration runs the wagtail migrations before
    molo core migration 34
"""
db_alias = schema_editor.connection.alias
emit_pre_migrate_signal(verbosity=2, interactive=False, db=db_alias) | def function[run_wagtail_migration_before_core_34, parameter[apps, schema_editor]]:
constant[
    Migration 34 needs migration 0040 from wagtail core,
    so this migration runs the wagtail migrations before
    molo core migration 34
]
variable[db_alias] assign[=] name[schema_editor].connection.alias
call[name[emit_pre_migrate_signal], parameter[]] | keyword[def] identifier[run_wagtail_migration_before_core_34] ( identifier[apps] , identifier[schema_editor] ):
literal[string]
identifier[db_alias] = identifier[schema_editor] . identifier[connection] . identifier[alias]
identifier[emit_pre_migrate_signal] ( identifier[verbosity] = literal[int] , identifier[interactive] = keyword[False] , identifier[db] = identifier[db_alias] ) | def run_wagtail_migration_before_core_34(apps, schema_editor):
"""
    Migration 34 needs migration 0040 from wagtail core,
    so this migration runs the wagtail migrations before
    molo core migration 34
"""
db_alias = schema_editor.connection.alias
emit_pre_migrate_signal(verbosity=2, interactive=False, db=db_alias) |
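A sketch of how such a helper is typically wired into a Django data migration via RunPython; run_wagtail_migration_before_core_34 is the function above, and the wagtail dependency name is an assumption.

from django.db import migrations

class Migration(migrations.Migration):
    # assumed dependency on the wagtail core migration the docstring refers to
    dependencies = [('wagtailcore', '0040_page_draft_title')]
    operations = [
        migrations.RunPython(run_wagtail_migration_before_core_34,
                             migrations.RunPython.noop),
    ]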
def setDerivedTypeContents(self, extensions=None, restrictions=None):
"""For derived types set appropriate parameter and
"""
if extensions:
ofwhat = list(self.ofwhat)
if type(extensions) in _seqtypes:
ofwhat += list(extensions)
else:
ofwhat.append(extensions)
elif restrictions:
if type(restrictions) in _seqtypes:
ofwhat = restrictions
else:
ofwhat = (restrictions,)
else:
return
self.ofwhat = tuple(ofwhat)
self.lenofwhat = len(self.ofwhat) | def function[setDerivedTypeContents, parameter[self, extensions, restrictions]]:
    constant[For derived types set the appropriate contents (ofwhat) from extensions or restrictions.
]
if name[extensions] begin[:]
variable[ofwhat] assign[=] call[name[list], parameter[name[self].ofwhat]]
if compare[call[name[type], parameter[name[extensions]]] in name[_seqtypes]] begin[:]
<ast.AugAssign object at 0x7da1b14fab30>
name[self].ofwhat assign[=] call[name[tuple], parameter[name[ofwhat]]]
name[self].lenofwhat assign[=] call[name[len], parameter[name[self].ofwhat]] | keyword[def] identifier[setDerivedTypeContents] ( identifier[self] , identifier[extensions] = keyword[None] , identifier[restrictions] = keyword[None] ):
literal[string]
keyword[if] identifier[extensions] :
identifier[ofwhat] = identifier[list] ( identifier[self] . identifier[ofwhat] )
keyword[if] identifier[type] ( identifier[extensions] ) keyword[in] identifier[_seqtypes] :
identifier[ofwhat] += identifier[list] ( identifier[extensions] )
keyword[else] :
identifier[ofwhat] . identifier[append] ( identifier[extensions] )
keyword[elif] identifier[restrictions] :
keyword[if] identifier[type] ( identifier[restrictions] ) keyword[in] identifier[_seqtypes] :
identifier[ofwhat] = identifier[restrictions]
keyword[else] :
identifier[ofwhat] =( identifier[restrictions] ,)
keyword[else] :
keyword[return]
identifier[self] . identifier[ofwhat] = identifier[tuple] ( identifier[ofwhat] )
identifier[self] . identifier[lenofwhat] = identifier[len] ( identifier[self] . identifier[ofwhat] ) | def setDerivedTypeContents(self, extensions=None, restrictions=None):
"""For derived types set appropriate parameter and
"""
if extensions:
ofwhat = list(self.ofwhat)
if type(extensions) in _seqtypes:
ofwhat += list(extensions) # depends on [control=['if'], data=[]]
else:
ofwhat.append(extensions) # depends on [control=['if'], data=[]]
elif restrictions:
if type(restrictions) in _seqtypes:
ofwhat = restrictions # depends on [control=['if'], data=[]]
else:
ofwhat = (restrictions,) # depends on [control=['if'], data=[]]
else:
return
self.ofwhat = tuple(ofwhat)
self.lenofwhat = len(self.ofwhat) |
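A toy demonstration of the two branches: extensions are appended to the existing ofwhat, while restrictions replace it. TC is a stand-in holder rather than the real type-code class, and _seqtypes mirrors the module-level tuple the method assumes.

_seqtypes = (list, tuple)

class TC:
    def __init__(self, *ofwhat):
        self.ofwhat = ofwhat
        self.lenofwhat = len(ofwhat)
    setDerivedTypeContents = setDerivedTypeContents   # reuse the function above

tc = TC('base')
tc.setDerivedTypeContents(extensions=['ext1', 'ext2'])   # extensions are appended
print(tc.ofwhat)                                         # ('base', 'ext1', 'ext2')
tc.setDerivedTypeContents(restrictions='only')           # restrictions replace
print(tc.ofwhat)                                         # ('only',)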
def get_stock_quote(self, code_list):
"""
        Get real-time quote data for subscribed stocks; subscription limits apply.
        For asynchronous push, see StockQuoteHandlerBase
        :param code_list: list of stock codes; every stock in code_list must be successfully subscribed before this call can run
        :return: (ret, data)
                ret == RET_OK returns a pd dataframe whose columns are described below
                ret != RET_OK returns an error string
                =====================  ===========  ==============================================================
                Field                  Type         Description
                =====================  ===========  ==============================================================
                code                   str          stock code
                data_date              str          date
                data_time              str          time (US stocks default to US Eastern time; HK and A-share stocks default to Beijing time)
                last_price             float        latest price
                open_price             float        today's opening price
                high_price             float        highest price
                low_price              float        lowest price
                prev_close_price       float        previous closing price
                volume                 int          traded volume
                turnover               float        traded value
                turnover_rate          float        turnover rate
                amplitude              int          amplitude
                suspension             bool         whether trading is suspended (True means suspended)
                listing_date           str          listing date (yyyy-MM-dd)
                price_spread           float        current price spread, i.e. the quote gap between adjacent bid or ask levels in the order book
                dark_status            str          dark (grey market) trading status, see DarkStatus
                strike_price           float        strike price
                contract_size          int          contract size (units per contract)
                open_interest          int          open interest (number of open contracts)
                implied_volatility     float        implied volatility
                premium                float        premium
                delta                  float        greek Delta
                gamma                  float        greek Gamma
                vega                   float        greek Vega
                theta                  float        greek Theta
                rho                    float        greek Rho
                =====================  ===========  ==============================================================
"""
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
StockQuoteQuery.pack_req,
StockQuoteQuery.unpack_rsp,
)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, quote_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'data_date', 'data_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price', 'volume',
'turnover', 'turnover_rate', 'amplitude', 'suspension',
'listing_date', 'price_spread', 'dark_status', 'strike_price',
'contract_size', 'open_interest', 'implied_volatility',
'premium', 'delta', 'gamma', 'vega', 'theta', 'rho'
]
quote_frame_table = pd.DataFrame(quote_list, columns=col_list)
return RET_OK, quote_frame_table | def function[get_stock_quote, parameter[self, code_list]]:
constant[
    Get real-time quote data for subscribed stocks; subscription limits apply.
    For asynchronous push, see StockQuoteHandlerBase
    :param code_list: list of stock codes; every stock in code_list must be successfully subscribed before this call can run
    :return: (ret, data)
            ret == RET_OK returns a pd dataframe whose columns are described below
            ret != RET_OK returns an error string
            =====================  ===========  ==============================================================
            Field                  Type         Description
            =====================  ===========  ==============================================================
            code                   str          stock code
            data_date              str          date
            data_time              str          time (US stocks default to US Eastern time; HK and A-share stocks default to Beijing time)
            last_price             float        latest price
            open_price             float        today's opening price
            high_price             float        highest price
            low_price              float        lowest price
            prev_close_price       float        previous closing price
            volume                 int          traded volume
            turnover               float        traded value
            turnover_rate          float        turnover rate
            amplitude              int          amplitude
            suspension             bool         whether trading is suspended (True means suspended)
            listing_date           str          listing date (yyyy-MM-dd)
            price_spread           float        current price spread, i.e. the quote gap between adjacent bid or ask levels in the order book
            dark_status            str          dark (grey market) trading status, see DarkStatus
            strike_price           float        strike price
            contract_size          int          contract size (units per contract)
            open_interest          int          open interest (number of open contracts)
            implied_volatility     float        implied volatility
            premium                float        premium
            delta                  float        greek Delta
            gamma                  float        greek Gamma
            vega                   float        greek Vega
            theta                  float        greek Theta
            rho                    float        greek Rho
            =====================  ===========  ==============================================================
]
variable[code_list] assign[=] call[name[unique_and_normalize_list], parameter[name[code_list]]]
if <ast.UnaryOp object at 0x7da18bc719f0> begin[:]
variable[error_str] assign[=] binary_operation[name[ERROR_STR_PREFIX] + constant[the type of code_list param is wrong]]
return[tuple[[<ast.Name object at 0x7da1b07bcc70>, <ast.Name object at 0x7da1b07bdb40>]]]
variable[query_processor] assign[=] call[name[self]._get_sync_query_processor, parameter[name[StockQuoteQuery].pack_req, name[StockQuoteQuery].unpack_rsp]]
variable[kargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b07bd450>, <ast.Constant object at 0x7da1b07bd2d0>], [<ast.Name object at 0x7da1b07bcf40>, <ast.Call object at 0x7da1b07bce50>]]
<ast.Tuple object at 0x7da1b07bd390> assign[=] call[name[query_processor], parameter[]]
if compare[name[ret_code] equal[==] name[RET_ERROR]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b07bf430>, <ast.Name object at 0x7da1b07bf400>]]]
variable[col_list] assign[=] list[[<ast.Constant object at 0x7da20c9909a0>, <ast.Constant object at 0x7da20c992f20>, <ast.Constant object at 0x7da20c992e30>, <ast.Constant object at 0x7da1b26ac250>, <ast.Constant object at 0x7da1b26ad8a0>, <ast.Constant object at 0x7da1b26af340>, <ast.Constant object at 0x7da1b26ac940>, <ast.Constant object at 0x7da1b26acd90>, <ast.Constant object at 0x7da1b26ae710>, <ast.Constant object at 0x7da1b26add80>, <ast.Constant object at 0x7da1b26ad870>, <ast.Constant object at 0x7da1b26ad660>, <ast.Constant object at 0x7da1b26ade40>, <ast.Constant object at 0x7da1b26af3a0>, <ast.Constant object at 0x7da1b26ac070>, <ast.Constant object at 0x7da1b26af4f0>, <ast.Constant object at 0x7da1b26ae050>, <ast.Constant object at 0x7da1b26ad750>, <ast.Constant object at 0x7da1b26af430>, <ast.Constant object at 0x7da1b26afdf0>, <ast.Constant object at 0x7da1b26afe20>, <ast.Constant object at 0x7da1b26ad960>, <ast.Constant object at 0x7da18f00fa60>, <ast.Constant object at 0x7da18f00efb0>, <ast.Constant object at 0x7da18f00d4e0>, <ast.Constant object at 0x7da18f00fcd0>]]
variable[quote_frame_table] assign[=] call[name[pd].DataFrame, parameter[name[quote_list]]]
return[tuple[[<ast.Name object at 0x7da18f00cca0>, <ast.Name object at 0x7da18f00d990>]]] | keyword[def] identifier[get_stock_quote] ( identifier[self] , identifier[code_list] ):
literal[string]
identifier[code_list] = identifier[unique_and_normalize_list] ( identifier[code_list] )
keyword[if] keyword[not] identifier[code_list] :
identifier[error_str] = identifier[ERROR_STR_PREFIX] + literal[string]
keyword[return] identifier[RET_ERROR] , identifier[error_str]
identifier[query_processor] = identifier[self] . identifier[_get_sync_query_processor] (
identifier[StockQuoteQuery] . identifier[pack_req] ,
identifier[StockQuoteQuery] . identifier[unpack_rsp] ,
)
identifier[kargs] ={
literal[string] : identifier[code_list] ,
literal[string] : identifier[self] . identifier[get_sync_conn_id] ()
}
identifier[ret_code] , identifier[msg] , identifier[quote_list] = identifier[query_processor] (** identifier[kargs] )
keyword[if] identifier[ret_code] == identifier[RET_ERROR] :
keyword[return] identifier[ret_code] , identifier[msg]
identifier[col_list] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string]
]
identifier[quote_frame_table] = identifier[pd] . identifier[DataFrame] ( identifier[quote_list] , identifier[columns] = identifier[col_list] )
keyword[return] identifier[RET_OK] , identifier[quote_frame_table] | def get_stock_quote(self, code_list):
"""
        Get real-time quote data for subscribed stocks; subscription limits apply.
        For asynchronous push, see StockQuoteHandlerBase
        :param code_list: list of stock codes; every stock in code_list must be successfully subscribed before this call can run
        :return: (ret, data)
                ret == RET_OK returns a pd dataframe whose columns are described below
                ret != RET_OK returns an error string
                =====================  ===========  ==============================================================
                Field                  Type         Description
                =====================  ===========  ==============================================================
                code                   str          stock code
                data_date              str          date
                data_time              str          time (US stocks default to US Eastern time; HK and A-share stocks default to Beijing time)
                last_price             float        latest price
                open_price             float        today's opening price
                high_price             float        highest price
                low_price              float        lowest price
                prev_close_price       float        previous closing price
                volume                 int          traded volume
                turnover               float        traded value
                turnover_rate          float        turnover rate
                amplitude              int          amplitude
                suspension             bool         whether trading is suspended (True means suspended)
                listing_date           str          listing date (yyyy-MM-dd)
                price_spread           float        current price spread, i.e. the quote gap between adjacent bid or ask levels in the order book
                dark_status            str          dark (grey market) trading status, see DarkStatus
                strike_price           float        strike price
                contract_size          int          contract size (units per contract)
                open_interest          int          open interest (number of open contracts)
                implied_volatility     float        implied volatility
                premium                float        premium
                delta                  float        greek Delta
                gamma                  float        greek Gamma
                vega                   float        greek Vega
                theta                  float        greek Theta
                rho                    float        greek Rho
                =====================  ===========  ==============================================================
"""
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + 'the type of code_list param is wrong'
return (RET_ERROR, error_str) # depends on [control=['if'], data=[]]
query_processor = self._get_sync_query_processor(StockQuoteQuery.pack_req, StockQuoteQuery.unpack_rsp)
kargs = {'stock_list': code_list, 'conn_id': self.get_sync_conn_id()}
(ret_code, msg, quote_list) = query_processor(**kargs)
if ret_code == RET_ERROR:
return (ret_code, msg) # depends on [control=['if'], data=['ret_code']]
col_list = ['code', 'data_date', 'data_time', 'last_price', 'open_price', 'high_price', 'low_price', 'prev_close_price', 'volume', 'turnover', 'turnover_rate', 'amplitude', 'suspension', 'listing_date', 'price_spread', 'dark_status', 'strike_price', 'contract_size', 'open_interest', 'implied_volatility', 'premium', 'delta', 'gamma', 'vega', 'theta', 'rho']
quote_frame_table = pd.DataFrame(quote_list, columns=col_list)
return (RET_OK, quote_frame_table) |
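Typical wiring against a locally running OpenD gateway; the import path, host and port are assumptions, and the stock must be subscribed before get_stock_quote is called.

from futuquant import OpenQuoteContext, SubType, RET_OK

quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)   # local OpenD gateway
quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE])           # subscription is required
ret, data = quote_ctx.get_stock_quote(['HK.00700'])
if ret == RET_OK:
    print(data[['code', 'last_price', 'volume']])
quote_ctx.close()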
def make_compare(key, value, obj):
"Map a key name to a specific comparison function"
if '__' not in key:
# If no __ exists, default to doing an "exact" comparison
key, comp = key, 'exact'
else:
key, comp = key.rsplit('__', 1)
# Check if comp is valid
if hasattr(Compare, comp):
return getattr(Compare, comp)(key, value, obj)
raise AttributeError("No comparison '%s'" % comp) | def function[make_compare, parameter[key, value, obj]]:
constant[Map a key name to a specific comparison function]
if compare[constant[__] <ast.NotIn object at 0x7da2590d7190> name[key]] begin[:]
<ast.Tuple object at 0x7da20c6e4b50> assign[=] tuple[[<ast.Name object at 0x7da20c6e5f30>, <ast.Constant object at 0x7da20c6e75e0>]]
if call[name[hasattr], parameter[name[Compare], name[comp]]] begin[:]
return[call[call[name[getattr], parameter[name[Compare], name[comp]]], parameter[name[key], name[value], name[obj]]]]
<ast.Raise object at 0x7da20c6e7130> | keyword[def] identifier[make_compare] ( identifier[key] , identifier[value] , identifier[obj] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[key] :
identifier[key] , identifier[comp] = identifier[key] , literal[string]
keyword[else] :
identifier[key] , identifier[comp] = identifier[key] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[if] identifier[hasattr] ( identifier[Compare] , identifier[comp] ):
keyword[return] identifier[getattr] ( identifier[Compare] , identifier[comp] )( identifier[key] , identifier[value] , identifier[obj] )
keyword[raise] identifier[AttributeError] ( literal[string] % identifier[comp] ) | def make_compare(key, value, obj):
"""Map a key name to a specific comparison function"""
if '__' not in key:
# If no __ exists, default to doing an "exact" comparison
(key, comp) = (key, 'exact') # depends on [control=['if'], data=['key']]
else:
(key, comp) = key.rsplit('__', 1)
# Check if comp is valid
if hasattr(Compare, comp):
return getattr(Compare, comp)(key, value, obj) # depends on [control=['if'], data=[]]
raise AttributeError("No comparison '%s'" % comp) |
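The Compare class is defined elsewhere in the module; a dict-backed stand-in (an assumption) shows how the key__op suffix selects the comparison:

class Compare:
    @staticmethod
    def exact(key, value, obj):
        return obj.get(key) == value

    @staticmethod
    def gt(key, value, obj):
        return obj.get(key) > value

print(make_compare('age', 21, {'age': 21}))      # True, implicit __exact
print(make_compare('age__gt', 18, {'age': 21}))  # True, explicit __gt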
def variants(case_id):
"""Show all variants for a case."""
filters = parse_filters()
values = [value for key, value in iteritems(filters)
if not isinstance(value, dict) and key != 'skip']
is_active = any(values)
variants, nr_of_variants = app.db.variants(
case_id,
skip=filters['skip'],
filters={
'gene_ids': filters['gene_symbols'],
'frequency': filters.get('frequency'),
'cadd': filters.get('cadd'),
'sv_len': filters.get('sv_len'),
'consequence': filters['selected_consequences'],
'genetic_models': filters['selected_models'],
'sv_types': filters['selected_sv_types'],
'gene_lists': filters['gene_lists'],
'impact_severities': filters['impact_severities'],
'gemini_query': filters['gemini_query'],
'range': filters['range'],
}
)
gene_lists = ([gene_list.list_id for gene_list in app.db.gene_lists()]
if app.config['STORE_ENABLED'] else [])
queries = ([(query.name or query.query, query.query) for query
in app.db.gemini_queries()]
if app.config['STORE_ENABLED'] else [])
kwargs = dict(variants=variants, case_id=case_id, db=app.db,
filters=filters, consequences=SO_TERMS,
inheritance_models=INHERITANCE_MODELS_SHORT,
gene_lists=gene_lists, impact_severities=IMPACT_LEVELS,
is_active=is_active, nr_of_variants=nr_of_variants,
queries=queries)
if app.db.variant_type == 'sv':
return render_template('sv_variants.html', sv_types=SV_TYPES, **kwargs)
else:
return render_template('variants.html', **kwargs) | def function[variants, parameter[case_id]]:
constant[Show all variants for a case.]
variable[filters] assign[=] call[name[parse_filters], parameter[]]
variable[values] assign[=] <ast.ListComp object at 0x7da18f09f610>
variable[is_active] assign[=] call[name[any], parameter[name[values]]]
<ast.Tuple object at 0x7da18f09f460> assign[=] call[name[app].db.variants, parameter[name[case_id]]]
variable[gene_lists] assign[=] <ast.IfExp object at 0x7da18f09d2d0>
variable[queries] assign[=] <ast.IfExp object at 0x7da18f09edd0>
variable[kwargs] assign[=] call[name[dict], parameter[]]
if compare[name[app].db.variant_type equal[==] constant[sv]] begin[:]
return[call[name[render_template], parameter[constant[sv_variants.html]]]] | keyword[def] identifier[variants] ( identifier[case_id] ):
literal[string]
identifier[filters] = identifier[parse_filters] ()
identifier[values] =[ identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[filters] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[and] identifier[key] != literal[string] ]
identifier[is_active] = identifier[any] ( identifier[values] )
identifier[variants] , identifier[nr_of_variants] = identifier[app] . identifier[db] . identifier[variants] (
identifier[case_id] ,
identifier[skip] = identifier[filters] [ literal[string] ],
identifier[filters] ={
literal[string] : identifier[filters] [ literal[string] ],
literal[string] : identifier[filters] . identifier[get] ( literal[string] ),
literal[string] : identifier[filters] . identifier[get] ( literal[string] ),
literal[string] : identifier[filters] . identifier[get] ( literal[string] ),
literal[string] : identifier[filters] [ literal[string] ],
literal[string] : identifier[filters] [ literal[string] ],
literal[string] : identifier[filters] [ literal[string] ],
literal[string] : identifier[filters] [ literal[string] ],
literal[string] : identifier[filters] [ literal[string] ],
literal[string] : identifier[filters] [ literal[string] ],
literal[string] : identifier[filters] [ literal[string] ],
}
)
identifier[gene_lists] =([ identifier[gene_list] . identifier[list_id] keyword[for] identifier[gene_list] keyword[in] identifier[app] . identifier[db] . identifier[gene_lists] ()]
keyword[if] identifier[app] . identifier[config] [ literal[string] ] keyword[else] [])
identifier[queries] =([( identifier[query] . identifier[name] keyword[or] identifier[query] . identifier[query] , identifier[query] . identifier[query] ) keyword[for] identifier[query]
keyword[in] identifier[app] . identifier[db] . identifier[gemini_queries] ()]
keyword[if] identifier[app] . identifier[config] [ literal[string] ] keyword[else] [])
identifier[kwargs] = identifier[dict] ( identifier[variants] = identifier[variants] , identifier[case_id] = identifier[case_id] , identifier[db] = identifier[app] . identifier[db] ,
identifier[filters] = identifier[filters] , identifier[consequences] = identifier[SO_TERMS] ,
identifier[inheritance_models] = identifier[INHERITANCE_MODELS_SHORT] ,
identifier[gene_lists] = identifier[gene_lists] , identifier[impact_severities] = identifier[IMPACT_LEVELS] ,
identifier[is_active] = identifier[is_active] , identifier[nr_of_variants] = identifier[nr_of_variants] ,
identifier[queries] = identifier[queries] )
keyword[if] identifier[app] . identifier[db] . identifier[variant_type] == literal[string] :
keyword[return] identifier[render_template] ( literal[string] , identifier[sv_types] = identifier[SV_TYPES] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[render_template] ( literal[string] ,** identifier[kwargs] ) | def variants(case_id):
"""Show all variants for a case."""
filters = parse_filters()
values = [value for (key, value) in iteritems(filters) if not isinstance(value, dict) and key != 'skip']
is_active = any(values)
(variants, nr_of_variants) = app.db.variants(case_id, skip=filters['skip'], filters={'gene_ids': filters['gene_symbols'], 'frequency': filters.get('frequency'), 'cadd': filters.get('cadd'), 'sv_len': filters.get('sv_len'), 'consequence': filters['selected_consequences'], 'genetic_models': filters['selected_models'], 'sv_types': filters['selected_sv_types'], 'gene_lists': filters['gene_lists'], 'impact_severities': filters['impact_severities'], 'gemini_query': filters['gemini_query'], 'range': filters['range']})
gene_lists = [gene_list.list_id for gene_list in app.db.gene_lists()] if app.config['STORE_ENABLED'] else []
queries = [(query.name or query.query, query.query) for query in app.db.gemini_queries()] if app.config['STORE_ENABLED'] else []
kwargs = dict(variants=variants, case_id=case_id, db=app.db, filters=filters, consequences=SO_TERMS, inheritance_models=INHERITANCE_MODELS_SHORT, gene_lists=gene_lists, impact_severities=IMPACT_LEVELS, is_active=is_active, nr_of_variants=nr_of_variants, queries=queries)
if app.db.variant_type == 'sv':
return render_template('sv_variants.html', sv_types=SV_TYPES, **kwargs) # depends on [control=['if'], data=[]]
else:
return render_template('variants.html', **kwargs) |
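is_active flags whether any real filter is set, ignoring pagination ('skip') and dict-valued entries such as 'range'; in isolation (py3 items() in place of iteritems):

filters = {'skip': 0, 'frequency': None, 'gene_symbols': ['BRCA1'], 'range': {}}
values = [value for key, value in filters.items()
          if not isinstance(value, dict) and key != 'skip']
print(any(values))   # True: gene_symbols is a non-empty, non-dict filter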
def _updateExtraSelections(self):
"""Highlight current line
"""
cursorColumnIndex = self.textCursor().positionInBlock()
bracketSelections = self._bracketHighlighter.extraSelections(self,
self.textCursor().block(),
cursorColumnIndex)
selections = self._currentLineExtraSelections() + \
self._rectangularSelection.selections() + \
bracketSelections + \
self._userExtraSelections
self._nonVimExtraSelections = selections
if self._vim is None:
allSelections = selections
else:
allSelections = selections + self._vim.extraSelections()
QPlainTextEdit.setExtraSelections(self, allSelections) | def function[_updateExtraSelections, parameter[self]]:
    constant[Highlight the current line, matched brackets, rectangular selection and user/vim extra selections
]
variable[cursorColumnIndex] assign[=] call[call[name[self].textCursor, parameter[]].positionInBlock, parameter[]]
variable[bracketSelections] assign[=] call[name[self]._bracketHighlighter.extraSelections, parameter[name[self], call[call[name[self].textCursor, parameter[]].block, parameter[]], name[cursorColumnIndex]]]
variable[selections] assign[=] binary_operation[binary_operation[binary_operation[call[name[self]._currentLineExtraSelections, parameter[]] + call[name[self]._rectangularSelection.selections, parameter[]]] + name[bracketSelections]] + name[self]._userExtraSelections]
name[self]._nonVimExtraSelections assign[=] name[selections]
if compare[name[self]._vim is constant[None]] begin[:]
variable[allSelections] assign[=] name[selections]
call[name[QPlainTextEdit].setExtraSelections, parameter[name[self], name[allSelections]]] | keyword[def] identifier[_updateExtraSelections] ( identifier[self] ):
literal[string]
identifier[cursorColumnIndex] = identifier[self] . identifier[textCursor] (). identifier[positionInBlock] ()
identifier[bracketSelections] = identifier[self] . identifier[_bracketHighlighter] . identifier[extraSelections] ( identifier[self] ,
identifier[self] . identifier[textCursor] (). identifier[block] (),
identifier[cursorColumnIndex] )
identifier[selections] = identifier[self] . identifier[_currentLineExtraSelections] ()+ identifier[self] . identifier[_rectangularSelection] . identifier[selections] ()+ identifier[bracketSelections] + identifier[self] . identifier[_userExtraSelections]
identifier[self] . identifier[_nonVimExtraSelections] = identifier[selections]
keyword[if] identifier[self] . identifier[_vim] keyword[is] keyword[None] :
identifier[allSelections] = identifier[selections]
keyword[else] :
identifier[allSelections] = identifier[selections] + identifier[self] . identifier[_vim] . identifier[extraSelections] ()
identifier[QPlainTextEdit] . identifier[setExtraSelections] ( identifier[self] , identifier[allSelections] ) | def _updateExtraSelections(self):
"""Highlight current line
"""
cursorColumnIndex = self.textCursor().positionInBlock()
bracketSelections = self._bracketHighlighter.extraSelections(self, self.textCursor().block(), cursorColumnIndex)
selections = self._currentLineExtraSelections() + self._rectangularSelection.selections() + bracketSelections + self._userExtraSelections
self._nonVimExtraSelections = selections
if self._vim is None:
allSelections = selections # depends on [control=['if'], data=[]]
else:
allSelections = selections + self._vim.extraSelections()
QPlainTextEdit.setExtraSelections(self, allSelections) |
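
# The list concatenation above is the whole trick: Qt accepts one flat list of
# QTextEdit.ExtraSelection objects. A minimal sketch of that mechanism, assuming
# PyQt5 is installed (the colour and text are illustrative):
import sys
from PyQt5.QtWidgets import QApplication, QPlainTextEdit, QTextEdit
from PyQt5.QtGui import QColor, QTextFormat

app = QApplication(sys.argv)
edit = QPlainTextEdit("first line\nsecond line")

sel = QTextEdit.ExtraSelection()
sel.format.setBackground(QColor("#fff8b0"))                   # current-line colour
sel.format.setProperty(QTextFormat.FullWidthSelection, True)  # span the whole line
sel.cursor = edit.textCursor()
sel.cursor.clearSelection()                                   # no range: just the cursor's line
edit.setExtraSelections([sel])                                # several sources would be concatenated here
edit.show()
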
def create_or_update_user(self, user_id, password, roles):
"""
Create a new user record, or update an existing one
:param user_id:
user ID to update or create
:param password:
new password, or None to leave unchanged
:param roles:
new roles, or None to leave unchanged
:return:
the action taken, one of "none", "update", "create"
:raises:
            ValueError if there is no existing user and password is None
"""
action = "update"
self.con.execute('SELECT 1 FROM archive_users WHERE userId = %s;', (user_id,))
results = self.con.fetchall()
if len(results) == 0:
if password is None:
raise ValueError("Must specify an initial password when creating a new user!")
action = "create"
self.con.execute('INSERT INTO archive_users (userId, pwHash) VALUES (%s,%s)',
(user_id, passlib.hash.bcrypt.encrypt(password)))
if password is None and roles is None:
action = "none"
if password is not None:
self.con.execute('UPDATE archive_users SET pwHash = %s WHERE userId = %s',
(passlib.hash.bcrypt.encrypt(password), user_id))
if roles is not None:
# Clear out existing roles, and delete any unused roles
self.con.execute("DELETE r FROM archive_user_roles AS r WHERE "
"(SELECT u.userId FROM archive_users AS u WHERE r.userId=u.uid)=%s;", (user_id,))
self.con.execute("DELETE r FROM archive_roles AS r WHERE r.uid NOT IN "
"(SELECT roleId FROM archive_user_roles);")
for role in roles:
self.con.execute("SELECT uid FROM archive_roles WHERE name=%s;", (role,))
results = self.con.fetchall()
if len(results) < 1:
self.con.execute("INSERT INTO archive_roles (name) VALUES (%s);", (role,))
self.con.execute("SELECT uid FROM archive_roles WHERE name=%s;", (role,))
results = self.con.fetchall()
self.con.execute('INSERT INTO archive_user_roles (userId, roleId) VALUES '
'((SELECT u.uid FROM archive_users u WHERE u.userId=%s),'
'%s)', (user_id, results[0]['uid']))
return action | def function[create_or_update_user, parameter[self, user_id, password, roles]]:
constant[
Create a new user record, or update an existing one
:param user_id:
user ID to update or create
:param password:
new password, or None to leave unchanged
:param roles:
new roles, or None to leave unchanged
:return:
the action taken, one of "none", "update", "create"
:raises:
            ValueError if there is no existing user and password is None
]
variable[action] assign[=] constant[update]
call[name[self].con.execute, parameter[constant[SELECT 1 FROM archive_users WHERE userId = %s;], tuple[[<ast.Name object at 0x7da1b0aedbd0>]]]]
variable[results] assign[=] call[name[self].con.fetchall, parameter[]]
if compare[call[name[len], parameter[name[results]]] equal[==] constant[0]] begin[:]
if compare[name[password] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0aef010>
variable[action] assign[=] constant[create]
call[name[self].con.execute, parameter[constant[INSERT INTO archive_users (userId, pwHash) VALUES (%s,%s)], tuple[[<ast.Name object at 0x7da1b0aedb70>, <ast.Call object at 0x7da1b0aed330>]]]]
if <ast.BoolOp object at 0x7da1b0aed6c0> begin[:]
variable[action] assign[=] constant[none]
if compare[name[password] is_not constant[None]] begin[:]
call[name[self].con.execute, parameter[constant[UPDATE archive_users SET pwHash = %s WHERE userId = %s], tuple[[<ast.Call object at 0x7da1b0aef220>, <ast.Name object at 0x7da1b0aec100>]]]]
if compare[name[roles] is_not constant[None]] begin[:]
call[name[self].con.execute, parameter[constant[DELETE r FROM archive_user_roles AS r WHERE (SELECT u.userId FROM archive_users AS u WHERE r.userId=u.uid)=%s;], tuple[[<ast.Name object at 0x7da1b0aeed40>]]]]
call[name[self].con.execute, parameter[constant[DELETE r FROM archive_roles AS r WHERE r.uid NOT IN (SELECT roleId FROM archive_user_roles);]]]
for taget[name[role]] in starred[name[roles]] begin[:]
call[name[self].con.execute, parameter[constant[SELECT uid FROM archive_roles WHERE name=%s;], tuple[[<ast.Name object at 0x7da1b0aec040>]]]]
variable[results] assign[=] call[name[self].con.fetchall, parameter[]]
if compare[call[name[len], parameter[name[results]]] less[<] constant[1]] begin[:]
call[name[self].con.execute, parameter[constant[INSERT INTO archive_roles (name) VALUES (%s);], tuple[[<ast.Name object at 0x7da1b0aed390>]]]]
call[name[self].con.execute, parameter[constant[SELECT uid FROM archive_roles WHERE name=%s;], tuple[[<ast.Name object at 0x7da1b0a3df60>]]]]
variable[results] assign[=] call[name[self].con.fetchall, parameter[]]
call[name[self].con.execute, parameter[constant[INSERT INTO archive_user_roles (userId, roleId) VALUES ((SELECT u.uid FROM archive_users u WHERE u.userId=%s),%s)], tuple[[<ast.Name object at 0x7da1b0a3eef0>, <ast.Subscript object at 0x7da1b0a3ce50>]]]]
return[name[action]] | keyword[def] identifier[create_or_update_user] ( identifier[self] , identifier[user_id] , identifier[password] , identifier[roles] ):
literal[string]
identifier[action] = literal[string]
identifier[self] . identifier[con] . identifier[execute] ( literal[string] ,( identifier[user_id] ,))
identifier[results] = identifier[self] . identifier[con] . identifier[fetchall] ()
keyword[if] identifier[len] ( identifier[results] )== literal[int] :
keyword[if] identifier[password] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[action] = literal[string]
identifier[self] . identifier[con] . identifier[execute] ( literal[string] ,
( identifier[user_id] , identifier[passlib] . identifier[hash] . identifier[bcrypt] . identifier[encrypt] ( identifier[password] )))
keyword[if] identifier[password] keyword[is] keyword[None] keyword[and] identifier[roles] keyword[is] keyword[None] :
identifier[action] = literal[string]
keyword[if] identifier[password] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[con] . identifier[execute] ( literal[string] ,
( identifier[passlib] . identifier[hash] . identifier[bcrypt] . identifier[encrypt] ( identifier[password] ), identifier[user_id] ))
keyword[if] identifier[roles] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[con] . identifier[execute] ( literal[string]
literal[string] ,( identifier[user_id] ,))
identifier[self] . identifier[con] . identifier[execute] ( literal[string]
literal[string] )
keyword[for] identifier[role] keyword[in] identifier[roles] :
identifier[self] . identifier[con] . identifier[execute] ( literal[string] ,( identifier[role] ,))
identifier[results] = identifier[self] . identifier[con] . identifier[fetchall] ()
keyword[if] identifier[len] ( identifier[results] )< literal[int] :
identifier[self] . identifier[con] . identifier[execute] ( literal[string] ,( identifier[role] ,))
identifier[self] . identifier[con] . identifier[execute] ( literal[string] ,( identifier[role] ,))
identifier[results] = identifier[self] . identifier[con] . identifier[fetchall] ()
identifier[self] . identifier[con] . identifier[execute] ( literal[string]
literal[string]
literal[string] ,( identifier[user_id] , identifier[results] [ literal[int] ][ literal[string] ]))
keyword[return] identifier[action] | def create_or_update_user(self, user_id, password, roles):
"""
Create a new user record, or update an existing one
:param user_id:
user ID to update or create
:param password:
new password, or None to leave unchanged
:param roles:
new roles, or None to leave unchanged
:return:
the action taken, one of "none", "update", "create"
:raises:
            ValueError if there is no existing user and password is None
"""
action = 'update'
self.con.execute('SELECT 1 FROM archive_users WHERE userId = %s;', (user_id,))
results = self.con.fetchall()
if len(results) == 0:
if password is None:
raise ValueError('Must specify an initial password when creating a new user!') # depends on [control=['if'], data=[]]
action = 'create'
self.con.execute('INSERT INTO archive_users (userId, pwHash) VALUES (%s,%s)', (user_id, passlib.hash.bcrypt.encrypt(password))) # depends on [control=['if'], data=[]]
if password is None and roles is None:
action = 'none' # depends on [control=['if'], data=[]]
if password is not None:
self.con.execute('UPDATE archive_users SET pwHash = %s WHERE userId = %s', (passlib.hash.bcrypt.encrypt(password), user_id)) # depends on [control=['if'], data=['password']]
if roles is not None:
# Clear out existing roles, and delete any unused roles
self.con.execute('DELETE r FROM archive_user_roles AS r WHERE (SELECT u.userId FROM archive_users AS u WHERE r.userId=u.uid)=%s;', (user_id,))
self.con.execute('DELETE r FROM archive_roles AS r WHERE r.uid NOT IN (SELECT roleId FROM archive_user_roles);')
for role in roles:
self.con.execute('SELECT uid FROM archive_roles WHERE name=%s;', (role,))
results = self.con.fetchall()
if len(results) < 1:
self.con.execute('INSERT INTO archive_roles (name) VALUES (%s);', (role,))
self.con.execute('SELECT uid FROM archive_roles WHERE name=%s;', (role,))
results = self.con.fetchall() # depends on [control=['if'], data=[]]
self.con.execute('INSERT INTO archive_user_roles (userId, roleId) VALUES ((SELECT u.uid FROM archive_users u WHERE u.userId=%s),%s)', (user_id, results[0]['uid'])) # depends on [control=['for'], data=['role']]
return action # depends on [control=['if'], data=['roles']] |
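
# A minimal sketch of the password handling above, assuming passlib (with a
# bcrypt backend) is installed. bcrypt.encrypt is the legacy spelling the
# function uses; bcrypt.hash is the modern equivalent, and verify() checks a
# candidate password against a stored hash.
from passlib.hash import bcrypt

pw_hash = bcrypt.hash("s3cret")           # salted hash, safe to store in archive_users.pwHash
print(bcrypt.verify("s3cret", pw_hash))   # True
print(bcrypt.verify("wrong", pw_hash))    # False
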
def cli(env, identifier, details):
"""Invoices and all that mess"""
manager = AccountManager(env.client)
top_items = manager.get_billing_items(identifier)
title = "Invoice %s" % identifier
table = formatting.Table(["Item Id", "Category", "Description", "Single",
"Monthly", "Create Date", "Location"], title=title)
table.align['category'] = 'l'
table.align['description'] = 'l'
for item in top_items:
fqdn = "%s.%s" % (item.get('hostName', ''), item.get('domainName', ''))
# category id=2046, ram_usage doesn't have a name...
category = utils.lookup(item, 'category', 'name') or item.get('categoryCode')
description = nice_string(item.get('description'))
if fqdn != '.':
description = "%s (%s)" % (item.get('description'), fqdn)
table.add_row([
item.get('id'),
category,
nice_string(description),
"$%.2f" % float(item.get('oneTimeAfterTaxAmount')),
"$%.2f" % float(item.get('recurringAfterTaxAmount')),
utils.clean_time(item.get('createDate'), out_format="%Y-%m-%d"),
utils.lookup(item, 'location', 'name')
])
if details:
for child in item.get('children', []):
table.add_row([
'>>>',
utils.lookup(child, 'category', 'name'),
nice_string(child.get('description')),
"$%.2f" % float(child.get('oneTimeAfterTaxAmount')),
"$%.2f" % float(child.get('recurringAfterTaxAmount')),
'---',
'---'
])
env.fout(table) | def function[cli, parameter[env, identifier, details]]:
constant[Invoices and all that mess]
variable[manager] assign[=] call[name[AccountManager], parameter[name[env].client]]
variable[top_items] assign[=] call[name[manager].get_billing_items, parameter[name[identifier]]]
variable[title] assign[=] binary_operation[constant[Invoice %s] <ast.Mod object at 0x7da2590d6920> name[identifier]]
variable[table] assign[=] call[name[formatting].Table, parameter[list[[<ast.Constant object at 0x7da207f99720>, <ast.Constant object at 0x7da207f9b550>, <ast.Constant object at 0x7da207f9bf70>, <ast.Constant object at 0x7da207f9ac20>, <ast.Constant object at 0x7da207f9a7a0>, <ast.Constant object at 0x7da207f99150>, <ast.Constant object at 0x7da207f9ab00>]]]]
call[name[table].align][constant[category]] assign[=] constant[l]
call[name[table].align][constant[description]] assign[=] constant[l]
for taget[name[item]] in starred[name[top_items]] begin[:]
variable[fqdn] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f98640>, <ast.Call object at 0x7da207f99ab0>]]]
variable[category] assign[=] <ast.BoolOp object at 0x7da207f9b460>
variable[description] assign[=] call[name[nice_string], parameter[call[name[item].get, parameter[constant[description]]]]]
if compare[name[fqdn] not_equal[!=] constant[.]] begin[:]
variable[description] assign[=] binary_operation[constant[%s (%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f99b70>, <ast.Name object at 0x7da207f9a7d0>]]]
call[name[table].add_row, parameter[list[[<ast.Call object at 0x7da207f98b20>, <ast.Name object at 0x7da207f98df0>, <ast.Call object at 0x7da207f996f0>, <ast.BinOp object at 0x7da207f98940>, <ast.BinOp object at 0x7da207f98af0>, <ast.Call object at 0x7da207f99ff0>, <ast.Call object at 0x7da207f9afe0>]]]]
if name[details] begin[:]
for taget[name[child]] in starred[call[name[item].get, parameter[constant[children], list[[]]]]] begin[:]
call[name[table].add_row, parameter[list[[<ast.Constant object at 0x7da20c992440>, <ast.Call object at 0x7da20c991d50>, <ast.Call object at 0x7da20c991450>, <ast.BinOp object at 0x7da20c990d30>, <ast.BinOp object at 0x7da20c991360>, <ast.Constant object at 0x7da20c993e50>, <ast.Constant object at 0x7da18c4ce5c0>]]]]
call[name[env].fout, parameter[name[table]]] | keyword[def] identifier[cli] ( identifier[env] , identifier[identifier] , identifier[details] ):
literal[string]
identifier[manager] = identifier[AccountManager] ( identifier[env] . identifier[client] )
identifier[top_items] = identifier[manager] . identifier[get_billing_items] ( identifier[identifier] )
identifier[title] = literal[string] % identifier[identifier]
identifier[table] = identifier[formatting] . identifier[Table] ([ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ], identifier[title] = identifier[title] )
identifier[table] . identifier[align] [ literal[string] ]= literal[string]
identifier[table] . identifier[align] [ literal[string] ]= literal[string]
keyword[for] identifier[item] keyword[in] identifier[top_items] :
identifier[fqdn] = literal[string] %( identifier[item] . identifier[get] ( literal[string] , literal[string] ), identifier[item] . identifier[get] ( literal[string] , literal[string] ))
identifier[category] = identifier[utils] . identifier[lookup] ( identifier[item] , literal[string] , literal[string] ) keyword[or] identifier[item] . identifier[get] ( literal[string] )
identifier[description] = identifier[nice_string] ( identifier[item] . identifier[get] ( literal[string] ))
keyword[if] identifier[fqdn] != literal[string] :
identifier[description] = literal[string] %( identifier[item] . identifier[get] ( literal[string] ), identifier[fqdn] )
identifier[table] . identifier[add_row] ([
identifier[item] . identifier[get] ( literal[string] ),
identifier[category] ,
identifier[nice_string] ( identifier[description] ),
literal[string] % identifier[float] ( identifier[item] . identifier[get] ( literal[string] )),
literal[string] % identifier[float] ( identifier[item] . identifier[get] ( literal[string] )),
identifier[utils] . identifier[clean_time] ( identifier[item] . identifier[get] ( literal[string] ), identifier[out_format] = literal[string] ),
identifier[utils] . identifier[lookup] ( identifier[item] , literal[string] , literal[string] )
])
keyword[if] identifier[details] :
keyword[for] identifier[child] keyword[in] identifier[item] . identifier[get] ( literal[string] ,[]):
identifier[table] . identifier[add_row] ([
literal[string] ,
identifier[utils] . identifier[lookup] ( identifier[child] , literal[string] , literal[string] ),
identifier[nice_string] ( identifier[child] . identifier[get] ( literal[string] )),
literal[string] % identifier[float] ( identifier[child] . identifier[get] ( literal[string] )),
literal[string] % identifier[float] ( identifier[child] . identifier[get] ( literal[string] )),
literal[string] ,
literal[string]
])
identifier[env] . identifier[fout] ( identifier[table] ) | def cli(env, identifier, details):
"""Invoices and all that mess"""
manager = AccountManager(env.client)
top_items = manager.get_billing_items(identifier)
title = 'Invoice %s' % identifier
table = formatting.Table(['Item Id', 'Category', 'Description', 'Single', 'Monthly', 'Create Date', 'Location'], title=title)
table.align['category'] = 'l'
table.align['description'] = 'l'
for item in top_items:
fqdn = '%s.%s' % (item.get('hostName', ''), item.get('domainName', ''))
# category id=2046, ram_usage doesn't have a name...
category = utils.lookup(item, 'category', 'name') or item.get('categoryCode')
description = nice_string(item.get('description'))
if fqdn != '.':
description = '%s (%s)' % (item.get('description'), fqdn) # depends on [control=['if'], data=['fqdn']]
table.add_row([item.get('id'), category, nice_string(description), '$%.2f' % float(item.get('oneTimeAfterTaxAmount')), '$%.2f' % float(item.get('recurringAfterTaxAmount')), utils.clean_time(item.get('createDate'), out_format='%Y-%m-%d'), utils.lookup(item, 'location', 'name')])
if details:
for child in item.get('children', []):
table.add_row(['>>>', utils.lookup(child, 'category', 'name'), nice_string(child.get('description')), '$%.2f' % float(child.get('oneTimeAfterTaxAmount')), '$%.2f' % float(child.get('recurringAfterTaxAmount')), '---', '---']) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
env.fout(table) |
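
# The cell values above are plain string formatting; the same conversions in
# isolation. The amounts and timestamp are illustrative, and datetime stands in
# for the assumed SoftLayer utils.clean_time helper.
from datetime import datetime

one_time = "19.0000"                      # the API returns amounts as strings
print("$%.2f" % float(one_time))          # -> $19.00

create_date = "2023-05-04T10:15:00-06:00"
print(datetime.fromisoformat(create_date).strftime("%Y-%m-%d"))  # -> 2023-05-04
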
def make_wcs(shape, galactic=False):
"""
Create a simple celestial WCS object in either the ICRS or Galactic
coordinate frame.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array to be used with the output
`~astropy.wcs.WCS` object.
galactic : bool, optional
If `True`, then the output WCS will be in the Galactic
coordinate frame. If `False` (default), then the output WCS
will be in the ICRS coordinate frame.
Returns
-------
wcs : `~astropy.wcs.WCS` object
The world coordinate system (WCS) transformation.
See Also
--------
make_imagehdu
Examples
--------
>>> from photutils.datasets import make_wcs
>>> shape = (100, 100)
>>> wcs = make_wcs(shape)
>>> print(wcs.wcs.crpix) # doctest: +FLOAT_CMP
[50. 50.]
>>> print(wcs.wcs.crval) # doctest: +FLOAT_CMP
[197.8925 -1.36555556]
"""
wcs = WCS(naxis=2)
rho = np.pi / 3.
scale = 0.1 / 3600.
if astropy_version < '3.1':
wcs._naxis1 = shape[1] # nx
wcs._naxis2 = shape[0] # ny
else:
wcs.pixel_shape = shape
wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2] # 1-indexed (x, y)
wcs.wcs.crval = [197.8925, -1.36555556]
wcs.wcs.cunit = ['deg', 'deg']
wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
if not galactic:
wcs.wcs.radesys = 'ICRS'
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
else:
wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
return wcs | def function[make_wcs, parameter[shape, galactic]]:
constant[
Create a simple celestial WCS object in either the ICRS or Galactic
coordinate frame.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array to be used with the output
`~astropy.wcs.WCS` object.
galactic : bool, optional
If `True`, then the output WCS will be in the Galactic
coordinate frame. If `False` (default), then the output WCS
will be in the ICRS coordinate frame.
Returns
-------
wcs : `~astropy.wcs.WCS` object
The world coordinate system (WCS) transformation.
See Also
--------
make_imagehdu
Examples
--------
>>> from photutils.datasets import make_wcs
>>> shape = (100, 100)
>>> wcs = make_wcs(shape)
>>> print(wcs.wcs.crpix) # doctest: +FLOAT_CMP
[50. 50.]
>>> print(wcs.wcs.crval) # doctest: +FLOAT_CMP
[197.8925 -1.36555556]
]
variable[wcs] assign[=] call[name[WCS], parameter[]]
variable[rho] assign[=] binary_operation[name[np].pi / constant[3.0]]
variable[scale] assign[=] binary_operation[constant[0.1] / constant[3600.0]]
if compare[name[astropy_version] less[<] constant[3.1]] begin[:]
name[wcs]._naxis1 assign[=] call[name[shape]][constant[1]]
name[wcs]._naxis2 assign[=] call[name[shape]][constant[0]]
name[wcs].wcs.crpix assign[=] list[[<ast.BinOp object at 0x7da20c7c80d0>, <ast.BinOp object at 0x7da20c7cb850>]]
name[wcs].wcs.crval assign[=] list[[<ast.Constant object at 0x7da20c7c8c70>, <ast.UnaryOp object at 0x7da20c7c9ed0>]]
name[wcs].wcs.cunit assign[=] list[[<ast.Constant object at 0x7da20c7c8d60>, <ast.Constant object at 0x7da20c7c97b0>]]
name[wcs].wcs.cd assign[=] list[[<ast.List object at 0x7da20c7ca500>, <ast.List object at 0x7da20c7ca410>]]
if <ast.UnaryOp object at 0x7da20c7cbe20> begin[:]
name[wcs].wcs.radesys assign[=] constant[ICRS]
name[wcs].wcs.ctype assign[=] list[[<ast.Constant object at 0x7da20c7c8190>, <ast.Constant object at 0x7da20c7ca5f0>]]
return[name[wcs]] | keyword[def] identifier[make_wcs] ( identifier[shape] , identifier[galactic] = keyword[False] ):
literal[string]
identifier[wcs] = identifier[WCS] ( identifier[naxis] = literal[int] )
identifier[rho] = identifier[np] . identifier[pi] / literal[int]
identifier[scale] = literal[int] / literal[int]
keyword[if] identifier[astropy_version] < literal[string] :
identifier[wcs] . identifier[_naxis1] = identifier[shape] [ literal[int] ]
identifier[wcs] . identifier[_naxis2] = identifier[shape] [ literal[int] ]
keyword[else] :
identifier[wcs] . identifier[pixel_shape] = identifier[shape]
identifier[wcs] . identifier[wcs] . identifier[crpix] =[ identifier[shape] [ literal[int] ]/ literal[int] , identifier[shape] [ literal[int] ]/ literal[int] ]
identifier[wcs] . identifier[wcs] . identifier[crval] =[ literal[int] ,- literal[int] ]
identifier[wcs] . identifier[wcs] . identifier[cunit] =[ literal[string] , literal[string] ]
identifier[wcs] . identifier[wcs] . identifier[cd] =[[- identifier[scale] * identifier[np] . identifier[cos] ( identifier[rho] ), identifier[scale] * identifier[np] . identifier[sin] ( identifier[rho] )],
[ identifier[scale] * identifier[np] . identifier[sin] ( identifier[rho] ), identifier[scale] * identifier[np] . identifier[cos] ( identifier[rho] )]]
keyword[if] keyword[not] identifier[galactic] :
identifier[wcs] . identifier[wcs] . identifier[radesys] = literal[string]
identifier[wcs] . identifier[wcs] . identifier[ctype] =[ literal[string] , literal[string] ]
keyword[else] :
identifier[wcs] . identifier[wcs] . identifier[ctype] =[ literal[string] , literal[string] ]
keyword[return] identifier[wcs] | def make_wcs(shape, galactic=False):
"""
Create a simple celestial WCS object in either the ICRS or Galactic
coordinate frame.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array to be used with the output
`~astropy.wcs.WCS` object.
galactic : bool, optional
If `True`, then the output WCS will be in the Galactic
coordinate frame. If `False` (default), then the output WCS
will be in the ICRS coordinate frame.
Returns
-------
wcs : `~astropy.wcs.WCS` object
The world coordinate system (WCS) transformation.
See Also
--------
make_imagehdu
Examples
--------
>>> from photutils.datasets import make_wcs
>>> shape = (100, 100)
>>> wcs = make_wcs(shape)
>>> print(wcs.wcs.crpix) # doctest: +FLOAT_CMP
[50. 50.]
>>> print(wcs.wcs.crval) # doctest: +FLOAT_CMP
[197.8925 -1.36555556]
"""
wcs = WCS(naxis=2)
rho = np.pi / 3.0
scale = 0.1 / 3600.0
if astropy_version < '3.1':
wcs._naxis1 = shape[1] # nx
wcs._naxis2 = shape[0] # ny # depends on [control=['if'], data=[]]
else:
wcs.pixel_shape = shape
wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2] # 1-indexed (x, y)
wcs.wcs.crval = [197.8925, -1.36555556]
wcs.wcs.cunit = ['deg', 'deg']
wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)], [scale * np.sin(rho), scale * np.cos(rho)]]
if not galactic:
wcs.wcs.radesys = 'ICRS'
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] # depends on [control=['if'], data=[]]
else:
wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
return wcs |
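
# A quick check of the WCS built above, assuming a recent astropy: the
# reference pixel (CRPIX, 1-indexed) should map back to CRVAL.
from astropy.wcs import WCS

wcs = WCS(naxis=2)
wcs.pixel_shape = (100, 100)
wcs.wcs.crpix = [50, 50]
wcs.wcs.crval = [197.8925, -1.36555556]
wcs.wcs.cunit = ['deg', 'deg']
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']

sky = wcs.pixel_to_world(49, 49)    # pixel_to_world is 0-indexed, CRPIX is 1-indexed
print(sky.ra.deg, sky.dec.deg)      # ~197.8925 ~-1.36555556
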
def get_eligible_users_for_extension(self, extension_id, options):
"""GetEligibleUsersForExtension.
        [Preview API] Returns users that are currently eligible to assign the extension to. The list is filtered based on the value of ExtensionFilterOptions.
:param str extension_id: The extension to check the eligibility of the users for.
:param str options: The options to filter the list.
:rtype: [str]
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
query_parameters = {}
if options is not None:
query_parameters['options'] = self._serialize.query('options', options, 'str')
response = self._send(http_method='GET',
location_id='5434f182-7f32-4135-8326-9340d887c08a',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response)) | def function[get_eligible_users_for_extension, parameter[self, extension_id, options]]:
constant[GetEligibleUsersForExtension.
        [Preview API] Returns users that are currently eligible to assign the extension to. The list is filtered based on the value of ExtensionFilterOptions.
:param str extension_id: The extension to check the eligibility of the users for.
:param str options: The options to filter the list.
:rtype: [str]
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[extension_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[extensionId]] assign[=] call[name[self]._serialize.url, parameter[constant[extension_id], name[extension_id], constant[str]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[options] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[options]] assign[=] call[name[self]._serialize.query, parameter[constant[options], name[options], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[[str]], call[name[self]._unwrap_collection, parameter[name[response]]]]]] | keyword[def] identifier[get_eligible_users_for_extension] ( identifier[self] , identifier[extension_id] , identifier[options] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[extension_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[extension_id] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[options] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[options] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] )) | def get_eligible_users_for_extension(self, extension_id, options):
"""GetEligibleUsersForExtension.
        [Preview API] Returns users that are currently eligible to assign the extension to. The list is filtered based on the value of ExtensionFilterOptions.
:param str extension_id: The extension to check the eligibility of the users for.
:param str options: The options to filter the list.
:rtype: [str]
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str') # depends on [control=['if'], data=['extension_id']]
query_parameters = {}
if options is not None:
query_parameters['options'] = self._serialize.query('options', options, 'str') # depends on [control=['if'], data=['options']]
response = self._send(http_method='GET', location_id='5434f182-7f32-4135-8326-9340d887c08a', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response)) |
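
# The client above only interpolates route values and serializes query
# parameters; roughly the same with the standard library (the host and ids
# here are illustrative, not the real service endpoint):
from urllib.parse import quote, urlencode

extension_id = "my.extension"
url = "https://host/_apis/licensing/%s" % quote(extension_id)
print("%s?%s" % (url, urlencode({"options": "all"})))
# https://host/_apis/licensing/my.extension?options=all
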
def get_next_event(process, queue):
'''
This function polls the process until it returns a valid
item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in
a state where the process has terminated and the queue is empty
    Warning: if the child process is in an infinite loop, this will
    also loop infinitely.
'''
while True:
try:
return queue.get(block=True, timeout=TICK)
except multiprocessing.queues.Empty:
if not process.is_alive():
# There is a possibility that after the last queue.get the
# process created another event and then died. In that case
# we want to continue draining the queue.
try:
return queue.get(block=False)
except multiprocessing.queues.Empty:
                    # If the queue is empty we know that there are no more events
# and that the process has died.
return PROCESS_DEAD_AND_QUEUE_EMPTY
check.failed('unreachable') | def function[get_next_event, parameter[process, queue]]:
constant[
This function polls the process until it returns a valid
item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in
a state where the process has terminated and the queue is empty
Warning: if the child process is in an infinite loop. This will
also infinitely loop.
]
while constant[True] begin[:]
<ast.Try object at 0x7da1b0394640>
call[name[check].failed, parameter[constant[unreachable]]] | keyword[def] identifier[get_next_event] ( identifier[process] , identifier[queue] ):
literal[string]
keyword[while] keyword[True] :
keyword[try] :
keyword[return] identifier[queue] . identifier[get] ( identifier[block] = keyword[True] , identifier[timeout] = identifier[TICK] )
keyword[except] identifier[multiprocessing] . identifier[queues] . identifier[Empty] :
keyword[if] keyword[not] identifier[process] . identifier[is_alive] ():
keyword[try] :
keyword[return] identifier[queue] . identifier[get] ( identifier[block] = keyword[False] )
keyword[except] identifier[multiprocessing] . identifier[queues] . identifier[Empty] :
keyword[return] identifier[PROCESS_DEAD_AND_QUEUE_EMPTY]
identifier[check] . identifier[failed] ( literal[string] ) | def get_next_event(process, queue):
"""
This function polls the process until it returns a valid
item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in
a state where the process has terminated and the queue is empty
Warning: if the child process is in an infinite loop. This will
also infinitely loop.
"""
while True:
try:
return queue.get(block=True, timeout=TICK) # depends on [control=['try'], data=[]]
except multiprocessing.queues.Empty:
if not process.is_alive():
# There is a possibility that after the last queue.get the
# process created another event and then died. In that case
# we want to continue draining the queue.
try:
return queue.get(block=False) # depends on [control=['try'], data=[]]
except multiprocessing.queues.Empty:
                    # If the queue is empty we know that there are no more events
# and that the process has died.
return PROCESS_DEAD_AND_QUEUE_EMPTY # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
check.failed('unreachable') |
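
# A runnable sketch of the drain pattern above: poll while the worker is
# alive, then try the queue once more after it exits so a last-moment event
# is not lost.
import multiprocessing
import queue

def worker(q):
    for i in range(3):
        q.put(i)

if __name__ == "__main__":
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    events = []
    while True:
        try:
            events.append(q.get(block=True, timeout=0.05))
        except queue.Empty:                 # multiprocessing re-exports this exception
            if not p.is_alive():
                try:
                    events.append(q.get(block=False))
                except queue.Empty:
                    break                   # process dead and queue empty
    p.join()
    print(events)                           # [0, 1, 2]
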
def mix_columns(state):
"""
Transformation in the Cipher that takes all of the columns of the State and
mixes their data (independently of one another) to produce new columns.
"""
state = state.reshape(4, 4, 8)
return fcat(
multiply(MA, state[0]),
multiply(MA, state[1]),
multiply(MA, state[2]),
multiply(MA, state[3]),
) | def function[mix_columns, parameter[state]]:
constant[
Transformation in the Cipher that takes all of the columns of the State and
mixes their data (independently of one another) to produce new columns.
]
variable[state] assign[=] call[name[state].reshape, parameter[constant[4], constant[4], constant[8]]]
return[call[name[fcat], parameter[call[name[multiply], parameter[name[MA], call[name[state]][constant[0]]]], call[name[multiply], parameter[name[MA], call[name[state]][constant[1]]]], call[name[multiply], parameter[name[MA], call[name[state]][constant[2]]]], call[name[multiply], parameter[name[MA], call[name[state]][constant[3]]]]]]] | keyword[def] identifier[mix_columns] ( identifier[state] ):
literal[string]
identifier[state] = identifier[state] . identifier[reshape] ( literal[int] , literal[int] , literal[int] )
keyword[return] identifier[fcat] (
identifier[multiply] ( identifier[MA] , identifier[state] [ literal[int] ]),
identifier[multiply] ( identifier[MA] , identifier[state] [ literal[int] ]),
identifier[multiply] ( identifier[MA] , identifier[state] [ literal[int] ]),
identifier[multiply] ( identifier[MA] , identifier[state] [ literal[int] ]),
) | def mix_columns(state):
"""
Transformation in the Cipher that takes all of the columns of the State and
mixes their data (independently of one another) to produce new columns.
"""
state = state.reshape(4, 4, 8)
return fcat(multiply(MA, state[0]), multiply(MA, state[1]), multiply(MA, state[2]), multiply(MA, state[3])) |
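
# The version above works on a bit-level matrix representation; for reference,
# here is the classic byte-oriented MixColumns of a single column, with xtime
# as multiplication by 2 in GF(2^8). The last line checks the FIPS-197 test
# vector (db 13 53 45 -> 8e 4d a1 bc).
def xtime(b):
    b <<= 1                                          # multiply by x
    return (b ^ 0x1B) & 0xFF if b & 0x100 else b     # reduce mod the AES polynomial

def mix_column(col):
    a = col
    b = [xtime(x) for x in col]                      # 2*a_i for each byte
    return [
        b[0] ^ a[1] ^ b[1] ^ a[2] ^ a[3],            # 2*a0 + 3*a1 + a2 + a3
        a[0] ^ b[1] ^ a[2] ^ b[2] ^ a[3],            # a0 + 2*a1 + 3*a2 + a3
        a[0] ^ a[1] ^ b[2] ^ a[3] ^ b[3],            # a0 + a1 + 2*a2 + 3*a3
        b[0] ^ a[0] ^ a[1] ^ a[2] ^ b[3],            # 3*a0 + a1 + a2 + 2*a3
    ]

print([hex(v) for v in mix_column([0xDB, 0x13, 0x53, 0x45])])  # ['0x8e', '0x4d', '0xa1', '0xbc']
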
def promote(self, name):
"""Promote to a PartitionName by combining with a bundle Name."""
return PartitionName(**dict(list(name.dict.items()) + list(self.dict.items()))) | def function[promote, parameter[self, name]]:
constant[Promote to a PartitionName by combining with a bundle Name.]
return[call[name[PartitionName], parameter[]]] | keyword[def] identifier[promote] ( identifier[self] , identifier[name] ):
literal[string]
keyword[return] identifier[PartitionName] (** identifier[dict] ( identifier[list] ( identifier[name] . identifier[dict] . identifier[items] ())+ identifier[list] ( identifier[self] . identifier[dict] . identifier[items] ()))) | def promote(self, name):
"""Promote to a PartitionName by combining with a bundle Name."""
return PartitionName(**dict(list(name.dict.items()) + list(self.dict.items()))) |
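
# The merge above relies on later (key, value) pairs winning during dict
# construction; a tiny illustration (the field names are made up):
base = {'source': 'example', 'version': '0.1'}
override = {'version': '0.2', 'partition': 'p1'}
print(dict(list(base.items()) + list(override.items())))
# {'source': 'example', 'version': '0.2', 'partition': 'p1'} -- same as {**base, **override}
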
def generate_py():
"""Generate the python output file"""
model = {}
vk = init()
format_vk(vk)
model_alias(vk, model)
model_typedefs(vk, model)
model_enums(vk, model)
model_macros(vk, model)
model_funcpointers(vk, model)
model_exceptions(vk, model)
model_constructors(vk, model)
model_functions(vk, model)
model_ext_functions(vk, model)
env = jinja2.Environment(
autoescape=False,
trim_blocks=True,
lstrip_blocks=True,
loader=jinja2.FileSystemLoader(HERE)
)
out_file = path.join(HERE, path.pardir, 'vulkan', '_vulkan.py')
with open(out_file, 'w') as out:
out.write(env.get_template('vulkan.template.py').render(model=model)) | def function[generate_py, parameter[]]:
constant[Generate the python output file]
variable[model] assign[=] dictionary[[], []]
variable[vk] assign[=] call[name[init], parameter[]]
call[name[format_vk], parameter[name[vk]]]
call[name[model_alias], parameter[name[vk], name[model]]]
call[name[model_typedefs], parameter[name[vk], name[model]]]
call[name[model_enums], parameter[name[vk], name[model]]]
call[name[model_macros], parameter[name[vk], name[model]]]
call[name[model_funcpointers], parameter[name[vk], name[model]]]
call[name[model_exceptions], parameter[name[vk], name[model]]]
call[name[model_constructors], parameter[name[vk], name[model]]]
call[name[model_functions], parameter[name[vk], name[model]]]
call[name[model_ext_functions], parameter[name[vk], name[model]]]
variable[env] assign[=] call[name[jinja2].Environment, parameter[]]
variable[out_file] assign[=] call[name[path].join, parameter[name[HERE], name[path].pardir, constant[vulkan], constant[_vulkan.py]]]
with call[name[open], parameter[name[out_file], constant[w]]] begin[:]
call[name[out].write, parameter[call[call[name[env].get_template, parameter[constant[vulkan.template.py]]].render, parameter[]]]] | keyword[def] identifier[generate_py] ():
literal[string]
identifier[model] ={}
identifier[vk] = identifier[init] ()
identifier[format_vk] ( identifier[vk] )
identifier[model_alias] ( identifier[vk] , identifier[model] )
identifier[model_typedefs] ( identifier[vk] , identifier[model] )
identifier[model_enums] ( identifier[vk] , identifier[model] )
identifier[model_macros] ( identifier[vk] , identifier[model] )
identifier[model_funcpointers] ( identifier[vk] , identifier[model] )
identifier[model_exceptions] ( identifier[vk] , identifier[model] )
identifier[model_constructors] ( identifier[vk] , identifier[model] )
identifier[model_functions] ( identifier[vk] , identifier[model] )
identifier[model_ext_functions] ( identifier[vk] , identifier[model] )
identifier[env] = identifier[jinja2] . identifier[Environment] (
identifier[autoescape] = keyword[False] ,
identifier[trim_blocks] = keyword[True] ,
identifier[lstrip_blocks] = keyword[True] ,
identifier[loader] = identifier[jinja2] . identifier[FileSystemLoader] ( identifier[HERE] )
)
identifier[out_file] = identifier[path] . identifier[join] ( identifier[HERE] , identifier[path] . identifier[pardir] , literal[string] , literal[string] )
keyword[with] identifier[open] ( identifier[out_file] , literal[string] ) keyword[as] identifier[out] :
identifier[out] . identifier[write] ( identifier[env] . identifier[get_template] ( literal[string] ). identifier[render] ( identifier[model] = identifier[model] )) | def generate_py():
"""Generate the python output file"""
model = {}
vk = init()
format_vk(vk)
model_alias(vk, model)
model_typedefs(vk, model)
model_enums(vk, model)
model_macros(vk, model)
model_funcpointers(vk, model)
model_exceptions(vk, model)
model_constructors(vk, model)
model_functions(vk, model)
model_ext_functions(vk, model)
env = jinja2.Environment(autoescape=False, trim_blocks=True, lstrip_blocks=True, loader=jinja2.FileSystemLoader(HERE))
out_file = path.join(HERE, path.pardir, 'vulkan', '_vulkan.py')
with open(out_file, 'w') as out:
out.write(env.get_template('vulkan.template.py').render(model=model)) # depends on [control=['with'], data=['out']] |
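
# A minimal sketch of the render step, assuming jinja2 is installed; an
# in-memory DictLoader template stands in for vulkan.template.py.
import jinja2

env = jinja2.Environment(
    trim_blocks=True,
    lstrip_blocks=True,
    loader=jinja2.DictLoader({'t.py': "NAMES = {{ model['names'] }}\n"}),
)
print(env.get_template('t.py').render(model={'names': ['vkCreateInstance']}))
# NAMES = ['vkCreateInstance']
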
def gregorian_date(year, julian_day):
"""
Gregorian Date is defined as a year and a julian day (1-based
index into the days of the year).
>>> gregorian_date(2007, 15)
datetime.date(2007, 1, 15)
"""
result = datetime.date(year, 1, 1)
result += datetime.timedelta(days=julian_day - 1)
return result | def function[gregorian_date, parameter[year, julian_day]]:
constant[
Gregorian Date is defined as a year and a julian day (1-based
index into the days of the year).
>>> gregorian_date(2007, 15)
datetime.date(2007, 1, 15)
]
variable[result] assign[=] call[name[datetime].date, parameter[name[year], constant[1], constant[1]]]
<ast.AugAssign object at 0x7da1aff54f40>
return[name[result]] | keyword[def] identifier[gregorian_date] ( identifier[year] , identifier[julian_day] ):
literal[string]
identifier[result] = identifier[datetime] . identifier[date] ( identifier[year] , literal[int] , literal[int] )
identifier[result] += identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[julian_day] - literal[int] )
keyword[return] identifier[result] | def gregorian_date(year, julian_day):
"""
Gregorian Date is defined as a year and a julian day (1-based
index into the days of the year).
>>> gregorian_date(2007, 15)
datetime.date(2007, 1, 15)
"""
result = datetime.date(year, 1, 1)
result += datetime.timedelta(days=julian_day - 1)
return result |
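
# The inverse direction, for reference: datetime already exposes the 1-based
# julian day as timetuple().tm_yday.
import datetime

d = datetime.date(2007, 1, 15)
print(d.timetuple().tm_yday)   # 15
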
def _write_handle(self, conn, handle, ack, value, timeout=1.0):
"""Write to a BLE device characteristic by its handle
Args:
            conn (int): The connection handle for the device we should write to
            handle (int): The characteristic handle we should write
            ack (bool): Should this be an acknowledged write or unacknowledged
timeout (float): How long to wait before failing
value (bytearray): The value that we should write
"""
conn_handle = conn
char_handle = handle
def write_handle_acked(event):
if event.command_class == 4 and event.command == 1:
conn, _, char = unpack("<BHH", event.payload)
return conn_handle == conn and char_handle == char
data_len = len(value)
if data_len > 20:
return False, {'reason': 'Data too long to write'}
payload = struct.pack("<BHB%ds" % data_len, conn_handle, char_handle, data_len, value)
try:
if ack:
response = self._send_command(4, 5, payload)
else:
response = self._send_command(4, 6, payload)
except InternalTimeoutError:
return False, {'reason': 'Timeout waiting for response to command in _write_handle'}
_, result = unpack("<BH", response.payload)
if result != 0:
return False, {'reason': 'Error writing to handle', 'error_code': result}
if ack:
events = self._wait_process_events(timeout, lambda x: False, write_handle_acked)
if len(events) == 0:
return False, {'reason': 'Timeout waiting for acknowledge on write'}
_, result, _ = unpack("<BHH", events[0].payload)
if result != 0:
return False, {'reason': 'Error received during write to handle', 'error_code': result}
return True, None | def function[_write_handle, parameter[self, conn, handle, ack, value, timeout]]:
constant[Write to a BLE device characteristic by its handle
Args:
            conn (int): The connection handle for the device we should write to
            handle (int): The characteristic handle we should write
            ack (bool): Should this be an acknowledged write or unacknowledged
timeout (float): How long to wait before failing
value (bytearray): The value that we should write
]
variable[conn_handle] assign[=] name[conn]
variable[char_handle] assign[=] name[handle]
def function[write_handle_acked, parameter[event]]:
if <ast.BoolOp object at 0x7da18f720520> begin[:]
<ast.Tuple object at 0x7da18f721cf0> assign[=] call[name[unpack], parameter[constant[<BHH], name[event].payload]]
return[<ast.BoolOp object at 0x7da18f720460>]
variable[data_len] assign[=] call[name[len], parameter[name[value]]]
if compare[name[data_len] greater[>] constant[20]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18f7210c0>, <ast.Dict object at 0x7da18f721450>]]]
variable[payload] assign[=] call[name[struct].pack, parameter[binary_operation[constant[<BHB%ds] <ast.Mod object at 0x7da2590d6920> name[data_len]], name[conn_handle], name[char_handle], name[data_len], name[value]]]
<ast.Try object at 0x7da18f723520>
<ast.Tuple object at 0x7da18f720e80> assign[=] call[name[unpack], parameter[constant[<BH], name[response].payload]]
if compare[name[result] not_equal[!=] constant[0]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b0d31d80>, <ast.Dict object at 0x7da1b0d333a0>]]]
if name[ack] begin[:]
variable[events] assign[=] call[name[self]._wait_process_events, parameter[name[timeout], <ast.Lambda object at 0x7da1b0d30c10>, name[write_handle_acked]]]
if compare[call[name[len], parameter[name[events]]] equal[==] constant[0]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b0d32e00>, <ast.Dict object at 0x7da1b0d30460>]]]
<ast.Tuple object at 0x7da1b0d33100> assign[=] call[name[unpack], parameter[constant[<BHH], call[name[events]][constant[0]].payload]]
if compare[name[result] not_equal[!=] constant[0]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b0d31660>, <ast.Dict object at 0x7da1b0d326b0>]]]
return[tuple[[<ast.Constant object at 0x7da1b0d30f70>, <ast.Constant object at 0x7da1b0d327d0>]]] | keyword[def] identifier[_write_handle] ( identifier[self] , identifier[conn] , identifier[handle] , identifier[ack] , identifier[value] , identifier[timeout] = literal[int] ):
literal[string]
identifier[conn_handle] = identifier[conn]
identifier[char_handle] = identifier[handle]
keyword[def] identifier[write_handle_acked] ( identifier[event] ):
keyword[if] identifier[event] . identifier[command_class] == literal[int] keyword[and] identifier[event] . identifier[command] == literal[int] :
identifier[conn] , identifier[_] , identifier[char] = identifier[unpack] ( literal[string] , identifier[event] . identifier[payload] )
keyword[return] identifier[conn_handle] == identifier[conn] keyword[and] identifier[char_handle] == identifier[char]
identifier[data_len] = identifier[len] ( identifier[value] )
keyword[if] identifier[data_len] > literal[int] :
keyword[return] keyword[False] ,{ literal[string] : literal[string] }
identifier[payload] = identifier[struct] . identifier[pack] ( literal[string] % identifier[data_len] , identifier[conn_handle] , identifier[char_handle] , identifier[data_len] , identifier[value] )
keyword[try] :
keyword[if] identifier[ack] :
identifier[response] = identifier[self] . identifier[_send_command] ( literal[int] , literal[int] , identifier[payload] )
keyword[else] :
identifier[response] = identifier[self] . identifier[_send_command] ( literal[int] , literal[int] , identifier[payload] )
keyword[except] identifier[InternalTimeoutError] :
keyword[return] keyword[False] ,{ literal[string] : literal[string] }
identifier[_] , identifier[result] = identifier[unpack] ( literal[string] , identifier[response] . identifier[payload] )
keyword[if] identifier[result] != literal[int] :
keyword[return] keyword[False] ,{ literal[string] : literal[string] , literal[string] : identifier[result] }
keyword[if] identifier[ack] :
identifier[events] = identifier[self] . identifier[_wait_process_events] ( identifier[timeout] , keyword[lambda] identifier[x] : keyword[False] , identifier[write_handle_acked] )
keyword[if] identifier[len] ( identifier[events] )== literal[int] :
keyword[return] keyword[False] ,{ literal[string] : literal[string] }
identifier[_] , identifier[result] , identifier[_] = identifier[unpack] ( literal[string] , identifier[events] [ literal[int] ]. identifier[payload] )
keyword[if] identifier[result] != literal[int] :
keyword[return] keyword[False] ,{ literal[string] : literal[string] , literal[string] : identifier[result] }
keyword[return] keyword[True] , keyword[None] | def _write_handle(self, conn, handle, ack, value, timeout=1.0):
"""Write to a BLE device characteristic by its handle
Args:
conn (int): The connection handle for the device we should read from
handle (int): The characteristics handle we should read
ack (bool): Should this be an acknowledges write or unacknowledged
timeout (float): How long to wait before failing
value (bytearray): The value that we should write
"""
conn_handle = conn
char_handle = handle
def write_handle_acked(event):
if event.command_class == 4 and event.command == 1:
(conn, _, char) = unpack('<BHH', event.payload)
return conn_handle == conn and char_handle == char # depends on [control=['if'], data=[]]
data_len = len(value)
if data_len > 20:
return (False, {'reason': 'Data too long to write'}) # depends on [control=['if'], data=[]]
payload = struct.pack('<BHB%ds' % data_len, conn_handle, char_handle, data_len, value)
try:
if ack:
response = self._send_command(4, 5, payload) # depends on [control=['if'], data=[]]
else:
response = self._send_command(4, 6, payload) # depends on [control=['try'], data=[]]
except InternalTimeoutError:
return (False, {'reason': 'Timeout waiting for response to command in _write_handle'}) # depends on [control=['except'], data=[]]
(_, result) = unpack('<BH', response.payload)
if result != 0:
return (False, {'reason': 'Error writing to handle', 'error_code': result}) # depends on [control=['if'], data=['result']]
if ack:
events = self._wait_process_events(timeout, lambda x: False, write_handle_acked)
if len(events) == 0:
return (False, {'reason': 'Timeout waiting for acknowledge on write'}) # depends on [control=['if'], data=[]]
(_, result, _) = unpack('<BHH', events[0].payload)
if result != 0:
return (False, {'reason': 'Error received during write to handle', 'error_code': result}) # depends on [control=['if'], data=['result']] # depends on [control=['if'], data=[]]
return (True, None) |
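
# The payload layout above in isolation: connection handle (uint8), attribute
# handle (uint16, little-endian), a length byte, then the raw value. The
# handles here are illustrative; note that struct's 's' code wants bytes, so a
# bytearray value would need bytes(value) first.
import struct

value = b'\x01\x02\x03'
payload = struct.pack("<BHB%ds" % len(value), 0, 0x0021, len(value), value)
print(payload.hex())   # 00210003010203
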
def summarize_mutation(mutation_name, event, inputs, outputs, isAsync=False):
"""
This function provides a standard representation of mutations to be
used when services announce themselves
"""
return dict(
name=mutation_name,
event=event,
isAsync=isAsync,
inputs=inputs,
outputs=outputs,
) | def function[summarize_mutation, parameter[mutation_name, event, inputs, outputs, isAsync]]:
constant[
This function provides a standard representation of mutations to be
used when services announce themselves
]
return[call[name[dict], parameter[]]] | keyword[def] identifier[summarize_mutation] ( identifier[mutation_name] , identifier[event] , identifier[inputs] , identifier[outputs] , identifier[isAsync] = keyword[False] ):
literal[string]
keyword[return] identifier[dict] (
identifier[name] = identifier[mutation_name] ,
identifier[event] = identifier[event] ,
identifier[isAsync] = identifier[isAsync] ,
identifier[inputs] = identifier[inputs] ,
identifier[outputs] = identifier[outputs] ,
) | def summarize_mutation(mutation_name, event, inputs, outputs, isAsync=False):
"""
This function provides a standard representation of mutations to be
used when services announce themselves
"""
return dict(name=mutation_name, event=event, isAsync=isAsync, inputs=inputs, outputs=outputs) |
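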
def split(self, sep=None, maxsplit=None, regex=False):
"""Split based on seperator, optionally using a regex
Capture groups are ignored in regex, the whole pattern is matched
and used to split the original FmtStr."""
if maxsplit is not None:
raise NotImplementedError('no maxsplit yet')
s = self.s
if sep is None:
sep = r'\s+'
elif not regex:
sep = re.escape(sep)
matches = list(re.finditer(sep, s))
return [self[start:end] for start, end in zip(
[0] + [m.end() for m in matches],
[m.start() for m in matches] + [len(s)])] | def function[split, parameter[self, sep, maxsplit, regex]]:
    constant[Split based on separator, optionally using a regex
Capture groups are ignored in regex, the whole pattern is matched
and used to split the original FmtStr.]
if compare[name[maxsplit] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da20e74a9e0>
variable[s] assign[=] name[self].s
if compare[name[sep] is constant[None]] begin[:]
variable[sep] assign[=] constant[\s+]
variable[matches] assign[=] call[name[list], parameter[call[name[re].finditer, parameter[name[sep], name[s]]]]]
return[<ast.ListComp object at 0x7da20e74a1a0>] | keyword[def] identifier[split] ( identifier[self] , identifier[sep] = keyword[None] , identifier[maxsplit] = keyword[None] , identifier[regex] = keyword[False] ):
literal[string]
keyword[if] identifier[maxsplit] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[s] = identifier[self] . identifier[s]
keyword[if] identifier[sep] keyword[is] keyword[None] :
identifier[sep] = literal[string]
keyword[elif] keyword[not] identifier[regex] :
identifier[sep] = identifier[re] . identifier[escape] ( identifier[sep] )
identifier[matches] = identifier[list] ( identifier[re] . identifier[finditer] ( identifier[sep] , identifier[s] ))
keyword[return] [ identifier[self] [ identifier[start] : identifier[end] ] keyword[for] identifier[start] , identifier[end] keyword[in] identifier[zip] (
[ literal[int] ]+[ identifier[m] . identifier[end] () keyword[for] identifier[m] keyword[in] identifier[matches] ],
[ identifier[m] . identifier[start] () keyword[for] identifier[m] keyword[in] identifier[matches] ]+[ identifier[len] ( identifier[s] )])] | def split(self, sep=None, maxsplit=None, regex=False):
"""Split based on seperator, optionally using a regex
Capture groups are ignored in regex, the whole pattern is matched
and used to split the original FmtStr."""
if maxsplit is not None:
raise NotImplementedError('no maxsplit yet') # depends on [control=['if'], data=[]]
s = self.s
if sep is None:
sep = '\\s+' # depends on [control=['if'], data=['sep']]
elif not regex:
sep = re.escape(sep) # depends on [control=['if'], data=[]]
matches = list(re.finditer(sep, s))
return [self[start:end] for (start, end) in zip([0] + [m.end() for m in matches], [m.start() for m in matches] + [len(s)])] |
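
# The boundary arithmetic above, traced on a plain string: zip each separator
# match's end with the next match's start to recover the pieces.
import re

s = "a, b,  c"
matches = list(re.finditer(r',\s*', s))
starts = [0] + [m.end() for m in matches]
ends = [m.start() for m in matches] + [len(s)]
print([s[i:j] for i, j in zip(starts, ends)])   # ['a', 'b', 'c']
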
def link_type(arg_type, arg_name=None, include_bt:bool=True):
"Create link to documentation."
arg_name = arg_name or fn_name(arg_type)
if include_bt: arg_name = code_esc(arg_name)
if belongs_to_module(arg_type, 'torch') and ('Tensor' not in arg_name): return f'[{arg_name}]({get_pytorch_link(arg_type)})'
if is_fastai_class(arg_type): return f'[{arg_name}]({get_fn_link(arg_type)})'
return arg_name | def function[link_type, parameter[arg_type, arg_name, include_bt]]:
constant[Create link to documentation.]
variable[arg_name] assign[=] <ast.BoolOp object at 0x7da1b1e9a530>
if name[include_bt] begin[:]
variable[arg_name] assign[=] call[name[code_esc], parameter[name[arg_name]]]
if <ast.BoolOp object at 0x7da1b1e9aad0> begin[:]
return[<ast.JoinedStr object at 0x7da1b1e99ab0>]
if call[name[is_fastai_class], parameter[name[arg_type]]] begin[:]
return[<ast.JoinedStr object at 0x7da1b1e98250>]
return[name[arg_name]] | keyword[def] identifier[link_type] ( identifier[arg_type] , identifier[arg_name] = keyword[None] , identifier[include_bt] : identifier[bool] = keyword[True] ):
literal[string]
identifier[arg_name] = identifier[arg_name] keyword[or] identifier[fn_name] ( identifier[arg_type] )
keyword[if] identifier[include_bt] : identifier[arg_name] = identifier[code_esc] ( identifier[arg_name] )
keyword[if] identifier[belongs_to_module] ( identifier[arg_type] , literal[string] ) keyword[and] ( literal[string] keyword[not] keyword[in] identifier[arg_name] ): keyword[return] literal[string]
keyword[if] identifier[is_fastai_class] ( identifier[arg_type] ): keyword[return] literal[string]
keyword[return] identifier[arg_name] | def link_type(arg_type, arg_name=None, include_bt: bool=True):
"""Create link to documentation."""
arg_name = arg_name or fn_name(arg_type)
if include_bt:
arg_name = code_esc(arg_name) # depends on [control=['if'], data=[]]
if belongs_to_module(arg_type, 'torch') and 'Tensor' not in arg_name:
return f'[{arg_name}]({get_pytorch_link(arg_type)})' # depends on [control=['if'], data=[]]
if is_fastai_class(arg_type):
return f'[{arg_name}]({get_fn_link(arg_type)})' # depends on [control=['if'], data=[]]
return arg_name |
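
# What link_type ultimately emits is a markdown link, shown directly here
# (the name and URL are illustrative, not fastai's real doc paths):
arg_name = "`DataBunch`"
url = "/basic_data.html#DataBunch"
print(f"[{arg_name}]({url})")   # [`DataBunch`](/basic_data.html#DataBunch)
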
def transaction_write(self, items, client_request_token):
"""
        Wraps :func:`boto3.DynamoDB.Client.transact_write_items`.
:param items: Unpacked into "TransactionItems" for :func:`boto3.DynamoDB.Client.transact_write_items`
:param client_request_token: Idempotency token valid for 10 minutes from first use.
Unpacked into "ClientRequestToken"
:raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
"""
try:
self.dynamodb_client.transact_write_items(
TransactItems=items,
ClientRequestToken=client_request_token
)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "TransactionCanceledException":
raise TransactionCanceled from error
raise BloopException("Unexpected error during transaction write.") from error | def function[transaction_write, parameter[self, items, client_request_token]]:
constant[
        Wraps :func:`boto3.DynamoDB.Client.transact_write_items`.
:param items: Unpacked into "TransactionItems" for :func:`boto3.DynamoDB.Client.transact_write_items`
:param client_request_token: Idempotency token valid for 10 minutes from first use.
Unpacked into "ClientRequestToken"
:raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
]
<ast.Try object at 0x7da1b0f2f250> | keyword[def] identifier[transaction_write] ( identifier[self] , identifier[items] , identifier[client_request_token] ):
literal[string]
keyword[try] :
identifier[self] . identifier[dynamodb_client] . identifier[transact_write_items] (
identifier[TransactItems] = identifier[items] ,
identifier[ClientRequestToken] = identifier[client_request_token]
)
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[error] :
keyword[if] identifier[error] . identifier[response] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[raise] identifier[TransactionCanceled] keyword[from] identifier[error]
keyword[raise] identifier[BloopException] ( literal[string] ) keyword[from] identifier[error] | def transaction_write(self, items, client_request_token):
"""
        Wraps :func:`boto3.DynamoDB.Client.transact_write_items`.
:param items: Unpacked into "TransactionItems" for :func:`boto3.DynamoDB.Client.transact_write_items`
:param client_request_token: Idempotency token valid for 10 minutes from first use.
Unpacked into "ClientRequestToken"
:raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
"""
try:
self.dynamodb_client.transact_write_items(TransactItems=items, ClientRequestToken=client_request_token) # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == 'TransactionCanceledException':
raise TransactionCanceled from error # depends on [control=['if'], data=[]]
raise BloopException('Unexpected error during transaction write.') from error # depends on [control=['except'], data=['error']] |
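A minimal calling sketch, assuming "engine" is an object exposing the transaction_write method above (e.g. a bloop engine holding a dynamodb_client); the item dictionaries follow the DynamoDB TransactWriteItems wire format.

import uuid

items = [
    {"Put": {"TableName": "users",
             "Item": {"id": {"S": "u-1"}, "name": {"S": "Ada"}}}},
    {"Delete": {"TableName": "sessions",
                "Key": {"id": {"S": "s-9"}}}},
]
# Reusing the same token makes retries within ~10 minutes idempotent.
engine.transaction_write(items, client_request_token=str(uuid.uuid4()))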
def tag(self, alt='', use_size=None, **attrs):
"""
Return a standard XHTML ``<img ... />`` tag for this field.
:param alt: The ``alt=""`` text for the tag. Defaults to ``''``.
:param use_size: Whether to get the size of the thumbnail image for use
in the tag attributes. If ``None`` (default), the size will only
            be used if it won't result in a remote file retrieval.
All other keyword parameters are added as (properly escaped) extra
attributes to the `img` tag.
"""
if use_size is None:
if getattr(self, '_dimensions_cache', None):
use_size = True
else:
try:
self.storage.path(self.name)
use_size = True
except NotImplementedError:
use_size = False
attrs['alt'] = alt
attrs['src'] = self.url
if use_size:
attrs.update(dict(width=self.width, height=self.height))
attrs = ' '.join(['%s="%s"' % (key, escape(value))
for key, value in sorted(attrs.items())])
return mark_safe('<img %s />' % attrs) | def function[tag, parameter[self, alt, use_size]]:
constant[
Return a standard XHTML ``<img ... />`` tag for this field.
:param alt: The ``alt=""`` text for the tag. Defaults to ``''``.
:param use_size: Whether to get the size of the thumbnail image for use
in the tag attributes. If ``None`` (default), the size will only
be used it if won't result in a remote file retrieval.
All other keyword parameters are added as (properly escaped) extra
attributes to the `img` tag.
]
if compare[name[use_size] is constant[None]] begin[:]
if call[name[getattr], parameter[name[self], constant[_dimensions_cache], constant[None]]] begin[:]
variable[use_size] assign[=] constant[True]
call[name[attrs]][constant[alt]] assign[=] name[alt]
call[name[attrs]][constant[src]] assign[=] name[self].url
if name[use_size] begin[:]
call[name[attrs].update, parameter[call[name[dict], parameter[]]]]
variable[attrs] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da20cabf2e0>]]
return[call[name[mark_safe], parameter[binary_operation[constant[<img %s />] <ast.Mod object at 0x7da2590d6920> name[attrs]]]]] | keyword[def] identifier[tag] ( identifier[self] , identifier[alt] = literal[string] , identifier[use_size] = keyword[None] ,** identifier[attrs] ):
literal[string]
keyword[if] identifier[use_size] keyword[is] keyword[None] :
keyword[if] identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ):
identifier[use_size] = keyword[True]
keyword[else] :
keyword[try] :
identifier[self] . identifier[storage] . identifier[path] ( identifier[self] . identifier[name] )
identifier[use_size] = keyword[True]
keyword[except] identifier[NotImplementedError] :
identifier[use_size] = keyword[False]
identifier[attrs] [ literal[string] ]= identifier[alt]
identifier[attrs] [ literal[string] ]= identifier[self] . identifier[url]
keyword[if] identifier[use_size] :
identifier[attrs] . identifier[update] ( identifier[dict] ( identifier[width] = identifier[self] . identifier[width] , identifier[height] = identifier[self] . identifier[height] ))
identifier[attrs] = literal[string] . identifier[join] ([ literal[string] %( identifier[key] , identifier[escape] ( identifier[value] ))
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[sorted] ( identifier[attrs] . identifier[items] ())])
keyword[return] identifier[mark_safe] ( literal[string] % identifier[attrs] ) | def tag(self, alt='', use_size=None, **attrs):
"""
Return a standard XHTML ``<img ... />`` tag for this field.
:param alt: The ``alt=""`` text for the tag. Defaults to ``''``.
:param use_size: Whether to get the size of the thumbnail image for use
in the tag attributes. If ``None`` (default), the size will only
            be used if it won't result in a remote file retrieval.
All other keyword parameters are added as (properly escaped) extra
attributes to the `img` tag.
"""
if use_size is None:
if getattr(self, '_dimensions_cache', None):
use_size = True # depends on [control=['if'], data=[]]
else:
try:
self.storage.path(self.name)
use_size = True # depends on [control=['try'], data=[]]
except NotImplementedError:
use_size = False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['use_size']]
attrs['alt'] = alt
attrs['src'] = self.url
if use_size:
attrs.update(dict(width=self.width, height=self.height)) # depends on [control=['if'], data=[]]
attrs = ' '.join(['%s="%s"' % (key, escape(value)) for (key, value) in sorted(attrs.items())])
return mark_safe('<img %s />' % attrs) |
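An illustrative call against a field exposing the tag() method above; "thumb" is a hypothetical field instance. Because "class" is a Python keyword, attributes with that name have to be passed via dict unpacking.

# Hypothetical field instance providing url/width/height via its storage.
html = thumb.tag(alt="Profile photo", **{"class": "avatar"})
# With a locally resolvable path this renders roughly (attributes sorted):
# <img alt="Profile photo" class="avatar" height="64"
#      src="/media/avatars/1.jpg" width="64" />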
def _get_keyid(keytype, scheme, key_value, hash_algorithm = 'sha256'):
"""Return the keyid of 'key_value'."""
# 'keyid' will be generated from an object conformant to KEY_SCHEMA,
# which is the format Metadata files (e.g., root.json) store keys.
# 'format_keyval_to_metadata()' returns the object needed by _get_keyid().
key_meta = format_keyval_to_metadata(keytype, scheme, key_value, private=False)
# Convert the key to JSON Canonical format, suitable for adding
# to digest objects.
key_update_data = securesystemslib.formats.encode_canonical(key_meta)
# Create a digest object and call update(), using the JSON
  # canonical format of 'key_meta' as the update data.
digest_object = securesystemslib.hash.digest(hash_algorithm)
digest_object.update(key_update_data.encode('utf-8'))
# 'keyid' becomes the hexadecimal representation of the hash.
keyid = digest_object.hexdigest()
return keyid | def function[_get_keyid, parameter[keytype, scheme, key_value, hash_algorithm]]:
constant[Return the keyid of 'key_value'.]
variable[key_meta] assign[=] call[name[format_keyval_to_metadata], parameter[name[keytype], name[scheme], name[key_value]]]
variable[key_update_data] assign[=] call[name[securesystemslib].formats.encode_canonical, parameter[name[key_meta]]]
variable[digest_object] assign[=] call[name[securesystemslib].hash.digest, parameter[name[hash_algorithm]]]
call[name[digest_object].update, parameter[call[name[key_update_data].encode, parameter[constant[utf-8]]]]]
variable[keyid] assign[=] call[name[digest_object].hexdigest, parameter[]]
return[name[keyid]] | keyword[def] identifier[_get_keyid] ( identifier[keytype] , identifier[scheme] , identifier[key_value] , identifier[hash_algorithm] = literal[string] ):
literal[string]
identifier[key_meta] = identifier[format_keyval_to_metadata] ( identifier[keytype] , identifier[scheme] , identifier[key_value] , identifier[private] = keyword[False] )
identifier[key_update_data] = identifier[securesystemslib] . identifier[formats] . identifier[encode_canonical] ( identifier[key_meta] )
identifier[digest_object] = identifier[securesystemslib] . identifier[hash] . identifier[digest] ( identifier[hash_algorithm] )
identifier[digest_object] . identifier[update] ( identifier[key_update_data] . identifier[encode] ( literal[string] ))
identifier[keyid] = identifier[digest_object] . identifier[hexdigest] ()
keyword[return] identifier[keyid] | def _get_keyid(keytype, scheme, key_value, hash_algorithm='sha256'):
"""Return the keyid of 'key_value'."""
# 'keyid' will be generated from an object conformant to KEY_SCHEMA,
# which is the format Metadata files (e.g., root.json) store keys.
# 'format_keyval_to_metadata()' returns the object needed by _get_keyid().
key_meta = format_keyval_to_metadata(keytype, scheme, key_value, private=False)
# Convert the key to JSON Canonical format, suitable for adding
# to digest objects.
key_update_data = securesystemslib.formats.encode_canonical(key_meta)
# Create a digest object and call update(), using the JSON
  # canonical format of 'key_meta' as the update data.
digest_object = securesystemslib.hash.digest(hash_algorithm)
digest_object.update(key_update_data.encode('utf-8'))
# 'keyid' becomes the hexadecimal representation of the hash.
keyid = digest_object.hexdigest()
return keyid |
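A stand-alone approximation of the digest step above, using hashlib and a sorted-key json.dumps as a rough stand-in for securesystemslib's canonical-JSON encoder (the real encoder differs in detail); the key material is a placeholder.

import hashlib
import json

key_meta = {  # placeholder metadata in the KEY_SCHEMA shape
    "keytype": "ed25519",
    "scheme": "ed25519",
    "keyval": {"public": "aa11..."},
}
# Deterministic serialization approximating encode_canonical().
canonical = json.dumps(key_meta, sort_keys=True, separators=(",", ":"))
keyid = hashlib.sha256(canonical.encode("utf-8")).hexdigest()
print(keyid)  # hex digest used as the key identifier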
def set_composite_filter(filter_proto, op, *filters):
"""Set composite filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
op: datastore.CompositeFilter.Operation
filters: vararg list of datastore.Filter
Returns:
the same datastore.Filter.
Usage:
>>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND,
... set_property_filter(datastore.Filter(), ...),
... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ...
"""
filter_proto.Clear()
cf = filter_proto.composite_filter
cf.op = op
for f in filters:
cf.filters.add().CopyFrom(f)
return filter_proto | def function[set_composite_filter, parameter[filter_proto, op]]:
constant[Set composite filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
op: datastore.CompositeFilter.Operation
filters: vararg list of datastore.Filter
Returns:
the same datastore.Filter.
Usage:
>>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND,
... set_property_filter(datastore.Filter(), ...),
... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ...
]
call[name[filter_proto].Clear, parameter[]]
variable[cf] assign[=] name[filter_proto].composite_filter
name[cf].op assign[=] name[op]
for taget[name[f]] in starred[name[filters]] begin[:]
call[call[name[cf].filters.add, parameter[]].CopyFrom, parameter[name[f]]]
return[name[filter_proto]] | keyword[def] identifier[set_composite_filter] ( identifier[filter_proto] , identifier[op] ,* identifier[filters] ):
literal[string]
identifier[filter_proto] . identifier[Clear] ()
identifier[cf] = identifier[filter_proto] . identifier[composite_filter]
identifier[cf] . identifier[op] = identifier[op]
keyword[for] identifier[f] keyword[in] identifier[filters] :
identifier[cf] . identifier[filters] . identifier[add] (). identifier[CopyFrom] ( identifier[f] )
keyword[return] identifier[filter_proto] | def set_composite_filter(filter_proto, op, *filters):
"""Set composite filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
op: datastore.CompositeFilter.Operation
filters: vararg list of datastore.Filter
Returns:
the same datastore.Filter.
Usage:
>>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND,
... set_property_filter(datastore.Filter(), ...),
... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ...
"""
filter_proto.Clear()
cf = filter_proto.composite_filter
cf.op = op
for f in filters:
cf.filters.add().CopyFrom(f) # depends on [control=['for'], data=['f']]
return filter_proto |
def calibrate(self):
'''
        Calibration involves probing for the top plate to get the plate height
'''
if self._driver and self._driver.is_connected():
self._driver.probe_plate()
# return if successful or not?
self._engaged = False | def function[calibrate, parameter[self]]:
constant[
Calibration involves probing for top plate to get the plate height
]
if <ast.BoolOp object at 0x7da20c7cad10> begin[:]
call[name[self]._driver.probe_plate, parameter[]]
name[self]._engaged assign[=] constant[False] | keyword[def] identifier[calibrate] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_driver] keyword[and] identifier[self] . identifier[_driver] . identifier[is_connected] ():
identifier[self] . identifier[_driver] . identifier[probe_plate] ()
identifier[self] . identifier[_engaged] = keyword[False] | def calibrate(self):
"""
Calibration involves probing for top plate to get the plate height
"""
if self._driver and self._driver.is_connected():
self._driver.probe_plate()
# return if successful or not?
self._engaged = False # depends on [control=['if'], data=[]] |
def setupViewletByName(self, name):
""" Constructs a viewlet instance by its name.
Viewlet update() and render() method are not called.
@return: Viewlet instance of None if viewlet with name does not exist
"""
context = aq_inner(self.context)
request = self.request
        # Perform viewlet registration look-up
# from adapters registry
reg = self.getViewletByName(name)
if reg is None:
return None
# factory method is responsible for creating the viewlet instance
factory = reg.factory
# Create viewlet and put it to the acquisition chain
        # Viewlets need initialization parameters: context, request, view
try:
viewlet = factory(context, request, self, None).__of__(context)
except TypeError:
# Bad constructor call parameters
raise RuntimeError(
"Unable to initialize viewlet {}. "
"Factory method {} call failed."
.format(name, str(factory)))
return viewlet | def function[setupViewletByName, parameter[self, name]]:
constant[ Constructs a viewlet instance by its name.
Viewlet update() and render() method are not called.
@return: Viewlet instance of None if viewlet with name does not exist
]
variable[context] assign[=] call[name[aq_inner], parameter[name[self].context]]
variable[request] assign[=] name[self].request
variable[reg] assign[=] call[name[self].getViewletByName, parameter[name[name]]]
if compare[name[reg] is constant[None]] begin[:]
return[constant[None]]
variable[factory] assign[=] name[reg].factory
<ast.Try object at 0x7da1b07a1e10>
return[name[viewlet]] | keyword[def] identifier[setupViewletByName] ( identifier[self] , identifier[name] ):
literal[string]
identifier[context] = identifier[aq_inner] ( identifier[self] . identifier[context] )
identifier[request] = identifier[self] . identifier[request]
identifier[reg] = identifier[self] . identifier[getViewletByName] ( identifier[name] )
keyword[if] identifier[reg] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[factory] = identifier[reg] . identifier[factory]
keyword[try] :
identifier[viewlet] = identifier[factory] ( identifier[context] , identifier[request] , identifier[self] , keyword[None] ). identifier[__of__] ( identifier[context] )
keyword[except] identifier[TypeError] :
keyword[raise] identifier[RuntimeError] (
literal[string]
literal[string]
. identifier[format] ( identifier[name] , identifier[str] ( identifier[factory] )))
keyword[return] identifier[viewlet] | def setupViewletByName(self, name):
""" Constructs a viewlet instance by its name.
Viewlet update() and render() method are not called.
        @return: Viewlet instance or None if viewlet with name does not exist
"""
context = aq_inner(self.context)
request = self.request
# Perform viewlet regisration look-up
# from adapters registry
reg = self.getViewletByName(name)
if reg is None:
return None # depends on [control=['if'], data=[]]
# factory method is responsible for creating the viewlet instance
factory = reg.factory
# Create viewlet and put it to the acquisition chain
        # Viewlets need initialization parameters: context, request, view
try:
viewlet = factory(context, request, self, None).__of__(context) # depends on [control=['try'], data=[]]
except TypeError:
# Bad constructor call parameters
raise RuntimeError('Unable to initialize viewlet {}. Factory method {} call failed.'.format(name, str(factory))) # depends on [control=['except'], data=[]]
return viewlet |
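A follow-up sketch showing the calls the docstring says are left to the caller; "view" and the viewlet name are hypothetical, assuming a browser view that inherits the method above.

viewlet = view.setupViewletByName('plone.logo')  # hypothetical viewlet name
if viewlet is not None:
    viewlet.update()         # the helper leaves update() to the caller
    html = viewlet.render()  # and render() as well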
def open_listing_page(trailing_part_of_url):
"""
Opens a BBC radio tracklisting page based on trailing part of url.
    Returns an lxml ElementTree derived from that page.
trailing_part_of_url: a string, like the pid or e.g. pid/segments.inc
"""
base_url = 'http://www.bbc.co.uk/programmes/'
print("Opening web page: " + base_url + trailing_part_of_url)
try:
html = requests.get(base_url + trailing_part_of_url).text
except (IOError, NameError):
print("Error opening web page.")
print("Check network connection and/or programme id.")
sys.exit(1)
try:
return lxml.html.fromstring(html)
except lxml.etree.ParserError:
print("Error trying to parse web page.")
print("Maybe there's no programme listing?")
sys.exit(1) | def function[open_listing_page, parameter[trailing_part_of_url]]:
constant[
Opens a BBC radio tracklisting page based on trailing part of url.
Returns a lxml ElementTree derived from that page.
trailing_part_of_url: a string, like the pid or e.g. pid/segments.inc
]
variable[base_url] assign[=] constant[http://www.bbc.co.uk/programmes/]
call[name[print], parameter[binary_operation[binary_operation[constant[Opening web page: ] + name[base_url]] + name[trailing_part_of_url]]]]
<ast.Try object at 0x7da20e960c10>
<ast.Try object at 0x7da20e961600> | keyword[def] identifier[open_listing_page] ( identifier[trailing_part_of_url] ):
literal[string]
identifier[base_url] = literal[string]
identifier[print] ( literal[string] + identifier[base_url] + identifier[trailing_part_of_url] )
keyword[try] :
identifier[html] = identifier[requests] . identifier[get] ( identifier[base_url] + identifier[trailing_part_of_url] ). identifier[text]
keyword[except] ( identifier[IOError] , identifier[NameError] ):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[try] :
keyword[return] identifier[lxml] . identifier[html] . identifier[fromstring] ( identifier[html] )
keyword[except] identifier[lxml] . identifier[etree] . identifier[ParserError] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] ) | def open_listing_page(trailing_part_of_url):
"""
Opens a BBC radio tracklisting page based on trailing part of url.
    Returns an lxml ElementTree derived from that page.
trailing_part_of_url: a string, like the pid or e.g. pid/segments.inc
"""
base_url = 'http://www.bbc.co.uk/programmes/'
print('Opening web page: ' + base_url + trailing_part_of_url)
try:
html = requests.get(base_url + trailing_part_of_url).text # depends on [control=['try'], data=[]]
except (IOError, NameError):
print('Error opening web page.')
print('Check network connection and/or programme id.')
sys.exit(1) # depends on [control=['except'], data=[]]
try:
return lxml.html.fromstring(html) # depends on [control=['try'], data=[]]
except lxml.etree.ParserError:
print('Error trying to parse web page.')
print("Maybe there's no programme listing?")
sys.exit(1) # depends on [control=['except'], data=[]] |
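The same fetch-and-parse pattern in stand-alone form, for illustration; the URL scheme is copied from the function above and the programme id is made up.

import sys
import lxml.etree
import lxml.html
import requests

pid = "b006qjnv"  # hypothetical programme id
try:
    html = requests.get("http://www.bbc.co.uk/programmes/" + pid).text
    tree = lxml.html.fromstring(html)
except (IOError, lxml.etree.ParserError) as exc:
    sys.exit("Could not fetch or parse listing: %s" % exc)
print(tree.findtext(".//title"))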
def refresh_content(self, order=None, name=None):
"""
Re-download all subscriptions and reset the page index
"""
# reddit.get_my_subreddits() does not support sorting by order
if order:
self.term.flash()
return
with self.term.loader():
self.content = SubscriptionContent.from_user(
self.reddit, self.term.loader, self.content_type)
if not self.term.loader.exception:
self.nav = Navigator(self.content.get) | def function[refresh_content, parameter[self, order, name]]:
constant[
Re-download all subscriptions and reset the page index
]
if name[order] begin[:]
call[name[self].term.flash, parameter[]]
return[None]
with call[name[self].term.loader, parameter[]] begin[:]
name[self].content assign[=] call[name[SubscriptionContent].from_user, parameter[name[self].reddit, name[self].term.loader, name[self].content_type]]
if <ast.UnaryOp object at 0x7da18fe917b0> begin[:]
name[self].nav assign[=] call[name[Navigator], parameter[name[self].content.get]] | keyword[def] identifier[refresh_content] ( identifier[self] , identifier[order] = keyword[None] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[order] :
identifier[self] . identifier[term] . identifier[flash] ()
keyword[return]
keyword[with] identifier[self] . identifier[term] . identifier[loader] ():
identifier[self] . identifier[content] = identifier[SubscriptionContent] . identifier[from_user] (
identifier[self] . identifier[reddit] , identifier[self] . identifier[term] . identifier[loader] , identifier[self] . identifier[content_type] )
keyword[if] keyword[not] identifier[self] . identifier[term] . identifier[loader] . identifier[exception] :
identifier[self] . identifier[nav] = identifier[Navigator] ( identifier[self] . identifier[content] . identifier[get] ) | def refresh_content(self, order=None, name=None):
"""
Re-download all subscriptions and reset the page index
"""
# reddit.get_my_subreddits() does not support sorting by order
if order:
self.term.flash()
return # depends on [control=['if'], data=[]]
with self.term.loader():
self.content = SubscriptionContent.from_user(self.reddit, self.term.loader, self.content_type) # depends on [control=['with'], data=[]]
if not self.term.loader.exception:
self.nav = Navigator(self.content.get) # depends on [control=['if'], data=[]] |
def split_unit(self, unit_id, indices):
'''This function splits a root from the curation tree according to the given unit_id and indices. It creates two new unit_ids
and roots that have the split root as a child. This function splits the spike train of the root by the given indices.
Parameters
----------
unit_id: int
The unit id to be split
indices: list
The indices of the unit spike train at which the spike train will be split.
'''
root_ids = []
for i in range(len(self._roots)):
root_id = self._roots[i].unit_id
root_ids.append(root_id)
if(unit_id in root_ids):
indices_1 = np.sort(np.asarray(list(set(indices))))
root_index = root_ids.index(unit_id)
new_child = self._roots[root_index]
original_spike_train = self._roots[root_index].get_spike_train()
try:
spike_train_1 = original_spike_train[indices_1]
except IndexError:
                print(str(indices) + " out of bounds for the spike train of " + str(unit_id))
                raise
indices_2 = list(set(range(len(original_spike_train))) - set(indices_1))
spike_train_2 = original_spike_train[indices_2]
del original_spike_train
new_root_1_id = max(self._all_ids)+1
self._all_ids.append(new_root_1_id)
new_root_1 = Unit(new_root_1_id)
new_root_1.add_child(new_child)
new_root_1.set_spike_train(spike_train_1)
new_root_2_id = max(self._all_ids)+1
self._all_ids.append(new_root_2_id)
new_root_2 = Unit(new_root_2_id)
new_root_2.add_child(new_child)
new_root_2.set_spike_train(spike_train_2)
self._roots.append(new_root_1)
self._roots.append(new_root_2)
for feature_name in self.get_unit_spike_feature_names(unit_id):
full_features = self.get_unit_spike_features(unit_id, feature_name)
self.set_unit_spike_features(new_root_1_id, feature_name, full_features[indices_1])
self.set_unit_spike_features(new_root_2_id, feature_name, full_features[indices_2])
del self._unit_features[unit_id]
del self._roots[root_index]
else:
raise ValueError(str(unit_id) + " non-valid unit id") | def function[split_unit, parameter[self, unit_id, indices]]:
constant[This function splits a root from the curation tree according to the given unit_id and indices. It creates two new unit_ids
and roots that have the split root as a child. This function splits the spike train of the root by the given indices.
Parameters
----------
unit_id: int
The unit id to be split
indices: list
The indices of the unit spike train at which the spike train will be split.
]
variable[root_ids] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]._roots]]]]] begin[:]
variable[root_id] assign[=] call[name[self]._roots][name[i]].unit_id
call[name[root_ids].append, parameter[name[root_id]]]
if compare[name[unit_id] in name[root_ids]] begin[:]
variable[indices_1] assign[=] call[name[np].sort, parameter[call[name[np].asarray, parameter[call[name[list], parameter[call[name[set], parameter[name[indices]]]]]]]]]
variable[root_index] assign[=] call[name[root_ids].index, parameter[name[unit_id]]]
variable[new_child] assign[=] call[name[self]._roots][name[root_index]]
variable[original_spike_train] assign[=] call[call[name[self]._roots][name[root_index]].get_spike_train, parameter[]]
<ast.Try object at 0x7da1b26ac1f0>
variable[indices_2] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[call[name[range], parameter[call[name[len], parameter[name[original_spike_train]]]]]]] - call[name[set], parameter[name[indices_1]]]]]]
variable[spike_train_2] assign[=] call[name[original_spike_train]][name[indices_2]]
<ast.Delete object at 0x7da1b26ac730>
variable[new_root_1_id] assign[=] binary_operation[call[name[max], parameter[name[self]._all_ids]] + constant[1]]
call[name[self]._all_ids.append, parameter[name[new_root_1_id]]]
variable[new_root_1] assign[=] call[name[Unit], parameter[name[new_root_1_id]]]
call[name[new_root_1].add_child, parameter[name[new_child]]]
call[name[new_root_1].set_spike_train, parameter[name[spike_train_1]]]
variable[new_root_2_id] assign[=] binary_operation[call[name[max], parameter[name[self]._all_ids]] + constant[1]]
call[name[self]._all_ids.append, parameter[name[new_root_2_id]]]
variable[new_root_2] assign[=] call[name[Unit], parameter[name[new_root_2_id]]]
call[name[new_root_2].add_child, parameter[name[new_child]]]
call[name[new_root_2].set_spike_train, parameter[name[spike_train_2]]]
call[name[self]._roots.append, parameter[name[new_root_1]]]
call[name[self]._roots.append, parameter[name[new_root_2]]]
for taget[name[feature_name]] in starred[call[name[self].get_unit_spike_feature_names, parameter[name[unit_id]]]] begin[:]
variable[full_features] assign[=] call[name[self].get_unit_spike_features, parameter[name[unit_id], name[feature_name]]]
call[name[self].set_unit_spike_features, parameter[name[new_root_1_id], name[feature_name], call[name[full_features]][name[indices_1]]]]
call[name[self].set_unit_spike_features, parameter[name[new_root_2_id], name[feature_name], call[name[full_features]][name[indices_2]]]]
<ast.Delete object at 0x7da1b26aed70>
<ast.Delete object at 0x7da1b26af880> | keyword[def] identifier[split_unit] ( identifier[self] , identifier[unit_id] , identifier[indices] ):
literal[string]
identifier[root_ids] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[_roots] )):
identifier[root_id] = identifier[self] . identifier[_roots] [ identifier[i] ]. identifier[unit_id]
identifier[root_ids] . identifier[append] ( identifier[root_id] )
keyword[if] ( identifier[unit_id] keyword[in] identifier[root_ids] ):
identifier[indices_1] = identifier[np] . identifier[sort] ( identifier[np] . identifier[asarray] ( identifier[list] ( identifier[set] ( identifier[indices] ))))
identifier[root_index] = identifier[root_ids] . identifier[index] ( identifier[unit_id] )
identifier[new_child] = identifier[self] . identifier[_roots] [ identifier[root_index] ]
identifier[original_spike_train] = identifier[self] . identifier[_roots] [ identifier[root_index] ]. identifier[get_spike_train] ()
keyword[try] :
identifier[spike_train_1] = identifier[original_spike_train] [ identifier[indices_1] ]
keyword[except] identifier[IndexError] :
identifier[print] ( identifier[str] ( identifier[indices] )+ literal[string] + identifier[str] ( identifier[unit_id] ))
identifier[indices_2] = identifier[list] ( identifier[set] ( identifier[range] ( identifier[len] ( identifier[original_spike_train] )))- identifier[set] ( identifier[indices_1] ))
identifier[spike_train_2] = identifier[original_spike_train] [ identifier[indices_2] ]
keyword[del] identifier[original_spike_train]
identifier[new_root_1_id] = identifier[max] ( identifier[self] . identifier[_all_ids] )+ literal[int]
identifier[self] . identifier[_all_ids] . identifier[append] ( identifier[new_root_1_id] )
identifier[new_root_1] = identifier[Unit] ( identifier[new_root_1_id] )
identifier[new_root_1] . identifier[add_child] ( identifier[new_child] )
identifier[new_root_1] . identifier[set_spike_train] ( identifier[spike_train_1] )
identifier[new_root_2_id] = identifier[max] ( identifier[self] . identifier[_all_ids] )+ literal[int]
identifier[self] . identifier[_all_ids] . identifier[append] ( identifier[new_root_2_id] )
identifier[new_root_2] = identifier[Unit] ( identifier[new_root_2_id] )
identifier[new_root_2] . identifier[add_child] ( identifier[new_child] )
identifier[new_root_2] . identifier[set_spike_train] ( identifier[spike_train_2] )
identifier[self] . identifier[_roots] . identifier[append] ( identifier[new_root_1] )
identifier[self] . identifier[_roots] . identifier[append] ( identifier[new_root_2] )
keyword[for] identifier[feature_name] keyword[in] identifier[self] . identifier[get_unit_spike_feature_names] ( identifier[unit_id] ):
identifier[full_features] = identifier[self] . identifier[get_unit_spike_features] ( identifier[unit_id] , identifier[feature_name] )
identifier[self] . identifier[set_unit_spike_features] ( identifier[new_root_1_id] , identifier[feature_name] , identifier[full_features] [ identifier[indices_1] ])
identifier[self] . identifier[set_unit_spike_features] ( identifier[new_root_2_id] , identifier[feature_name] , identifier[full_features] [ identifier[indices_2] ])
keyword[del] identifier[self] . identifier[_unit_features] [ identifier[unit_id] ]
keyword[del] identifier[self] . identifier[_roots] [ identifier[root_index] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( identifier[str] ( identifier[unit_id] )+ literal[string] ) | def split_unit(self, unit_id, indices):
"""This function splits a root from the curation tree according to the given unit_id and indices. It creates two new unit_ids
and roots that have the split root as a child. This function splits the spike train of the root by the given indices.
Parameters
----------
unit_id: int
The unit id to be split
indices: list
The indices of the unit spike train at which the spike train will be split.
"""
root_ids = []
for i in range(len(self._roots)):
root_id = self._roots[i].unit_id
root_ids.append(root_id) # depends on [control=['for'], data=['i']]
if unit_id in root_ids:
indices_1 = np.sort(np.asarray(list(set(indices))))
root_index = root_ids.index(unit_id)
new_child = self._roots[root_index]
original_spike_train = self._roots[root_index].get_spike_train()
try:
spike_train_1 = original_spike_train[indices_1] # depends on [control=['try'], data=[]]
except IndexError:
print(str(indices) + ' out of bounds for the spike train of ' + str(unit_id)) # depends on [control=['except'], data=[]]
indices_2 = list(set(range(len(original_spike_train))) - set(indices_1))
spike_train_2 = original_spike_train[indices_2]
del original_spike_train
new_root_1_id = max(self._all_ids) + 1
self._all_ids.append(new_root_1_id)
new_root_1 = Unit(new_root_1_id)
new_root_1.add_child(new_child)
new_root_1.set_spike_train(spike_train_1)
new_root_2_id = max(self._all_ids) + 1
self._all_ids.append(new_root_2_id)
new_root_2 = Unit(new_root_2_id)
new_root_2.add_child(new_child)
new_root_2.set_spike_train(spike_train_2)
self._roots.append(new_root_1)
self._roots.append(new_root_2)
for feature_name in self.get_unit_spike_feature_names(unit_id):
full_features = self.get_unit_spike_features(unit_id, feature_name)
self.set_unit_spike_features(new_root_1_id, feature_name, full_features[indices_1])
self.set_unit_spike_features(new_root_2_id, feature_name, full_features[indices_2]) # depends on [control=['for'], data=['feature_name']]
del self._unit_features[unit_id]
del self._roots[root_index] # depends on [control=['if'], data=['unit_id', 'root_ids']]
else:
raise ValueError(str(unit_id) + ' non-valid unit id') |
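A hedged usage sketch, assuming "curation" is a curation-sorting object that exposes the method above together with the usual extractor accessors; the unit id and indices are invented.

# Hypothetical: split unit 3 so its first 100 spikes form one new unit
# and the remaining spikes form another; unit 3 itself is consumed.
curation.split_unit(unit_id=3, indices=list(range(100)))
print(curation.get_unit_ids())  # two fresh ids now replace unit 3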
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Same as parent but sets the widget for any OrderFields to
HiddenTextInput.
"""
if isinstance(db_field, fields.OrderField):
kwargs['widget'] = widgets.HiddenTextInput
return super(ListView, self).formfield_for_dbfield(db_field, **kwargs) | def function[formfield_for_dbfield, parameter[self, db_field]]:
constant[
Same as parent but sets the widget for any OrderFields to
HiddenTextInput.
]
if call[name[isinstance], parameter[name[db_field], name[fields].OrderField]] begin[:]
call[name[kwargs]][constant[widget]] assign[=] name[widgets].HiddenTextInput
return[call[call[name[super], parameter[name[ListView], name[self]]].formfield_for_dbfield, parameter[name[db_field]]]] | keyword[def] identifier[formfield_for_dbfield] ( identifier[self] , identifier[db_field] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[db_field] , identifier[fields] . identifier[OrderField] ):
identifier[kwargs] [ literal[string] ]= identifier[widgets] . identifier[HiddenTextInput]
keyword[return] identifier[super] ( identifier[ListView] , identifier[self] ). identifier[formfield_for_dbfield] ( identifier[db_field] ,** identifier[kwargs] ) | def formfield_for_dbfield(self, db_field, **kwargs):
"""
Same as parent but sets the widget for any OrderFields to
HiddenTextInput.
"""
if isinstance(db_field, fields.OrderField):
kwargs['widget'] = widgets.HiddenTextInput # depends on [control=['if'], data=[]]
return super(ListView, self).formfield_for_dbfield(db_field, **kwargs) |
def _get_add_trustee_cmd(self, trustee):
'''Get tmsh command to add a trusted device.
:param trustee: ManagementRoot object -- device to add as trusted
:returns: str -- tmsh command to add trustee
'''
trustee_info = pollster(get_device_info)(trustee)
username = trustee._meta_data['username']
password = trustee._meta_data['password']
return 'tmsh::modify cm trust-domain Root ca-devices add ' \
'\\{ %s \\} name %s username %s password %s' % \
(trustee_info.managementIp, trustee_info.name, username, password) | def function[_get_add_trustee_cmd, parameter[self, trustee]]:
constant[Get tmsh command to add a trusted device.
:param trustee: ManagementRoot object -- device to add as trusted
:returns: str -- tmsh command to add trustee
]
variable[trustee_info] assign[=] call[call[name[pollster], parameter[name[get_device_info]]], parameter[name[trustee]]]
variable[username] assign[=] call[name[trustee]._meta_data][constant[username]]
variable[password] assign[=] call[name[trustee]._meta_data][constant[password]]
return[binary_operation[constant[tmsh::modify cm trust-domain Root ca-devices add \{ %s \} name %s username %s password %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b17f9e10>, <ast.Attribute object at 0x7da1b17f88b0>, <ast.Name object at 0x7da1b17f82b0>, <ast.Name object at 0x7da1b17f8be0>]]]] | keyword[def] identifier[_get_add_trustee_cmd] ( identifier[self] , identifier[trustee] ):
literal[string]
identifier[trustee_info] = identifier[pollster] ( identifier[get_device_info] )( identifier[trustee] )
identifier[username] = identifier[trustee] . identifier[_meta_data] [ literal[string] ]
identifier[password] = identifier[trustee] . identifier[_meta_data] [ literal[string] ]
keyword[return] literal[string] literal[string] %( identifier[trustee_info] . identifier[managementIp] , identifier[trustee_info] . identifier[name] , identifier[username] , identifier[password] ) | def _get_add_trustee_cmd(self, trustee):
"""Get tmsh command to add a trusted device.
:param trustee: ManagementRoot object -- device to add as trusted
:returns: str -- tmsh command to add trustee
"""
trustee_info = pollster(get_device_info)(trustee)
username = trustee._meta_data['username']
password = trustee._meta_data['password']
return 'tmsh::modify cm trust-domain Root ca-devices add \\{ %s \\} name %s username %s password %s' % (trustee_info.managementIp, trustee_info.name, username, password) |
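Rendering the same tmsh template with dummy values, so the command shape is visible without a device; all values are placeholders.

cmd = ('tmsh::modify cm trust-domain Root ca-devices add '
       '\\{ %s \\} name %s username %s password %s'
       % ('10.0.0.2', 'bigip2.example.com', 'admin', 'secret'))
print(cmd)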
def help(self):
"""Return full help message for the step wizard.
        :returns: A message object containing help text.
:rtype: m.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(self.help_heading())
message.add(self.help_content())
return message | def function[help, parameter[self]]:
constant[Return full help message for the step wizard.
:returns: A message object contains help text.
:rtype: m.Message
]
variable[message] assign[=] call[name[m].Message, parameter[]]
call[name[message].add, parameter[call[name[m].Brand, parameter[]]]]
call[name[message].add, parameter[call[name[self].help_heading, parameter[]]]]
call[name[message].add, parameter[call[name[self].help_content, parameter[]]]]
return[name[message]] | keyword[def] identifier[help] ( identifier[self] ):
literal[string]
identifier[message] = identifier[m] . identifier[Message] ()
identifier[message] . identifier[add] ( identifier[m] . identifier[Brand] ())
identifier[message] . identifier[add] ( identifier[self] . identifier[help_heading] ())
identifier[message] . identifier[add] ( identifier[self] . identifier[help_content] ())
keyword[return] identifier[message] | def help(self):
"""Return full help message for the step wizard.
        :returns: A message object containing help text.
:rtype: m.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(self.help_heading())
message.add(self.help_content())
return message |
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
""" Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
"""
return function == DEFINITE \
and definite_article(word, gender, role) \
or indefinite_article(word, gender, role) | def function[article, parameter[word, function, gender, role]]:
constant[ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
]
return[<ast.BoolOp object at 0x7da1b2346800>] | keyword[def] identifier[article] ( identifier[word] , identifier[function] = identifier[INDEFINITE] , identifier[gender] = identifier[MALE] , identifier[role] = identifier[SUBJECT] ):
literal[string]
keyword[return] identifier[function] == identifier[DEFINITE] keyword[and] identifier[definite_article] ( identifier[word] , identifier[gender] , identifier[role] ) keyword[or] identifier[indefinite_article] ( identifier[word] , identifier[gender] , identifier[role] ) | def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
""" Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
"""
return function == DEFINITE and definite_article(word, gender, role) or indefinite_article(word, gender, role) |
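A usage sketch assuming the pattern.de module this helper appears to come from; the constants match the signature above and the outputs are the standard German articles.

# Hypothetical import path for the helper and its constants:
# from pattern.de import article, DEFINITE, FEMALE, SUBJECT
print(article("Katze", function=DEFINITE, gender=FEMALE, role=SUBJECT))
# -> "die"
print(article("Katze", gender=FEMALE))  # indefinite default -> "eine"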
def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) | def function[infer_type, parameter[self, in_type]]:
constant[infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
]
return[tuple[[<ast.Name object at 0x7da1b1e7cc40>, <ast.BinOp object at 0x7da1b1e7cc10>, <ast.BinOp object at 0x7da1b1e40a00>]]] | keyword[def] identifier[infer_type] ( identifier[self] , identifier[in_type] ):
literal[string]
keyword[return] identifier[in_type] ,[ identifier[in_type] [ literal[int] ]]* identifier[len] ( identifier[self] . identifier[list_outputs] ()),[ identifier[in_type] [ literal[int] ]]* identifier[len] ( identifier[self] . identifier[list_auxiliary_states] ()) | def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return (in_type, [in_type[0]] * len(self.list_outputs()), [in_type[0]] * len(self.list_auxiliary_states())) |
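A self-contained sketch of the convention this hook follows (modelled on mxnet's CustomOpProp); the class here is a minimal stand-in, not the real base class.

import numpy as np

class IdentityProp:  # stand-in for mx.operator.CustomOpProp
    def list_outputs(self):
        return ['output']
    def list_auxiliary_states(self):
        return []
    def infer_type(self, in_type):
        # Propagate the first input dtype to every output and aux state,
        # mirroring the default implementation above.
        n_out = len(self.list_outputs())
        n_aux = len(self.list_auxiliary_states())
        return in_type, [in_type[0]] * n_out, [in_type[0]] * n_aux

print(IdentityProp().infer_type([np.float32]))  # ([float32], [float32], [])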
def OnVideoCell(self, event):
"""Event handler for video cell toggle button"""
if self.video_cell_button_id == event.GetId():
if event.IsChecked():
wildcard = _("Media files") + " (*.*)|*.*"
videofile, __ = self.get_filepath_findex_from_user(
wildcard, "Choose video or audio file", wx.OPEN)
post_command_event(self, self.VideoCellMsg,
videofile=videofile)
else:
post_command_event(self, self.VideoCellMsg, videofile=False)
event.Skip() | def function[OnVideoCell, parameter[self, event]]:
constant[Event handler for video cell toggle button]
if compare[name[self].video_cell_button_id equal[==] call[name[event].GetId, parameter[]]] begin[:]
if call[name[event].IsChecked, parameter[]] begin[:]
variable[wildcard] assign[=] binary_operation[call[name[_], parameter[constant[Media files]]] + constant[ (*.*)|*.*]]
<ast.Tuple object at 0x7da1b17dc760> assign[=] call[name[self].get_filepath_findex_from_user, parameter[name[wildcard], constant[Choose video or audio file], name[wx].OPEN]]
call[name[post_command_event], parameter[name[self], name[self].VideoCellMsg]]
call[name[event].Skip, parameter[]] | keyword[def] identifier[OnVideoCell] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[self] . identifier[video_cell_button_id] == identifier[event] . identifier[GetId] ():
keyword[if] identifier[event] . identifier[IsChecked] ():
identifier[wildcard] = identifier[_] ( literal[string] )+ literal[string]
identifier[videofile] , identifier[__] = identifier[self] . identifier[get_filepath_findex_from_user] (
identifier[wildcard] , literal[string] , identifier[wx] . identifier[OPEN] )
identifier[post_command_event] ( identifier[self] , identifier[self] . identifier[VideoCellMsg] ,
identifier[videofile] = identifier[videofile] )
keyword[else] :
identifier[post_command_event] ( identifier[self] , identifier[self] . identifier[VideoCellMsg] , identifier[videofile] = keyword[False] )
identifier[event] . identifier[Skip] () | def OnVideoCell(self, event):
"""Event handler for video cell toggle button"""
if self.video_cell_button_id == event.GetId():
if event.IsChecked():
wildcard = _('Media files') + ' (*.*)|*.*'
(videofile, __) = self.get_filepath_findex_from_user(wildcard, 'Choose video or audio file', wx.OPEN)
post_command_event(self, self.VideoCellMsg, videofile=videofile) # depends on [control=['if'], data=[]]
else:
post_command_event(self, self.VideoCellMsg, videofile=False) # depends on [control=['if'], data=[]]
event.Skip() |
def _in_list(self, original_list, item):
"""
        Check whether an item is contained in a list.
:param original_list: The list.
:type original_list: list(object)
:param item: The item.
:type item: hatemile.util.html.htmldomelement.HTMLDOMElement
        :return: True if the item is contained in the list or False if not.
:rtype: bool
"""
# pylint: disable=no-self-use
for item_list in original_list:
if item is item_list:
return True
return False | def function[_in_list, parameter[self, original_list, item]]:
constant[
Check that an item as contained in a list.
:param original_list: The list.
:type original_list: list(object)
:param item: The item.
:type item: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: True if the item contained in the list or False if not.
:rtype: bool
]
for taget[name[item_list]] in starred[name[original_list]] begin[:]
if compare[name[item] is name[item_list]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_in_list] ( identifier[self] , identifier[original_list] , identifier[item] ):
literal[string]
keyword[for] identifier[item_list] keyword[in] identifier[original_list] :
keyword[if] identifier[item] keyword[is] identifier[item_list] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def _in_list(self, original_list, item):
"""
Check that an item as contained in a list.
:param original_list: The list.
:type original_list: list(object)
:param item: The item.
:type item: hatemile.util.html.htmldomelement.HTMLDOMElement
        :return: True if the item is contained in the list or False if not.
:rtype: bool
"""
# pylint: disable=no-self-use
for item_list in original_list:
if item is item_list:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item_list']]
return False |
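Why the helper compares with "is" instead of relying on "in": list membership uses __eq__, which conflates distinct but equal elements. A stand-alone sketch:

class Element:
    def __eq__(self, other):  # value equality, as many DOM wrappers define
        return isinstance(other, Element)

a, b = Element(), Element()
print(b in [a])                  # True  -- equality-based membership
print(any(x is b for x in [a]))  # False -- identity check, like _in_list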
def pw(
ctx,
key_pattern,
user_pattern,
mode,
strict_flag,
user_flag,
file,
edit_subcommand,
gen_subcommand,
):
"""Search for USER and KEY in GPG-encrypted password file."""
# install silent Ctrl-C handler
def handle_sigint(*_):
click.echo()
ctx.exit(1)
signal.signal(signal.SIGINT, handle_sigint)
# invoke a subcommand?
if gen_subcommand:
length = int(key_pattern) if key_pattern else None
generate_password(mode, length)
return
elif edit_subcommand:
launch_editor(ctx, file)
return
# verify that database file is present
if not os.path.exists(file):
click.echo("error: password store not found at '%s'" % file, err=True)
ctx.exit(1)
# load database
store = Store.load(file)
# if no user query provided, split key query according to right-most "@" sign (since usernames are typically email addresses)
if not user_pattern:
user_pattern, _, key_pattern = key_pattern.rpartition("@")
# search database
results = store.search(key_pattern, user_pattern)
results = list(results)
# if strict flag is enabled, check that precisely a single record was found
if strict_flag and len(results) != 1:
click.echo(
"error: multiple or no records found (but using --strict flag)", err=True
)
ctx.exit(2)
# raw mode?
if mode == Mode.RAW:
for entry in results:
click.echo(entry.user if user_flag else entry.password)
return
# print results
for idx, entry in enumerate(results):
# start with key and user
line = highlight_match(key_pattern, entry.key)
if entry.user:
line += ": " + highlight_match(user_pattern, entry.user)
    # add password or copy&paste success message
if mode == Mode.ECHO and not user_flag:
line += " | " + style_password(entry.password)
elif mode == Mode.COPY and idx == 0:
try:
import pyperclip
pyperclip.copy(entry.user if user_flag else entry.password)
result = style_success(
"*** %s COPIED TO CLIPBOARD ***"
% ("USERNAME" if user_flag else "PASSWORD")
)
except ImportError:
result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
line += " | " + result
# add notes
if entry.notes:
if idx == 0:
line += "\n"
line += "\n".join(" " + line for line in entry.notes.splitlines())
else:
lines = entry.notes.splitlines()
line += " | " + lines[0]
if len(lines) > 1:
line += " (...)"
click.echo(line) | def function[pw, parameter[ctx, key_pattern, user_pattern, mode, strict_flag, user_flag, file, edit_subcommand, gen_subcommand]]:
constant[Search for USER and KEY in GPG-encrypted password file.]
def function[handle_sigint, parameter[]]:
call[name[click].echo, parameter[]]
call[name[ctx].exit, parameter[constant[1]]]
call[name[signal].signal, parameter[name[signal].SIGINT, name[handle_sigint]]]
if name[gen_subcommand] begin[:]
variable[length] assign[=] <ast.IfExp object at 0x7da1b229b670>
call[name[generate_password], parameter[name[mode], name[length]]]
return[None]
if <ast.UnaryOp object at 0x7da1b229afb0> begin[:]
call[name[click].echo, parameter[binary_operation[constant[error: password store not found at '%s'] <ast.Mod object at 0x7da2590d6920> name[file]]]]
call[name[ctx].exit, parameter[constant[1]]]
variable[store] assign[=] call[name[Store].load, parameter[name[file]]]
if <ast.UnaryOp object at 0x7da1b22985e0> begin[:]
<ast.Tuple object at 0x7da1b2298fd0> assign[=] call[name[key_pattern].rpartition, parameter[constant[@]]]
variable[results] assign[=] call[name[store].search, parameter[name[key_pattern], name[user_pattern]]]
variable[results] assign[=] call[name[list], parameter[name[results]]]
if <ast.BoolOp object at 0x7da1b229baf0> begin[:]
call[name[click].echo, parameter[constant[error: multiple or no records found (but using --strict flag)]]]
call[name[ctx].exit, parameter[constant[2]]]
if compare[name[mode] equal[==] name[Mode].RAW] begin[:]
for taget[name[entry]] in starred[name[results]] begin[:]
call[name[click].echo, parameter[<ast.IfExp object at 0x7da18bc713c0>]]
return[None]
for taget[tuple[[<ast.Name object at 0x7da18bc732b0>, <ast.Name object at 0x7da18bc71510>]]] in starred[call[name[enumerate], parameter[name[results]]]] begin[:]
variable[line] assign[=] call[name[highlight_match], parameter[name[key_pattern], name[entry].key]]
if name[entry].user begin[:]
<ast.AugAssign object at 0x7da18bc73d30>
if <ast.BoolOp object at 0x7da18bc73730> begin[:]
<ast.AugAssign object at 0x7da18bc71750>
if name[entry].notes begin[:]
if compare[name[idx] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18bc723e0>
<ast.AugAssign object at 0x7da18bc72e30>
call[name[click].echo, parameter[name[line]]] | keyword[def] identifier[pw] (
identifier[ctx] ,
identifier[key_pattern] ,
identifier[user_pattern] ,
identifier[mode] ,
identifier[strict_flag] ,
identifier[user_flag] ,
identifier[file] ,
identifier[edit_subcommand] ,
identifier[gen_subcommand] ,
):
literal[string]
keyword[def] identifier[handle_sigint] (* identifier[_] ):
identifier[click] . identifier[echo] ()
identifier[ctx] . identifier[exit] ( literal[int] )
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[handle_sigint] )
keyword[if] identifier[gen_subcommand] :
identifier[length] = identifier[int] ( identifier[key_pattern] ) keyword[if] identifier[key_pattern] keyword[else] keyword[None]
identifier[generate_password] ( identifier[mode] , identifier[length] )
keyword[return]
keyword[elif] identifier[edit_subcommand] :
identifier[launch_editor] ( identifier[ctx] , identifier[file] )
keyword[return]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[file] ):
identifier[click] . identifier[echo] ( literal[string] % identifier[file] , identifier[err] = keyword[True] )
identifier[ctx] . identifier[exit] ( literal[int] )
identifier[store] = identifier[Store] . identifier[load] ( identifier[file] )
keyword[if] keyword[not] identifier[user_pattern] :
identifier[user_pattern] , identifier[_] , identifier[key_pattern] = identifier[key_pattern] . identifier[rpartition] ( literal[string] )
identifier[results] = identifier[store] . identifier[search] ( identifier[key_pattern] , identifier[user_pattern] )
identifier[results] = identifier[list] ( identifier[results] )
keyword[if] identifier[strict_flag] keyword[and] identifier[len] ( identifier[results] )!= literal[int] :
identifier[click] . identifier[echo] (
literal[string] , identifier[err] = keyword[True]
)
identifier[ctx] . identifier[exit] ( literal[int] )
keyword[if] identifier[mode] == identifier[Mode] . identifier[RAW] :
keyword[for] identifier[entry] keyword[in] identifier[results] :
identifier[click] . identifier[echo] ( identifier[entry] . identifier[user] keyword[if] identifier[user_flag] keyword[else] identifier[entry] . identifier[password] )
keyword[return]
keyword[for] identifier[idx] , identifier[entry] keyword[in] identifier[enumerate] ( identifier[results] ):
identifier[line] = identifier[highlight_match] ( identifier[key_pattern] , identifier[entry] . identifier[key] )
keyword[if] identifier[entry] . identifier[user] :
identifier[line] += literal[string] + identifier[highlight_match] ( identifier[user_pattern] , identifier[entry] . identifier[user] )
keyword[if] identifier[mode] == identifier[Mode] . identifier[ECHO] keyword[and] keyword[not] identifier[user_flag] :
identifier[line] += literal[string] + identifier[style_password] ( identifier[entry] . identifier[password] )
keyword[elif] identifier[mode] == identifier[Mode] . identifier[COPY] keyword[and] identifier[idx] == literal[int] :
keyword[try] :
keyword[import] identifier[pyperclip]
identifier[pyperclip] . identifier[copy] ( identifier[entry] . identifier[user] keyword[if] identifier[user_flag] keyword[else] identifier[entry] . identifier[password] )
identifier[result] = identifier[style_success] (
literal[string]
%( literal[string] keyword[if] identifier[user_flag] keyword[else] literal[string] )
)
keyword[except] identifier[ImportError] :
identifier[result] = identifier[style_error] ( literal[string] )
identifier[line] += literal[string] + identifier[result]
keyword[if] identifier[entry] . identifier[notes] :
keyword[if] identifier[idx] == literal[int] :
identifier[line] += literal[string]
identifier[line] += literal[string] . identifier[join] ( literal[string] + identifier[line] keyword[for] identifier[line] keyword[in] identifier[entry] . identifier[notes] . identifier[splitlines] ())
keyword[else] :
identifier[lines] = identifier[entry] . identifier[notes] . identifier[splitlines] ()
identifier[line] += literal[string] + identifier[lines] [ literal[int] ]
keyword[if] identifier[len] ( identifier[lines] )> literal[int] :
identifier[line] += literal[string]
identifier[click] . identifier[echo] ( identifier[line] ) | def pw(ctx, key_pattern, user_pattern, mode, strict_flag, user_flag, file, edit_subcommand, gen_subcommand):
"""Search for USER and KEY in GPG-encrypted password file."""
# install silent Ctrl-C handler
def handle_sigint(*_):
click.echo()
ctx.exit(1)
signal.signal(signal.SIGINT, handle_sigint)
# invoke a subcommand?
if gen_subcommand:
length = int(key_pattern) if key_pattern else None
generate_password(mode, length)
return # depends on [control=['if'], data=[]]
elif edit_subcommand:
launch_editor(ctx, file)
return # depends on [control=['if'], data=[]]
# verify that database file is present
if not os.path.exists(file):
click.echo("error: password store not found at '%s'" % file, err=True)
ctx.exit(1) # depends on [control=['if'], data=[]]
# load database
store = Store.load(file)
# if no user query provided, split key query according to right-most "@" sign (since usernames are typically email addresses)
if not user_pattern:
(user_pattern, _, key_pattern) = key_pattern.rpartition('@') # depends on [control=['if'], data=[]]
# search database
results = store.search(key_pattern, user_pattern)
results = list(results)
# if strict flag is enabled, check that precisely a single record was found
if strict_flag and len(results) != 1:
click.echo('error: multiple or no records found (but using --strict flag)', err=True)
ctx.exit(2) # depends on [control=['if'], data=[]]
# raw mode?
if mode == Mode.RAW:
for entry in results:
click.echo(entry.user if user_flag else entry.password) # depends on [control=['for'], data=['entry']]
return # depends on [control=['if'], data=[]]
# print results
for (idx, entry) in enumerate(results):
# start with key and user
line = highlight_match(key_pattern, entry.key)
if entry.user:
line += ': ' + highlight_match(user_pattern, entry.user) # depends on [control=['if'], data=[]]
    # add password or copy&paste success message
if mode == Mode.ECHO and (not user_flag):
line += ' | ' + style_password(entry.password) # depends on [control=['if'], data=[]]
elif mode == Mode.COPY and idx == 0:
try:
import pyperclip
pyperclip.copy(entry.user if user_flag else entry.password)
result = style_success('*** %s COPIED TO CLIPBOARD ***' % ('USERNAME' if user_flag else 'PASSWORD')) # depends on [control=['try'], data=[]]
except ImportError:
result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***') # depends on [control=['except'], data=[]]
line += ' | ' + result # depends on [control=['if'], data=[]]
# add notes
if entry.notes:
if idx == 0:
line += '\n'
line += '\n'.join((' ' + line for line in entry.notes.splitlines())) # depends on [control=['if'], data=[]]
else:
lines = entry.notes.splitlines()
line += ' | ' + lines[0]
if len(lines) > 1:
line += ' (...)' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
click.echo(line) # depends on [control=['for'], data=[]] |
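An in-process invocation sketch using click's test runner; the argument spelling is inferred from the parameter names above and may not match the real CLI's declared options.

from click.testing import CliRunner

runner = CliRunner()
# Hypothetical: search the store for entries matching "github".
result = runner.invoke(pw, ["github"])
print(result.output)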
def binarySearch(self, ip):
"""
" binary search method
" param: ip
"""
if not ip.isdigit(): ip = self.ip2Long(ip)
if self.__indexLen < 1:
self.__f.seek(0)
b = self.__f.read(8)
self.__sPtr = self.getLong(b, 0)
endPtr = self.getLong(b, 4)
self.__indexLen = endPtr - self.__sPtr
startPtr = self.__sPtr
indexLen = self.__indexLen
self.__f.seek(startPtr)
b = self.__f.read(indexLen+12)
l, h, mixPtr = (0, int(indexLen/12), 0)
while l <= h:
m = int((l+h)/2)
ptr = startPtr + m*12
self.__f.seek(ptr)
b = self.__f.read(12)
sip = self.getLong(b, 0)
eip = self.getLong(b, 4)
if ip >= sip:
if ip > eip:
l = m + 1
else:
mixPtr = self.getLong(b, 8)
                    break
else:
h = m - 1
if mixPtr == 0: return "N2"
return self.returnData(mixPtr) | def function[binarySearch, parameter[self, ip]]:
constant[
" binary search method
" param: ip
]
if <ast.UnaryOp object at 0x7da1b15b1a80> begin[:]
variable[ip] assign[=] call[name[self].ip2Long, parameter[name[ip]]]
if compare[name[self].__indexLen less[<] constant[1]] begin[:]
call[name[self].__f.seek, parameter[constant[0]]]
variable[b] assign[=] call[name[self].__f.read, parameter[constant[8]]]
name[self].__sPtr assign[=] call[name[self].getLong, parameter[name[b], constant[0]]]
variable[endPtr] assign[=] call[name[self].getLong, parameter[name[b], constant[4]]]
name[self].__indexLen assign[=] binary_operation[name[endPtr] - name[self].__sPtr]
variable[startPtr] assign[=] name[self].__sPtr
variable[indexLen] assign[=] name[self].__indexLen
call[name[self].__f.seek, parameter[name[startPtr]]]
variable[b] assign[=] call[name[self].__f.read, parameter[binary_operation[name[indexLen] + constant[12]]]]
<ast.Tuple object at 0x7da1b13423e0> assign[=] tuple[[<ast.Constant object at 0x7da1b1342200>, <ast.Call object at 0x7da1b1341750>, <ast.Constant object at 0x7da1b1340670>]]
while compare[name[l] less_or_equal[<=] name[h]] begin[:]
variable[m] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[l] + name[h]] / constant[2]]]]
variable[ptr] assign[=] binary_operation[name[startPtr] + binary_operation[name[m] * constant[12]]]
call[name[self].__f.seek, parameter[name[ptr]]]
variable[b] assign[=] call[name[self].__f.read, parameter[constant[12]]]
variable[sip] assign[=] call[name[self].getLong, parameter[name[b], constant[0]]]
variable[eip] assign[=] call[name[self].getLong, parameter[name[b], constant[4]]]
if compare[name[ip] greater_or_equal[>=] name[sip]] begin[:]
if compare[name[ip] greater[>] name[eip]] begin[:]
variable[l] assign[=] binary_operation[name[m] + constant[1]]
if compare[name[mixPtr] equal[==] constant[0]] begin[:]
return[constant[N2]]
return[call[name[self].returnData, parameter[name[mixPtr]]]] | keyword[def] identifier[binarySearch] ( identifier[self] , identifier[ip] ):
literal[string]
keyword[if] keyword[not] identifier[ip] . identifier[isdigit] (): identifier[ip] = identifier[self] . identifier[ip2Long] ( identifier[ip] )
keyword[if] identifier[self] . identifier[__indexLen] < literal[int] :
identifier[self] . identifier[__f] . identifier[seek] ( literal[int] )
identifier[b] = identifier[self] . identifier[__f] . identifier[read] ( literal[int] )
identifier[self] . identifier[__sPtr] = identifier[self] . identifier[getLong] ( identifier[b] , literal[int] )
identifier[endPtr] = identifier[self] . identifier[getLong] ( identifier[b] , literal[int] )
identifier[self] . identifier[__indexLen] = identifier[endPtr] - identifier[self] . identifier[__sPtr]
identifier[startPtr] = identifier[self] . identifier[__sPtr]
identifier[indexLen] = identifier[self] . identifier[__indexLen]
identifier[self] . identifier[__f] . identifier[seek] ( identifier[startPtr] )
identifier[b] = identifier[self] . identifier[__f] . identifier[read] ( identifier[indexLen] + literal[int] )
identifier[l] , identifier[h] , identifier[mixPtr] =( literal[int] , identifier[int] ( identifier[indexLen] / literal[int] ), literal[int] )
keyword[while] identifier[l] <= identifier[h] :
identifier[m] = identifier[int] (( identifier[l] + identifier[h] )/ literal[int] )
identifier[ptr] = identifier[startPtr] + identifier[m] * literal[int]
identifier[self] . identifier[__f] . identifier[seek] ( identifier[ptr] )
identifier[b] = identifier[self] . identifier[__f] . identifier[read] ( literal[int] )
identifier[sip] = identifier[self] . identifier[getLong] ( identifier[b] , literal[int] )
identifier[eip] = identifier[self] . identifier[getLong] ( identifier[b] , literal[int] )
keyword[if] identifier[ip] >= identifier[sip] :
keyword[if] identifier[ip] > identifier[eip] :
identifier[l] = identifier[m] + literal[int]
keyword[else] :
identifier[mixPtr] = identifier[self] . identifier[getLong] ( identifier[b] , literal[int] )
                keyword[break]
keyword[else] :
identifier[h] = identifier[m] - literal[int]
keyword[if] identifier[mixPtr] == literal[int] : keyword[return] literal[string]
keyword[return] identifier[self] . identifier[returnData] ( identifier[mixPtr] ) | def binarySearch(self, ip):
"""
" binary search method
" param: ip
"""
if not ip.isdigit():
ip = self.ip2Long(ip) # depends on [control=['if'], data=[]]
if self.__indexLen < 1:
self.__f.seek(0)
b = self.__f.read(8)
self.__sPtr = self.getLong(b, 0)
endPtr = self.getLong(b, 4)
self.__indexLen = endPtr - self.__sPtr # depends on [control=['if'], data=[]]
startPtr = self.__sPtr
indexLen = self.__indexLen
self.__f.seek(startPtr)
b = self.__f.read(indexLen + 12)
(l, h, mixPtr) = (0, int(indexLen / 12), 0)
while l <= h:
m = int((l + h) / 2)
ptr = startPtr + m * 12
self.__f.seek(ptr)
b = self.__f.read(12)
sip = self.getLong(b, 0)
eip = self.getLong(b, 4)
if ip >= sip:
if ip > eip:
l = m + 1 # depends on [control=['if'], data=[]]
else:
mixPtr = self.getLong(b, 8)
break # depends on [control=['if'], data=['ip']]
else:
h = m - 1 # depends on [control=['while'], data=['l', 'h']]
if mixPtr == 0:
return 'N2' # depends on [control=['if'], data=[]]
return self.returnData(mixPtr) |
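A hedged usage sketch for the `binarySearch` row above. The method does a binary search over fixed 12-byte index records (start IP, end IP, data pointer) read from a binary file via `self.__f`. The enclosing class name, its constructor, and the database filename below are assumptions, not taken from the row:

# Illustrative only: 'Ip2RegionSearcher' and 'ip2region.db' are assumed names.
searcher = Ip2RegionSearcher('ip2region.db')   # must open the binary index file as self.__f
print(searcher.binarySearch('8.8.8.8'))        # dotted-quad input is converted via self.ip2Long
print(searcher.binarySearch('0'))              # unmatched lookups return "N2"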
def _disk_profile(profile, hypervisor, disks=None, vm_name=None, image=None, pool=None, **kwargs):
'''
Gather the disk profile from the config or apply the default based
on the active hypervisor
This is the ``default`` profile for KVM/QEMU, which can be
overridden in the configuration:
.. code-block:: yaml
virt:
disk:
default:
- system:
size: 8192
format: qcow2
model: virtio
Example profile for KVM/QEMU with two disks, first is created
from specified image, the second is empty:
.. code-block:: yaml
virt:
disk:
two_disks:
- system:
size: 8192
format: qcow2
model: virtio
image: http://path/to/image.qcow2
- lvm:
size: 32768
format: qcow2
model: virtio
The ``format`` and ``model`` parameters are optional, and will
    default to whatever is best suited for the active hypervisor.
'''
default = [{'system':
{'size': 8192}}]
if hypervisor == 'vmware':
overlay = {'format': 'vmdk',
'model': 'scsi',
'device': 'disk',
'pool': '[{0}] '.format(pool if pool else '0')}
elif hypervisor in ['qemu', 'kvm']:
overlay = {'format': 'qcow2',
'device': 'disk',
'model': 'virtio'}
elif hypervisor in ['bhyve']:
overlay = {'format': 'raw',
'device': 'disk',
'model': 'virtio',
'sparse_volume': False}
elif hypervisor == 'xen':
overlay = {'format': 'qcow2',
'device': 'disk',
'model': 'xen'}
else:
overlay = {}
# Get the disks from the profile
disklist = []
if profile:
disklist = copy.deepcopy(
__salt__['config.get']('virt:disk', {}).get(profile, default))
    # Transform the list to remove one level of dictionary nesting and add the name as a property
disklist = [dict(d, name=name) for disk in disklist for name, d in disk.items()]
# Add the image to the first disk if there is one
if image:
# If image is specified in module arguments, then it will be used
# for the first disk instead of the image from the disk profile
log.debug('%s image from module arguments will be used for disk "%s"'
' instead of %s', image, disklist[0]['name'], disklist[0].get('image', ""))
disklist[0]['image'] = image
# Merge with the user-provided disks definitions
if disks:
for udisk in disks:
if 'name' in udisk:
found = [disk for disk in disklist if udisk['name'] == disk['name']]
if found:
found[0].update(udisk)
else:
disklist.append(udisk)
for disk in disklist:
# Add the missing properties that have defaults
for key, val in six.iteritems(overlay):
if key not in disk:
disk[key] = val
# We may have an already computed source_file (i.e. image not created by our module)
if 'source_file' in disk and disk['source_file']:
disk['filename'] = os.path.basename(disk['source_file'])
elif 'source_file' not in disk:
_fill_disk_filename(vm_name, disk, hypervisor, **kwargs)
return disklist | def function[_disk_profile, parameter[profile, hypervisor, disks, vm_name, image, pool]]:
constant[
Gather the disk profile from the config or apply the default based
on the active hypervisor
This is the ``default`` profile for KVM/QEMU, which can be
overridden in the configuration:
.. code-block:: yaml
virt:
disk:
default:
- system:
size: 8192
format: qcow2
model: virtio
Example profile for KVM/QEMU with two disks, first is created
from specified image, the second is empty:
.. code-block:: yaml
virt:
disk:
two_disks:
- system:
size: 8192
format: qcow2
model: virtio
image: http://path/to/image.qcow2
- lvm:
size: 32768
format: qcow2
model: virtio
The ``format`` and ``model`` parameters are optional, and will
    default to whatever is best suited for the active hypervisor.
]
variable[default] assign[=] list[[<ast.Dict object at 0x7da207f984c0>]]
if compare[name[hypervisor] equal[==] constant[vmware]] begin[:]
variable[overlay] assign[=] dictionary[[<ast.Constant object at 0x7da207f99b70>, <ast.Constant object at 0x7da207f9bb20>, <ast.Constant object at 0x7da207f98d90>, <ast.Constant object at 0x7da207f9b730>], [<ast.Constant object at 0x7da207f9bdf0>, <ast.Constant object at 0x7da207f9b9a0>, <ast.Constant object at 0x7da207f98d60>, <ast.Call object at 0x7da207f9a7a0>]]
variable[disklist] assign[=] list[[]]
if name[profile] begin[:]
variable[disklist] assign[=] call[name[copy].deepcopy, parameter[call[call[call[name[__salt__]][constant[config.get]], parameter[constant[virt:disk], dictionary[[], []]]].get, parameter[name[profile], name[default]]]]]
variable[disklist] assign[=] <ast.ListComp object at 0x7da207f987f0>
if name[image] begin[:]
call[name[log].debug, parameter[constant[%s image from module arguments will be used for disk "%s" instead of %s], name[image], call[call[name[disklist]][constant[0]]][constant[name]], call[call[name[disklist]][constant[0]].get, parameter[constant[image], constant[]]]]]
call[call[name[disklist]][constant[0]]][constant[image]] assign[=] name[image]
if name[disks] begin[:]
for taget[name[udisk]] in starred[name[disks]] begin[:]
if compare[constant[name] in name[udisk]] begin[:]
variable[found] assign[=] <ast.ListComp object at 0x7da1b200ceb0>
if name[found] begin[:]
call[call[name[found]][constant[0]].update, parameter[name[udisk]]]
for taget[name[disk]] in starred[name[disklist]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b200c2e0>, <ast.Name object at 0x7da1b200c520>]]] in starred[call[name[six].iteritems, parameter[name[overlay]]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[disk]] begin[:]
call[name[disk]][name[key]] assign[=] name[val]
if <ast.BoolOp object at 0x7da1b200d540> begin[:]
call[name[disk]][constant[filename]] assign[=] call[name[os].path.basename, parameter[call[name[disk]][constant[source_file]]]]
return[name[disklist]] | keyword[def] identifier[_disk_profile] ( identifier[profile] , identifier[hypervisor] , identifier[disks] = keyword[None] , identifier[vm_name] = keyword[None] , identifier[image] = keyword[None] , identifier[pool] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[default] =[{ literal[string] :
{ literal[string] : literal[int] }}]
keyword[if] identifier[hypervisor] == literal[string] :
identifier[overlay] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] . identifier[format] ( identifier[pool] keyword[if] identifier[pool] keyword[else] literal[string] )}
keyword[elif] identifier[hypervisor] keyword[in] [ literal[string] , literal[string] ]:
identifier[overlay] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[elif] identifier[hypervisor] keyword[in] [ literal[string] ]:
identifier[overlay] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[False] }
keyword[elif] identifier[hypervisor] == literal[string] :
identifier[overlay] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[else] :
identifier[overlay] ={}
identifier[disklist] =[]
keyword[if] identifier[profile] :
identifier[disklist] = identifier[copy] . identifier[deepcopy] (
identifier[__salt__] [ literal[string] ]( literal[string] ,{}). identifier[get] ( identifier[profile] , identifier[default] ))
identifier[disklist] =[ identifier[dict] ( identifier[d] , identifier[name] = identifier[name] ) keyword[for] identifier[disk] keyword[in] identifier[disklist] keyword[for] identifier[name] , identifier[d] keyword[in] identifier[disk] . identifier[items] ()]
keyword[if] identifier[image] :
identifier[log] . identifier[debug] ( literal[string]
literal[string] , identifier[image] , identifier[disklist] [ literal[int] ][ literal[string] ], identifier[disklist] [ literal[int] ]. identifier[get] ( literal[string] , literal[string] ))
identifier[disklist] [ literal[int] ][ literal[string] ]= identifier[image]
keyword[if] identifier[disks] :
keyword[for] identifier[udisk] keyword[in] identifier[disks] :
keyword[if] literal[string] keyword[in] identifier[udisk] :
identifier[found] =[ identifier[disk] keyword[for] identifier[disk] keyword[in] identifier[disklist] keyword[if] identifier[udisk] [ literal[string] ]== identifier[disk] [ literal[string] ]]
keyword[if] identifier[found] :
identifier[found] [ literal[int] ]. identifier[update] ( identifier[udisk] )
keyword[else] :
identifier[disklist] . identifier[append] ( identifier[udisk] )
keyword[for] identifier[disk] keyword[in] identifier[disklist] :
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[overlay] ):
keyword[if] identifier[key] keyword[not] keyword[in] identifier[disk] :
identifier[disk] [ identifier[key] ]= identifier[val]
keyword[if] literal[string] keyword[in] identifier[disk] keyword[and] identifier[disk] [ literal[string] ]:
identifier[disk] [ literal[string] ]= identifier[os] . identifier[path] . identifier[basename] ( identifier[disk] [ literal[string] ])
keyword[elif] literal[string] keyword[not] keyword[in] identifier[disk] :
identifier[_fill_disk_filename] ( identifier[vm_name] , identifier[disk] , identifier[hypervisor] ,** identifier[kwargs] )
keyword[return] identifier[disklist] | def _disk_profile(profile, hypervisor, disks=None, vm_name=None, image=None, pool=None, **kwargs):
"""
Gather the disk profile from the config or apply the default based
on the active hypervisor
This is the ``default`` profile for KVM/QEMU, which can be
overridden in the configuration:
.. code-block:: yaml
virt:
disk:
default:
- system:
size: 8192
format: qcow2
model: virtio
Example profile for KVM/QEMU with two disks, first is created
from specified image, the second is empty:
.. code-block:: yaml
virt:
disk:
two_disks:
- system:
size: 8192
format: qcow2
model: virtio
image: http://path/to/image.qcow2
- lvm:
size: 32768
format: qcow2
model: virtio
The ``format`` and ``model`` parameters are optional, and will
    default to whatever is best suited for the active hypervisor.
"""
default = [{'system': {'size': 8192}}]
if hypervisor == 'vmware':
overlay = {'format': 'vmdk', 'model': 'scsi', 'device': 'disk', 'pool': '[{0}] '.format(pool if pool else '0')} # depends on [control=['if'], data=[]]
elif hypervisor in ['qemu', 'kvm']:
overlay = {'format': 'qcow2', 'device': 'disk', 'model': 'virtio'} # depends on [control=['if'], data=[]]
elif hypervisor in ['bhyve']:
overlay = {'format': 'raw', 'device': 'disk', 'model': 'virtio', 'sparse_volume': False} # depends on [control=['if'], data=[]]
elif hypervisor == 'xen':
overlay = {'format': 'qcow2', 'device': 'disk', 'model': 'xen'} # depends on [control=['if'], data=[]]
else:
overlay = {}
# Get the disks from the profile
disklist = []
if profile:
disklist = copy.deepcopy(__salt__['config.get']('virt:disk', {}).get(profile, default))
    # Transform the list to remove one level of dictionary nesting and add the name as a property
disklist = [dict(d, name=name) for disk in disklist for (name, d) in disk.items()]
# Add the image to the first disk if there is one
if image:
# If image is specified in module arguments, then it will be used
# for the first disk instead of the image from the disk profile
log.debug('%s image from module arguments will be used for disk "%s" instead of %s', image, disklist[0]['name'], disklist[0].get('image', ''))
disklist[0]['image'] = image # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Merge with the user-provided disks definitions
if disks:
for udisk in disks:
if 'name' in udisk:
found = [disk for disk in disklist if udisk['name'] == disk['name']]
if found:
found[0].update(udisk) # depends on [control=['if'], data=[]]
else:
disklist.append(udisk) # depends on [control=['if'], data=['udisk']] # depends on [control=['for'], data=['udisk']] # depends on [control=['if'], data=[]]
for disk in disklist:
# Add the missing properties that have defaults
for (key, val) in six.iteritems(overlay):
if key not in disk:
disk[key] = val # depends on [control=['if'], data=['key', 'disk']] # depends on [control=['for'], data=[]]
# We may have an already computed source_file (i.e. image not created by our module)
if 'source_file' in disk and disk['source_file']:
disk['filename'] = os.path.basename(disk['source_file']) # depends on [control=['if'], data=[]]
elif 'source_file' not in disk:
_fill_disk_filename(vm_name, disk, hypervisor, **kwargs) # depends on [control=['if'], data=['disk']] # depends on [control=['for'], data=['disk']]
return disklist |
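A self-contained sketch of the merge logic in `_disk_profile` above: flatten the profile's one-key dicts, merge user-provided disks by name, then fill hypervisor defaults without overwriting explicit values. The Salt config lookup is stubbed out, and the function name here is illustrative, not the module's API:

import copy

def merge_disks(profile_disks, overlay, user_disks=None):
    # Flatten [{'system': {...}}, ...] into [{'name': 'system', ...}, ...],
    # mirroring the list comprehension in _disk_profile.
    disklist = [dict(d, name=name)
                for disk in copy.deepcopy(profile_disks)
                for name, d in disk.items()]
    # Merge user-provided definitions by name; unknown names are appended.
    for udisk in user_disks or []:
        found = [d for d in disklist if d.get('name') == udisk.get('name')]
        if found:
            found[0].update(udisk)
        else:
            disklist.append(udisk)
    # Fill hypervisor defaults without overwriting explicit values.
    for disk in disklist:
        for key, val in overlay.items():
            disk.setdefault(key, val)
    return disklist

qemu_overlay = {'format': 'qcow2', 'device': 'disk', 'model': 'virtio'}
print(merge_disks([{'system': {'size': 8192}}], qemu_overlay,
                  user_disks=[{'name': 'system', 'size': 16384},
                              {'name': 'data', 'size': 4096}]))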
def on_new(self):
"""
Add a new empty code editor to the tab widget
"""
interpreter, pyserver, args = self._get_backend_parameters()
self.setup_editor(self.tabWidget.create_new_document(
extension='.py', interpreter=interpreter, server_script=pyserver,
args=args))
self.actionRun.setDisabled(True)
self.actionConfigure_run.setDisabled(True) | def function[on_new, parameter[self]]:
constant[
Add a new empty code editor to the tab widget
]
<ast.Tuple object at 0x7da1b00cb070> assign[=] call[name[self]._get_backend_parameters, parameter[]]
call[name[self].setup_editor, parameter[call[name[self].tabWidget.create_new_document, parameter[]]]]
call[name[self].actionRun.setDisabled, parameter[constant[True]]]
call[name[self].actionConfigure_run.setDisabled, parameter[constant[True]]] | keyword[def] identifier[on_new] ( identifier[self] ):
literal[string]
identifier[interpreter] , identifier[pyserver] , identifier[args] = identifier[self] . identifier[_get_backend_parameters] ()
identifier[self] . identifier[setup_editor] ( identifier[self] . identifier[tabWidget] . identifier[create_new_document] (
identifier[extension] = literal[string] , identifier[interpreter] = identifier[interpreter] , identifier[server_script] = identifier[pyserver] ,
identifier[args] = identifier[args] ))
identifier[self] . identifier[actionRun] . identifier[setDisabled] ( keyword[True] )
identifier[self] . identifier[actionConfigure_run] . identifier[setDisabled] ( keyword[True] ) | def on_new(self):
"""
Add a new empty code editor to the tab widget
"""
(interpreter, pyserver, args) = self._get_backend_parameters()
self.setup_editor(self.tabWidget.create_new_document(extension='.py', interpreter=interpreter, server_script=pyserver, args=args))
self.actionRun.setDisabled(True)
self.actionConfigure_run.setDisabled(True) |
def addTab(self, elem, icon, name):
"""
Extends QTabWidget.addTab to keep an internal list of added tabs.
:param elem: tab widget
:param icon: tab icon
:param name: tab name
"""
self._widgets.append(elem)
return super(TabWidget, self).addTab(elem, icon, name) | def function[addTab, parameter[self, elem, icon, name]]:
constant[
Extends QTabWidget.addTab to keep an internal list of added tabs.
:param elem: tab widget
:param icon: tab icon
:param name: tab name
]
call[name[self]._widgets.append, parameter[name[elem]]]
return[call[call[name[super], parameter[name[TabWidget], name[self]]].addTab, parameter[name[elem], name[icon], name[name]]]] | keyword[def] identifier[addTab] ( identifier[self] , identifier[elem] , identifier[icon] , identifier[name] ):
literal[string]
identifier[self] . identifier[_widgets] . identifier[append] ( identifier[elem] )
keyword[return] identifier[super] ( identifier[TabWidget] , identifier[self] ). identifier[addTab] ( identifier[elem] , identifier[icon] , identifier[name] ) | def addTab(self, elem, icon, name):
"""
Extends QTabWidget.addTab to keep an internal list of added tabs.
:param elem: tab widget
:param icon: tab icon
:param name: tab name
"""
self._widgets.append(elem)
return super(TabWidget, self).addTab(elem, icon, name) |
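The `addTab` override above is a common Qt pattern: record state first, then delegate to the superclass so the widget still behaves normally. A minimal standalone sketch of the same pattern (PyQt5 is assumed here; the original row may use a different Qt binding):

from PyQt5.QtWidgets import QTabWidget  # assumption: any Qt binding works the same way

class TrackingTabWidget(QTabWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self._widgets = []              # internal list of added tabs

    def addTab(self, elem, icon, name):
        self._widgets.append(elem)      # record before delegating
        return super().addTab(elem, icon, name)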
def get_patch_from_uid(self, uid):
"""
Returns the patch with given uid.
:param uid: Patch uid.
:type uid: unicode
:return: Patch.
:rtype: Patch
"""
for name, patch in self:
if patch.uid == uid:
return patch | def function[get_patch_from_uid, parameter[self, uid]]:
constant[
Returns the patch with given uid.
:param uid: Patch uid.
:type uid: unicode
:return: Patch.
:rtype: Patch
]
for taget[tuple[[<ast.Name object at 0x7da1b0810ee0>, <ast.Name object at 0x7da1b0813490>]]] in starred[name[self]] begin[:]
if compare[name[patch].uid equal[==] name[uid]] begin[:]
return[name[patch]] | keyword[def] identifier[get_patch_from_uid] ( identifier[self] , identifier[uid] ):
literal[string]
keyword[for] identifier[name] , identifier[patch] keyword[in] identifier[self] :
keyword[if] identifier[patch] . identifier[uid] == identifier[uid] :
keyword[return] identifier[patch] | def get_patch_from_uid(self, uid):
"""
Returns the patch with given uid.
:param uid: Patch uid.
:type uid: unicode
:return: Patch.
:rtype: Patch
"""
for (name, patch) in self:
if patch.uid == uid:
return patch # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
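A brief usage note for `get_patch_from_uid` above: the container iterates as (name, patch) pairs, and when no uid matches, the loop falls through and the method implicitly returns None. A hedged sketch (the `patches` container and the uid value are assumptions):

patch = patches.get_patch_from_uid(u'ab12cd')  # container and uid are illustrative only
if patch is None:
    print('no patch found for that uid')       # implicit None when the loop exhausts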
def set(self, indexes, values=None):
"""
    Given indexes, will set a subset of the Series to the values provided. This method dispatches to the
    methods below based on the types passed in for the indexes. If the indexes contain values not in the Series
    then new rows or columns will be added.
:param indexes: indexes value, list of indexes values, or a list of booleans.
:param values: value or list of values to set. If a list then must be the same length as the indexes parameter.
:return: nothing
"""
if isinstance(indexes, (list, blist)):
self.set_rows(indexes, values)
else:
self.set_cell(indexes, values) | def function[set, parameter[self, indexes, values]]:
constant[
    Given indexes, will set a subset of the Series to the values provided. This method dispatches to the
    methods below based on the types passed in for the indexes. If the indexes contain values not in the Series
    then new rows or columns will be added.
:param indexes: indexes value, list of indexes values, or a list of booleans.
:param values: value or list of values to set. If a list then must be the same length as the indexes parameter.
:return: nothing
]
if call[name[isinstance], parameter[name[indexes], tuple[[<ast.Name object at 0x7da20c7c9810>, <ast.Name object at 0x7da20c7c9f90>]]]] begin[:]
call[name[self].set_rows, parameter[name[indexes], name[values]]] | keyword[def] identifier[set] ( identifier[self] , identifier[indexes] , identifier[values] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[indexes] ,( identifier[list] , identifier[blist] )):
identifier[self] . identifier[set_rows] ( identifier[indexes] , identifier[values] )
keyword[else] :
identifier[self] . identifier[set_cell] ( identifier[indexes] , identifier[values] ) | def set(self, indexes, values=None):
"""
    Given indexes, will set a subset of the Series to the values provided. This method dispatches to the
    methods below based on the types passed in for the indexes. If the indexes contain values not in the Series
    then new rows or columns will be added.
:param indexes: indexes value, list of indexes values, or a list of booleans.
:param values: value or list of values to set. If a list then must be the same length as the indexes parameter.
:return: nothing
"""
if isinstance(indexes, (list, blist)):
self.set_rows(indexes, values) # depends on [control=['if'], data=[]]
else:
self.set_cell(indexes, values) |
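A short usage sketch for the `set` dispatch above: a list (or blist) of indexes is routed to `set_rows`, anything else to `set_cell`. The `Series` constructor shown is an assumption (raccoon-style) and is not confirmed by this row:

srs = Series(data=[1, 2, 3], index=['a', 'b', 'c'])  # assumed constructor
srs.set('b', 20)               # scalar index -> routed to set_cell
srs.set(['a', 'c'], [10, 30])  # list of indexes -> routed to set_rows
srs.set('d', 4)                # unseen index -> a new row is added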