Dataset schema (one record per function; fields appear in this order):

  body                      string, 26 to 98.2k chars (function source, docstring included)
  body_hash                 int64, -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring                 string, 1 to 16.8k chars
  path                      string, 5 to 230 chars
  name                      string, 1 to 96 chars
  repository_name           string, 7 to 89 chars
  lang                      string, 1 class (python)
  body_without_docstring    string, 20 to 98.2k chars
def __init__(self, **kwargs):
    """
    Initialize a new cuckoo search problem.
    """
    self.__upper_boundary = kwargs.get('upper_boundary', 4.0)
    self.__lower_boundary = kwargs.get('lower_boundary', 0.0)
    self.__alpha = kwargs.pop('alpha', 1)
    self.__max_generations = kwargs.pop('max_generations', 10)
    self.__lambda = kwargs.pop('lambda', 1.5)
    self.__p_a = kwargs.pop('p_a', 0.1)
    self.__function = kwargs['function']
    self.__nests = [
        Nest(lower_boundary=self.__lower_boundary,
             upper_boundary=self.__upper_boundary,
             function=self.__function)
        for _ in range(kwargs['nests'])
    ]
    kwargs['iteration_number'] = self.__max_generations
    self.__visualizer = Visualizer(**kwargs)
7,076,526,501,844,270,000
Initialize a new cuckoo search problem.
swarmlib/cuckoosearch/cuckoo_problem.py
__init__
Geetha-github-cloud/swarmlib
python
def __init__(self, **kwargs):
    self.__upper_boundary = kwargs.get('upper_boundary', 4.0)
    self.__lower_boundary = kwargs.get('lower_boundary', 0.0)
    self.__alpha = kwargs.pop('alpha', 1)
    self.__max_generations = kwargs.pop('max_generations', 10)
    self.__lambda = kwargs.pop('lambda', 1.5)
    self.__p_a = kwargs.pop('p_a', 0.1)
    self.__function = kwargs['function']
    self.__nests = [
        Nest(lower_boundary=self.__lower_boundary,
             upper_boundary=self.__upper_boundary,
             function=self.__function)
        for _ in range(kwargs['nests'])
    ]
    kwargs['iteration_number'] = self.__max_generations
    self.__visualizer = Visualizer(**kwargs)
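A minimal construction sketch (not part of the dataset record): the CuckooProblem class name, the import path, and the two-argument objective are assumptions inferred from the fields above. Note that 'lambda' is a Python keyword, so it can only be supplied through dict unpacking:

    from swarmlib.cuckoosearch.cuckoo_problem import CuckooProblem  # assumed import path

    def sphere(x, y):  # hypothetical 2-D objective to minimize
        return x ** 2 + y ** 2

    problem = CuckooProblem(function=sphere, nests=10,
                            lower_boundary=0.0, upper_boundary=4.0,
                            alpha=1, max_generations=10, p_a=0.1,
                            **{'lambda': 1.5})  # 'lambda' is reserved, hence the unpacking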
def replay(self):
    """
    Start the problem's visualization.
    """
    self.__visualizer.replay()
-2,730,549,339,398,622,700
Start the problem's visualization.
swarmlib/cuckoosearch/cuckoo_problem.py
replay
Geetha-github-cloud/swarmlib
python
def replay(self):
    self.__visualizer.replay()
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """A better wrapper over request for deferred signing"""
    if self.enableRateLimit:
        self.throttle()
    self.lastRestRequestTimestamp = self.milliseconds()
    request = self.sign(path, api, method, params, headers, body)
    return self.fetch(request['url'], request['method'], request['headers'], request['body'])
-5,809,463,524,355,869,000
A better wrapper over request for deferred signing
python/ccxt/base/exchange.py
fetch2
tssujt/ccxt
python
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
    if self.enableRateLimit:
        self.throttle()
    self.lastRestRequestTimestamp = self.milliseconds()
    request = self.sign(path, api, method, params, headers, body)
    return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def find_broadly_matched_key(self, broad, string):
    """A helper method for matching error strings exactly vs broadly"""
    keys = list(broad.keys())
    for i in range(0, len(keys)):
        key = keys[i]
        if string.find(key) >= 0:
            return key
    return None
1,118,882,194,763,658,900
A helper method for matching error strings exactly vs broadly
python/ccxt/base/exchange.py
find_broadly_matched_key
tssujt/ccxt
python
def find_broadly_matched_key(self, broad, string):
    keys = list(broad.keys())
    for i in range(0, len(keys)):
        key = keys[i]
        if string.find(key) >= 0:
            return key
    return None
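The matching logic never touches self, so it can be exercised standalone; the broad table below is a hypothetical stand-in for ccxt's broad error-string maps:

    def find_broadly_matched_key(broad, string):  # self-free copy of the method above
        for key in broad:  # idiomatic equivalent of the index-based loop
            if string.find(key) >= 0:
                return key
        return None

    broad = {'Order not found': 'OrderNotFound', 'Insufficient funds': 'InsufficientFunds'}
    print(find_broadly_matched_key(broad, 'error: Order not found (id 42)'))  # 'Order not found'
    print(find_broadly_matched_key(broad, 'everything is fine'))              # None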
def fetch(self, url, method='GET', headers=None, body=None):
    """Perform a HTTP request and return decoded JSON data"""
    request_headers = self.prepare_request_headers(headers)
    url = self.proxy + url
    if self.verbose:
        print('\nRequest:', method, url, request_headers, body)
    self.logger.debug('%s %s, Request: %s %s', method, url, request_headers, body)
    if body:
        body = body.encode()
    self.session.cookies.clear()
    response = None
    http_response = None
    try:
        response = self.session.request(
            method, url,
            data=body,
            headers=request_headers,
            timeout=int(self.timeout / 1000),
            proxies=self.proxies
        )
        http_response = response.text
        if self.enableLastHttpResponse:
            self.last_http_response = http_response
        headers = response.headers
        if self.enableLastResponseHeaders:
            self.last_response_headers = headers
        if self.verbose:
            print('\nResponse:', method, url, str(response.status_code), str(headers), http_response)
        self.logger.debug('%s %s, Response: %s %s %s', method, url, response.status_code, headers, http_response)
        response.raise_for_status()
    except Timeout as e:
        self.raise_error(RequestTimeout, url, method, e)
    except TooManyRedirects as e:
        self.raise_error(ExchangeError, url, method, e)
    except SSLError as e:
        self.raise_error(ExchangeError, url, method, e)
    except HTTPError as e:
        self.handle_errors(response.status_code, response.reason, url, method, headers, http_response)
        self.handle_rest_errors(e, response.status_code, http_response, url, method)
        self.raise_error(ExchangeError, url, method, e, http_response)
    except RequestException as e:  # base exception class of requests
        error_string = str(e)
        if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string):
            self.raise_error(NetworkError, url, method, e)
        else:
            self.raise_error(ExchangeError, url, method, e)
    self.handle_errors(response.status_code, response.reason, url, method, None, http_response)
    return self.handle_rest_response(http_response, url, method, headers, body)
5,832,230,086,645,174,000
Perform a HTTP request and return decoded JSON data
python/ccxt/base/exchange.py
fetch
tssujt/ccxt
python
def fetch(self, url, method='GET', headers=None, body=None):
    request_headers = self.prepare_request_headers(headers)
    url = self.proxy + url
    if self.verbose:
        print('\nRequest:', method, url, request_headers, body)
    self.logger.debug('%s %s, Request: %s %s', method, url, request_headers, body)
    if body:
        body = body.encode()
    self.session.cookies.clear()
    response = None
    http_response = None
    try:
        response = self.session.request(
            method, url,
            data=body,
            headers=request_headers,
            timeout=int(self.timeout / 1000),
            proxies=self.proxies
        )
        http_response = response.text
        if self.enableLastHttpResponse:
            self.last_http_response = http_response
        headers = response.headers
        if self.enableLastResponseHeaders:
            self.last_response_headers = headers
        if self.verbose:
            print('\nResponse:', method, url, str(response.status_code), str(headers), http_response)
        self.logger.debug('%s %s, Response: %s %s %s', method, url, response.status_code, headers, http_response)
        response.raise_for_status()
    except Timeout as e:
        self.raise_error(RequestTimeout, url, method, e)
    except TooManyRedirects as e:
        self.raise_error(ExchangeError, url, method, e)
    except SSLError as e:
        self.raise_error(ExchangeError, url, method, e)
    except HTTPError as e:
        self.handle_errors(response.status_code, response.reason, url, method, headers, http_response)
        self.handle_rest_errors(e, response.status_code, http_response, url, method)
        self.raise_error(ExchangeError, url, method, e, http_response)
    except RequestException as e:
        error_string = str(e)
        if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string):
            self.raise_error(NetworkError, url, method, e)
        else:
            self.raise_error(ExchangeError, url, method, e)
    self.handle_errors(response.status_code, response.reason, url, method, None, http_response)
    return self.handle_rest_response(http_response, url, method, headers, body)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
    """A helper-wrapper for the safe_value_2() family."""
    value = method(dictionary, key1)
    return value if value is not None else method(dictionary, key2, default_value)
-2,371,737,021,285,098,500
A helper-wrapper for the safe_value_2() family.
python/ccxt/base/exchange.py
safe_either
tssujt/ccxt
python
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
    value = method(dictionary, key1)
    return value if value is not None else method(dictionary, key2, default_value)
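A quick sketch of the fall-through behavior, using a simplified stand-in for ccxt's safe_value accessor:

    def safe_value(dictionary, key, default_value=None):  # simplified stand-in
        value = dictionary.get(key)
        return value if value is not None else default_value

    def safe_either(method, dictionary, key1, key2, default_value=None):  # copy from above
        value = method(dictionary, key1)
        return value if value is not None else method(dictionary, key2, default_value)

    ticker = {'last': None, 'close': 101.5}
    print(safe_either(safe_value, ticker, 'last', 'close'))  # 101.5: 'last' is None, falls through to 'close'
    print(safe_either(safe_value, ticker, 'bid', 'ask', 0))  # 0: neither key is present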
@staticmethod
def truncate(num, precision=0):
    """Deprecated, use decimal_to_precision instead"""
    if precision > 0:
        decimal_precision = math.pow(10, precision)
        return math.trunc(num * decimal_precision) / decimal_precision
    return int(Exchange.truncate_to_string(num, precision))
5,881,430,384,757,220,000
Deprecated, use decimal_to_precision instead
python/ccxt/base/exchange.py
truncate
tssujt/ccxt
python
@staticmethod
def truncate(num, precision=0):
    if precision > 0:
        decimal_precision = math.pow(10, precision)
        return math.trunc(num * decimal_precision) / decimal_precision
    return int(Exchange.truncate_to_string(num, precision))
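Truncation cuts toward zero rather than rounding; a worked check of the precision > 0 branch:

    import math

    def truncate(num, precision=0):  # the precision > 0 branch from above
        decimal_precision = math.pow(10, precision)
        return math.trunc(num * decimal_precision) / decimal_precision

    print(truncate(1.2399, 2))   # 1.23  (truncated, not rounded to 1.24)
    print(truncate(-1.2399, 2))  # -1.23 (math.trunc truncates toward zero)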
@staticmethod
def truncate_to_string(num, precision=0):
    """Deprecated, todo: remove references from subclasses"""
    if precision > 0:
        parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
        decimal_digits = parts[1][:precision].rstrip('0')
        decimal_digits = decimal_digits if len(decimal_digits) else '0'
        return parts[0] + '.' + decimal_digits
    return '%d' % num
-3,156,627,279,850,857,000
Deprecated, todo: remove references from subclasses
python/ccxt/base/exchange.py
truncate_to_string
tssujt/ccxt
python
@staticmethod
def truncate_to_string(num, precision=0):
    if precision > 0:
        parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
        decimal_digits = parts[1][:precision].rstrip('0')
        decimal_digits = decimal_digits if len(decimal_digits) else '0'
        return parts[0] + '.' + decimal_digits
    return '%d' % num
def check_address(self, address):
    """Checks that an address is not the same character repeated or an empty sequence"""
    if address is None:
        self.raise_error(InvalidAddress, details='address is None')
    if (all(letter == address[0] for letter in address)
            or len(address) < self.minFundingAddressLength
            or ' ' in address):
        self.raise_error(InvalidAddress, details='address is invalid or has less than '
                                                 + str(self.minFundingAddressLength)
                                                 + ' characters: "' + str(address) + '"')
    return address
-2,909,175,738,945,414,700
Checks that an address is not the same character repeated or an empty sequence
python/ccxt/base/exchange.py
check_address
tssujt/ccxt
python
def check_address(self, address):
    if address is None:
        self.raise_error(InvalidAddress, details='address is None')
    if (all(letter == address[0] for letter in address)
            or len(address) < self.minFundingAddressLength
            or ' ' in address):
        self.raise_error(InvalidAddress, details='address is invalid or has less than '
                                                 + str(self.minFundingAddressLength)
                                                 + ' characters: "' + str(address) + '"')
    return address
def __init__(self, file):
    """Init audio stream"""
    self.file = file
-1,504,669,398,592,276,500
Init audio stream
AudioFile.py
__init__
CoryXie/SpeechShadowing
python
def __init__(self, file):
    self.file = file
def play(self):
    """Play entire file"""
    utils.displayInfoMessage('Playing Audio')
    pathparts = self.file.rsplit('.', 1)
    fileformat = pathparts[1]
    song = AudioSegment.from_file(self.file, format=fileformat)
    play(song)
    utils.displayInfoMessage('')
    utils.displayErrorMessage('')
-74,452,650,981,497,420
Play entire file
AudioFile.py
play
CoryXie/SpeechShadowing
python
def play(self):
    utils.displayInfoMessage('Playing Audio')
    pathparts = self.file.rsplit('.', 1)
    fileformat = pathparts[1]
    song = AudioSegment.from_file(self.file, format=fileformat)
    play(song)
    utils.displayInfoMessage('')
    utils.displayErrorMessage('')
def send_commands(mqtt_client, command_topic, commands):
    """Send a sequence of commands."""
    backlog_topic = command_topic + COMMAND_BACKLOG
    backlog = ';'.join('NoDelay;%s %s' % command for command in commands)
    mqtt_client.publish(backlog_topic, backlog)
2,324,701,565,265,341,000
Send a sequence of commands.
hatasmota/mqtt.py
send_commands
ascillato/hatasmota
python
def send_commands(mqtt_client, command_topic, commands):
    backlog_topic = command_topic + COMMAND_BACKLOG
    backlog = ';'.join('NoDelay;%s %s' % command for command in commands)
    mqtt_client.publish(backlog_topic, backlog)
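What the backlog payload looks like for a hypothetical command sequence; Tasmota's Backlog command runs the chained commands in order, and the NoDelay prefix skips the inter-command delay (COMMAND_BACKLOG is presumably the 'Backlog' topic suffix):

    commands = [('Power1', 'ON'), ('Delay', '5'), ('Power1', 'OFF')]  # hypothetical commands
    backlog = ';'.join('NoDelay;%s %s' % command for command in commands)
    print(backlog)
    # NoDelay;Power1 ON;NoDelay;Delay 5;NoDelay;Power1 OFF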
def cancel(self):
    """Cancel the timer."""
    self._task.cancel()
4,089,125,113,064,289,000
Cancel the timer.
hatasmota/mqtt.py
cancel
ascillato/hatasmota
python
def cancel(self):
    self._task.cancel()
def __init__(self, publish, subscribe, unsubscribe):
    """Initialize."""
    self._pending_messages = {}
    self._publish = publish
    self._subscribe = subscribe
    self._unsubscribe = unsubscribe
-6,452,863,049,671,550,000
Initialize.
hatasmota/mqtt.py
__init__
ascillato/hatasmota
python
def __init__(self, publish, subscribe, unsubscribe):
    self._pending_messages = {}
    self._publish = publish
    self._subscribe = subscribe
    self._unsubscribe = unsubscribe
def publish(self, *args, **kwds):
    """Publish a message."""
    return self._publish(*args, **kwds)
-3,842,568,635,347,020,300
Publish a message.
hatasmota/mqtt.py
publish
ascillato/hatasmota
python
def publish(self, *args, **kwds):
    return self._publish(*args, **kwds)
def publish_debounced(self, topic, payload, qos=None, retain=None):
    """Publish a message, with debounce."""
    msg = Message(topic, payload, qos, retain)

    def publish_callback():
        _LOGGER.debug('publish_debounced: publishing %s', msg)
        self._pending_messages.pop(msg)
        self.publish(msg.topic, msg.payload, qos=msg.qos, retain=msg.retain)

    if msg in self._pending_messages:
        timer = self._pending_messages.pop(msg)
        timer.cancel()
    timer = Timer(DEBOUNCE_TIMEOUT, publish_callback)
    self._pending_messages[msg] = timer
7,393,002,072,308,514,000
Publish a message, with debounce.
hatasmota/mqtt.py
publish_debounced
ascillato/hatasmota
python
def publish_debounced(self, topic, payload, qos=None, retain=None):
    msg = Message(topic, payload, qos, retain)

    def publish_callback():
        _LOGGER.debug('publish_debounced: publishing %s', msg)
        self._pending_messages.pop(msg)
        self.publish(msg.topic, msg.payload, qos=msg.qos, retain=msg.retain)

    if msg in self._pending_messages:
        timer = self._pending_messages.pop(msg)
        timer.cancel()
    timer = Timer(DEBOUNCE_TIMEOUT, publish_callback)
    self._pending_messages[msg] = timer
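The debounce works because equal messages collide as keys of self._pending_messages, which requires Message to hash and compare by value. A sketch of a compatible definition (an assumption about hatasmota's actual Message):

    from collections import namedtuple

    # namedtuples hash and compare by field values, so two publish_debounced
    # calls with identical arguments map to the same pending timer and coalesce.
    Message = namedtuple('Message', ['topic', 'payload', 'qos', 'retain'])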
async def subscribe(self, sub_state, topics):
    """Subscribe to topics."""
    return await self._subscribe(sub_state, topics)
1,127,118,368,039,434,400
Subscribe to topics.
hatasmota/mqtt.py
subscribe
ascillato/hatasmota
python
async def subscribe(self, sub_state, topics):
    return await self._subscribe(sub_state, topics)
async def unsubscribe(self, sub_state):
    """Unsubscribe from topics."""
    return await self._unsubscribe(sub_state)
-3,378,789,737,602,925,600
Unsubscribe from topics.
hatasmota/mqtt.py
unsubscribe
ascillato/hatasmota
python
async def unsubscribe(self, sub_state):
    return await self._unsubscribe(sub_state)
def _reward(self, i, rewards, reward=1):
    """
    Compute the reward to be given upon success
    """
    for j, a in enumerate(self.agents):
        if a.index == i or a.index == 0:
            rewards[j] += reward
        if self.zero_sum:
            if a.index != i or a.index == 0:
                rewards[j] -= reward
-7,247,224,356,617,500,000
Compute the reward to be given upon success
gym_multigrid/envs/collect_game.py
_reward
ArnaudFickinger/gym-multigrid
python
def _reward(self, i, rewards, reward=1):
    for j, a in enumerate(self.agents):
        if a.index == i or a.index == 0:
            rewards[j] += reward
        if self.zero_sum:
            if a.index != i or a.index == 0:
                rewards[j] -= reward
@classmethod
def host(cls) -> str:
    """get the host of the url, so we can use the correct scraper"""
    raise NotImplementedError('This should be implemented.')
1,255,193,424,983,882,800
get the host of the url, so we can use the correct scraper
recipe_scrapers/_abstract.py
host
AlexRogalskiy/recipe-scrapers
python
@classmethod
def host(cls) -> str:
    raise NotImplementedError('This should be implemented.')
def total_time(self):
    """total time it takes to prepare the recipe in minutes"""
    raise NotImplementedError('This should be implemented.')
-7,147,276,316,743,142,000
total time it takes to prepare the recipe in minutes
recipe_scrapers/_abstract.py
total_time
AlexRogalskiy/recipe-scrapers
python
def total_time(self):
    raise NotImplementedError('This should be implemented.')
def yields(self):
    """The number of servings or items in the recipe"""
    raise NotImplementedError('This should be implemented.')
-5,047,820,617,410,046,000
The number of servings or items in the recipe
recipe_scrapers/_abstract.py
yields
AlexRogalskiy/recipe-scrapers
python
def yields(self):
    raise NotImplementedError('This should be implemented.')
def language(self):
    """
    Human language the recipe is written in.

    May be overridden by individual scrapers.
    """
    candidate_languages = OrderedDict()
    html = self.soup.find('html', {'lang': True})
    candidate_languages[html.get('lang')] = True

    meta_language = (
        self.soup.find(
            'meta',
            {
                'http-equiv': lambda x: x and x.lower() == 'content-language',
                'content': True,
            },
        )
        if settings.META_HTTP_EQUIV
        else None
    )
    if meta_language:
        language = meta_language.get('content').split(',', 1)[0]
        if language:
            candidate_languages[language] = True

    # with more than one candidate, drop the generic 'en' in favor of the rest
    if len(candidate_languages) > 1:
        candidate_languages.pop('en', None)

    return candidate_languages.popitem(last=False)[0]
-5,964,747,132,220,465,000
Human language the recipe is written in. May be overridden by individual scrapers.
recipe_scrapers/_abstract.py
language
AlexRogalskiy/recipe-scrapers
python
def language(self):
    candidate_languages = OrderedDict()
    html = self.soup.find('html', {'lang': True})
    candidate_languages[html.get('lang')] = True

    meta_language = (
        self.soup.find(
            'meta',
            {
                'http-equiv': lambda x: x and x.lower() == 'content-language',
                'content': True,
            },
        )
        if settings.META_HTTP_EQUIV
        else None
    )
    if meta_language:
        language = meta_language.get('content').split(',', 1)[0]
        if language:
            candidate_languages[language] = True

    if len(candidate_languages) > 1:
        candidate_languages.pop('en', None)

    return candidate_languages.popitem(last=False)[0]
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
    """Computes sigmoid cross entropy given `logits`.

    Measures the probability error in discrete classification tasks in which
    each class is independent and not mutually exclusive. For instance, one
    could perform multilabel classification where a picture can contain both
    an elephant and a dog at the same time.

    For brevity, let `x = logits`, `z = targets`. The logistic loss is

        z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
      = (1 - z) * x + log(1 + exp(-x))
      = x - x * z + log(1 + exp(-x))

    For x < 0, to avoid overflow in exp(-x), we reformulate the above

        x - x * z + log(1 + exp(-x))
      = log(exp(x)) - x * z + log(1 + exp(-x))
      = - x * z + log(1 + exp(x))

    Hence, to ensure stability and avoid overflow, the implementation uses
    this equivalent formulation

        max(x, 0) - x * z + log(1 + exp(-abs(x)))

    `logits` and `targets` must have the same type and shape.

    Args:
      logits: A `Tensor` of type `float32` or `float64`.
      targets: A `Tensor` of the same type and shape as `logits`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of the same shape as `logits` with the componentwise
      logistic losses.

    Raises:
      ValueError: If `logits` and `targets` do not have the same shape.
    """
    with ops.op_scope([logits, targets], name, 'logistic_loss') as name:
        logits = ops.convert_to_tensor(logits, name='logits')
        targets = ops.convert_to_tensor(targets, name='targets')
        try:
            targets.get_shape().merge_with(logits.get_shape())
        except ValueError:
            raise ValueError('logits and targets must have the same shape (%s vs %s)'
                             % (logits.get_shape(), targets.get_shape()))
        # branch-free evaluation of max(x, 0) - x * z + log(1 + exp(-abs(x)))
        zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
        cond = (logits >= zeros)
        relu_logits = math_ops.select(cond, logits, zeros)
        neg_abs_logits = math_ops.select(cond, -logits, logits)
        return math_ops.add(relu_logits - logits * targets,
                            math_ops.log(1 + math_ops.exp(neg_abs_logits)),
                            name=name)
-2,597,133,487,863,943,000
Computes sigmoid cross entropy given `logits`.

Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an
elephant and a dog at the same time.

For brevity, let `x = logits`, `z = targets`. The logistic loss is

    z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
  = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
  = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
  = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
  = (1 - z) * x + log(1 + exp(-x))
  = x - x * z + log(1 + exp(-x))

For x < 0, to avoid overflow in exp(-x), we reformulate the above

    x - x * z + log(1 + exp(-x))
  = log(exp(x)) - x * z + log(1 + exp(-x))
  = - x * z + log(1 + exp(x))

Hence, to ensure stability and avoid overflow, the implementation uses this
equivalent formulation

    max(x, 0) - x * z + log(1 + exp(-abs(x)))

`logits` and `targets` must have the same type and shape.

Args:
  logits: A `Tensor` of type `float32` or `float64`.
  targets: A `Tensor` of the same type and shape as `logits`.
  name: A name for the operation (optional).

Returns:
  A `Tensor` of the same shape as `logits` with the componentwise
  logistic losses.

Raises:
  ValueError: If `logits` and `targets` do not have the same shape.
tensorflow/python/ops/nn.py
sigmoid_cross_entropy_with_logits
AdityaPai2398/tensorflow
python
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
    with ops.op_scope([logits, targets], name, 'logistic_loss') as name:
        logits = ops.convert_to_tensor(logits, name='logits')
        targets = ops.convert_to_tensor(targets, name='targets')
        try:
            targets.get_shape().merge_with(logits.get_shape())
        except ValueError:
            raise ValueError('logits and targets must have the same shape (%s vs %s)'
                             % (logits.get_shape(), targets.get_shape()))
        zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
        cond = (logits >= zeros)
        relu_logits = math_ops.select(cond, logits, zeros)
        neg_abs_logits = math_ops.select(cond, -logits, logits)
        return math_ops.add(relu_logits - logits * targets,
                            math_ops.log(1 + math_ops.exp(neg_abs_logits)),
                            name=name)
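A NumPy sketch (standing in for the TF ops) of why the reformulation matters: the naive loss overflows for extreme negative logits, while the max(x, 0) - x * z + log(1 + exp(-abs(x))) form stays finite:

    import numpy as np

    x = np.array([-1000.0, -2.0, 0.0, 2.0, 1000.0])  # logits, including extremes
    z = np.array([1.0, 1.0, 0.5, 0.0, 0.0])          # targets

    with np.errstate(over='ignore'):
        naive = x - x * z + np.log(1 + np.exp(-x))   # exp(1000) overflows -> inf

    stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

    print(naive[0], stable[0])                 # inf 1000.0 (the correct loss at x=-1000, z=1)
    print(np.allclose(naive[1:], stable[1:]))  # True on the non-overflowing entries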
def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None):
    """Computes a weighted cross entropy.

    This is like `sigmoid_cross_entropy_with_logits()` except that
    `pos_weight` allows one to trade off recall and precision by up- or
    down-weighting the cost of a positive error relative to a negative error.

    The usual cross-entropy cost is defined as:

        targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits))

    The argument `pos_weight` is used as a multiplier for the positive targets:

        targets * -log(sigmoid(logits)) * pos_weight +
            (1 - targets) * -log(1 - sigmoid(logits))

    For brevity, let `x = logits`, `z = targets`, `q = pos_weight`.
    The loss is:

        qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
      = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x))
      = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))

    Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
    the implementation uses

        (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

    `logits` and `targets` must have the same type and shape.

    Args:
      logits: A `Tensor` of type `float32` or `float64`.
      targets: A `Tensor` of the same type and shape as `logits`.
      pos_weight: A coefficient to use on the positive examples.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of the same shape as `logits` with the componentwise
      weighted logistic losses.

    Raises:
      ValueError: If `logits` and `targets` do not have the same shape.
    """
    with ops.op_scope([logits, targets], name, 'logistic_loss') as name:
        logits = ops.convert_to_tensor(logits, name='logits')
        targets = ops.convert_to_tensor(targets, name='targets')
        try:
            targets.get_shape().merge_with(logits.get_shape())
        except ValueError:
            raise ValueError('logits and targets must have the same shape (%s vs %s)'
                             % (logits.get_shape(), targets.get_shape()))
        log_weight = 1 + (pos_weight - 1) * targets
        return math_ops.add(
            (1 - targets) * logits,
            log_weight * (math_ops.log(1 + math_ops.exp(-math_ops.abs(logits)))
                          + nn_ops.relu(-logits)),
            name=name)
8,742,524,507,999,195,000
Computes a weighted cross entropy.

This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`
allows one to trade off recall and precision by up- or down-weighting the
cost of a positive error relative to a negative error.

The usual cross-entropy cost is defined as:

    targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits))

The argument `pos_weight` is used as a multiplier for the positive targets:

    targets * -log(sigmoid(logits)) * pos_weight +
        (1 - targets) * -log(1 - sigmoid(logits))

For brevity, let `x = logits`, `z = targets`, `q = pos_weight`.
The loss is:

    qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
  = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
  = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
  = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
  = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x))
  = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))

Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
the implementation uses

    (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

`logits` and `targets` must have the same type and shape.

Args:
  logits: A `Tensor` of type `float32` or `float64`.
  targets: A `Tensor` of the same type and shape as `logits`.
  pos_weight: A coefficient to use on the positive examples.
  name: A name for the operation (optional).

Returns:
  A `Tensor` of the same shape as `logits` with the componentwise
  weighted logistic losses.

Raises:
  ValueError: If `logits` and `targets` do not have the same shape.
tensorflow/python/ops/nn.py
weighted_cross_entropy_with_logits
AdityaPai2398/tensorflow
python
def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None):
    with ops.op_scope([logits, targets], name, 'logistic_loss') as name:
        logits = ops.convert_to_tensor(logits, name='logits')
        targets = ops.convert_to_tensor(targets, name='targets')
        try:
            targets.get_shape().merge_with(logits.get_shape())
        except ValueError:
            raise ValueError('logits and targets must have the same shape (%s vs %s)'
                             % (logits.get_shape(), targets.get_shape()))
        log_weight = 1 + (pos_weight - 1) * targets
        return math_ops.add(
            (1 - targets) * logits,
            log_weight * (math_ops.log(1 + math_ops.exp(-math_ops.abs(logits)))
                          + nn_ops.relu(-logits)),
            name=name)
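A small NumPy check that the stable expression in the implementation matches the weighted definition at the top of the docstring:

    import numpy as np

    x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])  # logits
    z = np.array([1.0, 0.0, 1.0, 1.0, 0.0])    # targets
    q = 4.0                                    # pos_weight

    sig = 1.0 / (1.0 + np.exp(-x))
    definition = q * z * -np.log(sig) + (1 - z) * -np.log(1 - sig)

    log_weight = 1 + (q - 1) * z
    stable = (1 - z) * x + log_weight * (np.log1p(np.exp(-np.abs(x))) + np.maximum(-x, 0))

    print(np.allclose(definition, stable))  # True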
def relu_layer(x, weights, biases, name=None):
    """Computes Relu(x * weight + biases).

    Args:
      x: a 2D tensor. Dimensions typically: batch, in_units
      weights: a 2D tensor. Dimensions typically: in_units, out_units
      biases: a 1D tensor. Dimensions: out_units
      name: A name for the operation (optional). If not specified
        "nn_relu_layer" is used.

    Returns:
      A 2-D Tensor computing relu(matmul(x, weights) + biases).
      Dimensions typically: batch, out_units.
    """
    with ops.op_scope([x, weights, biases], name, 'relu_layer') as name:
        x = ops.convert_to_tensor(x, name='x')
        weights = ops.convert_to_tensor(weights, name='weights')
        biases = ops.convert_to_tensor(biases, name='biases')
        xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
        return nn_ops.relu(xw_plus_b, name=name)
-4,549,435,547,551,919,000
Computes Relu(x * weight + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "nn_relu_layer" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units.
tensorflow/python/ops/nn.py
relu_layer
AdityaPai2398/tensorflow
python
def relu_layer(x, weights, biases, name=None):
    with ops.op_scope([x, weights, biases], name, 'relu_layer') as name:
        x = ops.convert_to_tensor(x, name='x')
        weights = ops.convert_to_tensor(weights, name='weights')
        biases = ops.convert_to_tensor(biases, name='biases')
        xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
        return nn_ops.relu(xw_plus_b, name=name)
def l2_normalize(x, dim, epsilon=1e-12, name=None):
    """Normalizes along dimension `dim` using an L2 norm.

    For a 1-D tensor with `dim = 0`, computes

        output = x / sqrt(max(sum(x**2), epsilon))

    For `x` with more dimensions, independently normalizes each 1-D slice
    along dimension `dim`.

    Args:
      x: A `Tensor`.
      dim: Dimension along which to normalize.
      epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as
        the divisor if `norm < sqrt(epsilon)`.
      name: A name for this operation (optional).

    Returns:
      A `Tensor` with the same shape as `x`.
    """
    with ops.op_scope([x], name, 'l2_normalize') as name:
        x = ops.convert_to_tensor(x, name='x')
        square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True)
        x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
        return math_ops.mul(x, x_inv_norm, name=name)
-620,941,079,581,741,000
Normalizes along dimension `dim` using an L2 norm. For a 1-D tensor with `dim = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `dim`. Args: x: A `Tensor`. dim: Dimension along which to normalize. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`.
tensorflow/python/ops/nn.py
l2_normalize
AdityaPai2398/tensorflow
python
def l2_normalize(x, dim, epsilon=1e-12, name=None):
    with ops.op_scope([x], name, 'l2_normalize') as name:
        x = ops.convert_to_tensor(x, name='x')
        square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True)
        x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
        return math_ops.mul(x, x_inv_norm, name=name)
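The same computation for a concrete array in NumPy; the second row shows the epsilon floor preventing a divide-by-zero on an all-zero slice:

    import numpy as np

    x = np.array([[3.0, 4.0],
                  [0.0, 0.0]])
    epsilon = 1e-12

    square_sum = np.sum(np.square(x), axis=1, keepdims=True)
    x_inv_norm = 1.0 / np.sqrt(np.maximum(square_sum, epsilon))  # rsqrt
    print(x * x_inv_norm)
    # [[0.6 0.8]   <- unit L2 norm
    #  [0.  0. ]]  <- zero slice stays zero instead of producing NaN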
def zero_fraction(value, name=None):
    """Returns the fraction of zeros in `value`.

    If `value` is empty, the result is `nan`.

    This is useful in summaries to measure and report sparsity. For example,

        z = tf.nn.relu(...)
        summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z))

    Args:
      value: A tensor of numeric type.
      name: A name for the operation (optional).

    Returns:
      The fraction of zeros in `value`, with type `float32`.
    """
    with ops.op_scope([value], name, 'zero_fraction'):
        value = ops.convert_to_tensor(value, name='value')
        zero = constant_op.constant(0, dtype=value.dtype, name='zero')
        return math_ops.reduce_mean(
            math_ops.cast(math_ops.equal(value, zero), dtypes.float32))
8,074,424,809,428,103,000
Returns the fraction of zeros in `value`. If `value` is empty, the result is `nan`. This is useful in summaries to measure and report sparsity. For example, z = tf.nn.relu(...) summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z)) Args: value: A tensor of numeric type. name: A name for the operation (optional). Returns: The fraction of zeros in `value`, with type `float32`.
tensorflow/python/ops/nn.py
zero_fraction
AdityaPai2398/tensorflow
python
def zero_fraction(value, name=None):
    with ops.op_scope([value], name, 'zero_fraction'):
        value = ops.convert_to_tensor(value, name='value')
        zero = constant_op.constant(0, dtype=value.dtype, name='zero')
        return math_ops.reduce_mean(
            math_ops.cast(math_ops.equal(value, zero), dtypes.float32))
def depthwise_conv2d(input, filter, strides, padding, name=None):
    """Depthwise 2-D convolution.

    Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
    and a filter tensor of shape
    `[filter_height, filter_width, in_channels, channel_multiplier]`
    containing `in_channels` convolutional filters of depth 1,
    `depthwise_conv2d` applies a different filter to each input channel
    (expanding from 1 channel to `channel_multiplier` channels for each),
    then concatenates the results together. The output has
    `in_channels * channel_multiplier` channels.

    In detail,

        output[b, i, j, k * channel_multiplier + q] =
            sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                         filter[di, dj, k, q]

    Must have `strides[0] = strides[3] = 1`. For the most common case of the
    same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

    Args:
      input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
      filter: 4-D with shape
        `[filter_height, filter_width, in_channels, channel_multiplier]`.
      strides: 1-D of size 4. The stride of the sliding window for each
        dimension of `input`.
      padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
        See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
      name: A name for this operation (optional).

    Returns:
      A 4-D `Tensor` of shape
      `[batch, out_height, out_width, in_channels * channel_multiplier].`
    """
    with ops.op_scope([input, filter], name, 'depthwise') as name:
        input = ops.convert_to_tensor(input, name='tensor_in')
        filter = ops.convert_to_tensor(filter, name='filter_in')
        if filter.get_shape().ndims is not None:
            assert len(filter.get_shape()) == 4
            in_channels = filter.get_shape()[2]
            if input.get_shape().ndims is not None:
                assert len(input.get_shape()) == 4
                assert input.get_shape()[3] == in_channels, (
                    'Mismatched input depth %d and number of depthwise filters %d.'
                    % (input.get_shape()[3].value, in_channels))
        else:
            assert input.get_shape().ndims is not None, (
                'Either tensor must provide static shape information.')
            assert input.get_shape().ndims == 4
            in_channels = input.get_shape()[3]
        if in_channels == 1:
            return nn_ops.conv2d(input, filter, strides, padding, name=name)
        else:
            return nn_ops.depthwise_conv2d_native(input, filter, strides, padding, name=name)
-9,087,105,612,821,949,000
Depthwise 2-D convolution.

Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`
containing `in_channels` convolutional filters of depth 1,
`depthwise_conv2d` applies a different filter to each input channel
(expanding from 1 channel to `channel_multiplier` channels for each), then
concatenates the results together. The output has
`in_channels * channel_multiplier` channels.

In detail,

    output[b, i, j, k * channel_multiplier + q] =
        sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                     filter[di, dj, k, q]

Must have `strides[0] = strides[3] = 1`. For the most common case of the
same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

Args:
  input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
  filter: 4-D with shape
    `[filter_height, filter_width, in_channels, channel_multiplier]`.
  strides: 1-D of size 4. The stride of the sliding window for each
    dimension of `input`.
  padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
  name: A name for this operation (optional).

Returns:
  A 4-D `Tensor` of shape
  `[batch, out_height, out_width, in_channels * channel_multiplier].`
tensorflow/python/ops/nn.py
depthwise_conv2d
AdityaPai2398/tensorflow
python
def depthwise_conv2d(input, filter, strides, padding, name=None):
    with ops.op_scope([input, filter], name, 'depthwise') as name:
        input = ops.convert_to_tensor(input, name='tensor_in')
        filter = ops.convert_to_tensor(filter, name='filter_in')
        if filter.get_shape().ndims is not None:
            assert len(filter.get_shape()) == 4
            in_channels = filter.get_shape()[2]
            if input.get_shape().ndims is not None:
                assert len(input.get_shape()) == 4
                assert input.get_shape()[3] == in_channels, (
                    'Mismatched input depth %d and number of depthwise filters %d.'
                    % (input.get_shape()[3].value, in_channels))
        else:
            assert input.get_shape().ndims is not None, (
                'Either tensor must provide static shape information.')
            assert input.get_shape().ndims == 4
            in_channels = input.get_shape()[3]
        if in_channels == 1:
            return nn_ops.conv2d(input, filter, strides, padding, name=name)
        else:
            return nn_ops.depthwise_conv2d_native(input, filter, strides, padding, name=name)
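A plain-NumPy reference for the output formula in the docstring (stride 1, VALID padding, batch dimension omitted; the shapes are hypothetical):

    import numpy as np

    def depthwise_conv2d_ref(x, w):
        # x: [in_height, in_width, in_channels]
        # w: [filter_height, filter_width, in_channels, channel_multiplier]
        H, W, C = x.shape
        fh, fw, _, M = w.shape
        out = np.zeros((H - fh + 1, W - fw + 1, C * M))
        for i in range(out.shape[0]):
            for j in range(out.shape[1]):
                for k in range(C):
                    for q in range(M):
                        # output[i, j, k*M + q] = sum_{di,dj} x[i+di, j+dj, k] * w[di, dj, k, q]
                        out[i, j, k * M + q] = np.sum(x[i:i + fh, j:j + fw, k] * w[:, :, k, q])
        return out

    x = np.random.rand(6, 6, 3)
    w = np.random.rand(2, 2, 3, 4)
    print(depthwise_conv2d_ref(x, w).shape)  # (5, 5, 12): in_channels * channel_multiplier = 12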
def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None):
    """2-D convolution with separable filters.

    Performs a depthwise convolution that acts separately on channels
    followed by a pointwise convolution that mixes channels. Note that this
    is separability between dimensions `[1, 2]` and `3`, not spatial
    separability between dimensions `1` and `2`.

    In detail,

        output[b, i, j, k] = sum_{di, dj, q, r}
            input[b, strides[1] * i + di, strides[2] * j + dj, q] *
            depthwise_filter[di, dj, q, r] *
            pointwise_filter[0, 0, q * channel_multiplier + r, k]

    `strides` controls the strides for the depthwise convolution only, since
    the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must
    have `strides[0] = strides[3] = 1`. For the most common case of the same
    horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

    Args:
      input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`.
      depthwise_filter: 4-D `Tensor` with shape
        `[filter_height, filter_width, in_channels, channel_multiplier]`.
        Contains `in_channels` convolutional filters of depth 1.
      pointwise_filter: 4-D `Tensor` with shape
        `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise
        filter to mix channels after `depthwise_filter` has convolved
        spatially.
      strides: 1-D of size 4. The strides for the depthwise convolution for
        each dimension of `input`.
      padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
        See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
      name: A name for this operation (optional).

    Returns:
      A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`.

    Raises:
      ValueError: If channel_multiplier * in_channels > out_channels,
        which means that the separable convolution is overparameterized.
    """
    with ops.op_scope([input, depthwise_filter, pointwise_filter], name, 'separable_conv2d') as name:
        input = ops.convert_to_tensor(input, name='tensor_in')
        depthwise_filter = ops.convert_to_tensor(depthwise_filter, name='depthwise_filter')
        pointwise_filter = ops.convert_to_tensor(pointwise_filter, name='pointwise_filter')
        if pointwise_filter.get_shape().ndims is not None:
            assert len(pointwise_filter.get_shape()) == 4
            assert pointwise_filter.get_shape()[0] == 1
            assert pointwise_filter.get_shape()[1] == 1
            if depthwise_filter.get_shape().ndims and input.get_shape().ndims:
                channel_multiplier = depthwise_filter.get_shape()[3]
                in_channels = input.get_shape()[3]
                out_channels = pointwise_filter.get_shape()[3]
                if channel_multiplier * in_channels > out_channels:
                    raise ValueError('Refusing to perform an overparameterized separable convolution: '
                                     'channel_multiplier * in_channels = %d * %d = %d > %d = out_channels'
                                     % (channel_multiplier, in_channels,
                                        channel_multiplier * in_channels, out_channels))
        depthwise = nn_ops.depthwise_conv2d_native(input, depthwise_filter, strides, padding, name='depthwise')
        return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1], padding='VALID', name=name)
9,064,386,940,410,162,000
2-D convolution with separable filters.

Performs a depthwise convolution that acts separately on channels followed
by a pointwise convolution that mixes channels. Note that this is
separability between dimensions `[1, 2]` and `3`, not spatial separability
between dimensions `1` and `2`.

In detail,

    output[b, i, j, k] = sum_{di, dj, q, r}
        input[b, strides[1] * i + di, strides[2] * j + dj, q] *
        depthwise_filter[di, dj, q, r] *
        pointwise_filter[0, 0, q * channel_multiplier + r, k]

`strides` controls the strides for the depthwise convolution only, since
the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have
`strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

Args:
  input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`.
  depthwise_filter: 4-D `Tensor` with shape
    `[filter_height, filter_width, in_channels, channel_multiplier]`.
    Contains `in_channels` convolutional filters of depth 1.
  pointwise_filter: 4-D `Tensor` with shape
    `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise
    filter to mix channels after `depthwise_filter` has convolved spatially.
  strides: 1-D of size 4. The strides for the depthwise convolution for
    each dimension of `input`.
  padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
  name: A name for this operation (optional).

Returns:
  A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`.

Raises:
  ValueError: If channel_multiplier * in_channels > out_channels,
    which means that the separable convolution is overparameterized.
tensorflow/python/ops/nn.py
separable_conv2d
AdityaPai2398/tensorflow
python
def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None):
    with ops.op_scope([input, depthwise_filter, pointwise_filter], name, 'separable_conv2d') as name:
        input = ops.convert_to_tensor(input, name='tensor_in')
        depthwise_filter = ops.convert_to_tensor(depthwise_filter, name='depthwise_filter')
        pointwise_filter = ops.convert_to_tensor(pointwise_filter, name='pointwise_filter')
        if pointwise_filter.get_shape().ndims is not None:
            assert len(pointwise_filter.get_shape()) == 4
            assert pointwise_filter.get_shape()[0] == 1
            assert pointwise_filter.get_shape()[1] == 1
            if depthwise_filter.get_shape().ndims and input.get_shape().ndims:
                channel_multiplier = depthwise_filter.get_shape()[3]
                in_channels = input.get_shape()[3]
                out_channels = pointwise_filter.get_shape()[3]
                if channel_multiplier * in_channels > out_channels:
                    raise ValueError('Refusing to perform an overparameterized separable convolution: '
                                     'channel_multiplier * in_channels = %d * %d = %d > %d = out_channels'
                                     % (channel_multiplier, in_channels,
                                        channel_multiplier * in_channels, out_channels))
        depthwise = nn_ops.depthwise_conv2d_native(input, depthwise_filter, strides, padding, name='depthwise')
        return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1], padding='VALID', name=name)
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
    """Calculate the sufficient statistics for the mean and variance of `x`.

    These sufficient statistics are computed using the one pass algorithm on
    an input that's optionally shifted. See:
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

    Args:
      x: A `Tensor`.
      axes: Array of ints. Axes along which to compute mean and variance.
      shift: A `Tensor` containing the value by which to shift the data for
        numerical stability, or `None` if no shift is to be performed. A shift
        close to the true mean provides the most numerically stable results.
      keep_dims: produce statistics with the same dimensionality as the input.
      name: Name used to scope the operations that compute the sufficient stats.

    Returns:
      Four `Tensor` objects of the same type as `x`:
      * the count (number of elements to average over).
      * the (possibly shifted) sum of the elements in the array.
      * the (possibly shifted) sum of squares of the elements in the array.
      * the shift by which the mean must be corrected or None if `shift` is None.
    """
    with ops.op_scope([x, axes, shift], name, 'sufficient_statistics'):
        x = ops.convert_to_tensor(x, name='x')
        x_shape = x.get_shape()
        if x_shape.is_fully_defined():
            counts = 1
            m_shape = []
            for d in xrange(x_shape.ndims):
                dim = x_shape[d].value
                if d in set(axes):
                    counts *= dim
                    dim = 1
                m_shape.append(dim)
            counts = constant_op.constant(counts, dtype=x.dtype)
        else:  # shape has to be inferred at runtime
            x_shape = array_ops.shape(x)
            select_axes = sparse_ops.sparse_to_dense(
                axes, array_ops.shape(x_shape), True, False)
            m_shape = math_ops.select(
                select_axes, array_ops.ones_like(x_shape), x_shape)
            counts = math_ops.cast(
                math_ops.reduce_prod(x_shape / m_shape), x.dtype, name='count')
        if shift is not None:
            shift = ops.convert_to_tensor(shift, name='shift')
            m_ss = math_ops.sub(x, shift)
            v_ss = math_ops.squared_difference(x, shift)
        else:  # no shift
            m_ss = x
            v_ss = math_ops.square(x)
        m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name='mean_ss')
        v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name='var_ss')
        return (counts, m_ss, v_ss, shift)
-59,927,612,229,581,570
Calculate the sufficient statistics for the mean and variance of `x`.

These sufficient statistics are computed using the one pass algorithm on an
input that's optionally shifted. See:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

Args:
  x: A `Tensor`.
  axes: Array of ints. Axes along which to compute mean and variance.
  shift: A `Tensor` containing the value by which to shift the data for
    numerical stability, or `None` if no shift is to be performed. A shift
    close to the true mean provides the most numerically stable results.
  keep_dims: produce statistics with the same dimensionality as the input.
  name: Name used to scope the operations that compute the sufficient stats.

Returns:
  Four `Tensor` objects of the same type as `x`:
  * the count (number of elements to average over).
  * the (possibly shifted) sum of the elements in the array.
  * the (possibly shifted) sum of squares of the elements in the array.
  * the shift by which the mean must be corrected or None if `shift` is None.
tensorflow/python/ops/nn.py
sufficient_statistics
AdityaPai2398/tensorflow
python
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
    with ops.op_scope([x, axes, shift], name, 'sufficient_statistics'):
        x = ops.convert_to_tensor(x, name='x')
        x_shape = x.get_shape()
        if x_shape.is_fully_defined():
            counts = 1
            m_shape = []
            for d in xrange(x_shape.ndims):
                dim = x_shape[d].value
                if d in set(axes):
                    counts *= dim
                    dim = 1
                m_shape.append(dim)
            counts = constant_op.constant(counts, dtype=x.dtype)
        else:
            x_shape = array_ops.shape(x)
            select_axes = sparse_ops.sparse_to_dense(
                axes, array_ops.shape(x_shape), True, False)
            m_shape = math_ops.select(
                select_axes, array_ops.ones_like(x_shape), x_shape)
            counts = math_ops.cast(
                math_ops.reduce_prod(x_shape / m_shape), x.dtype, name='count')
        if shift is not None:
            shift = ops.convert_to_tensor(shift, name='shift')
            m_ss = math_ops.sub(x, shift)
            v_ss = math_ops.squared_difference(x, shift)
        else:
            m_ss = x
            v_ss = math_ops.square(x)
        m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name='mean_ss')
        v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name='var_ss')
        return (counts, m_ss, v_ss, shift)
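Why shifting helps, in NumPy: with float32 data far from zero, the unshifted one-pass variance cancels catastrophically, while shifting by a value near the mean keeps full precision:

    import numpy as np

    x = (1e6 + np.array([1.0, 2.0, 3.0])).astype(np.float32)

    # unshifted one-pass statistics: variance = E[x^2] - E[x]^2
    naive = np.sum(np.square(x)) / x.size - (np.sum(x) / x.size) ** 2

    # shifted by a value close to the true mean, as the docstring recommends
    d = x - np.float32(1e6)
    shifted = np.sum(np.square(d)) / x.size - (np.sum(d) / x.size) ** 2

    print(naive, shifted)  # large cancellation error vs the true 0.6667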
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
    """Calculate the mean and variance based on the sufficient statistics.

    Args:
      counts: A `Tensor` containing the total count of the data (one value).
      mean_ss: A `Tensor` containing the mean sufficient statistics: the
        (possibly shifted) sum of the elements to average over.
      variance_ss: A `Tensor` containing the variance sufficient statistics:
        the (possibly shifted) squared sum of the data to compute the
        variance over.
      shift: A `Tensor` containing the value by which the data is shifted for
        numerical stability, or `None` if no shift was performed.
      name: Name used to scope the operations that compute the moments.

    Returns:
      Two `Tensor` objects: `mean` and `variance`.
    """
    with ops.op_scope([counts, mean_ss, variance_ss, shift], name, 'normalize'):
        divisor = math_ops.inv(counts, name='divisor')
        if shift is not None:
            shifted_mean = math_ops.mul(mean_ss, divisor, name='shifted_mean')
            mean = math_ops.add(shifted_mean, shift, name='mean')
        else:  # no shift
            shifted_mean = math_ops.mul(mean_ss, divisor, name='mean')
            mean = shifted_mean
        variance = math_ops.sub(math_ops.mul(variance_ss, divisor),
                                math_ops.square(shifted_mean),
                                name='variance')
        return (mean, variance)
6,797,078,140,429,583,000
Calculate the mean and variance based on the sufficient statistics. Args: counts: A `Tensor` containing the total count of the data (one value). mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A `Tensor` containing the variance sufficient statistics: the (possibly shifted) squared sum of the data to compute the variance over. shift: A `Tensor` containing the value by which the data is shifted for numerical stability, or `None` if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`.
tensorflow/python/ops/nn.py
normalize_moments
AdityaPai2398/tensorflow
python
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None): with ops.op_scope([counts, mean_ss, variance_ss, shift], name, 'normalize'): divisor = math_ops.inv(counts, name='divisor') if (shift is not None): shifted_mean = math_ops.mul(mean_ss, divisor, name='shifted_mean') mean = math_ops.add(shifted_mean, shift, name='mean') else: shifted_mean = math_ops.mul(mean_ss, divisor, name='mean') mean = shifted_mean variance = math_ops.sub(math_ops.mul(variance_ss, divisor), math_ops.square(shifted_mean), name='variance') return (mean, variance)
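A hedged usage sketch chaining the two functions above, assuming they are exported under tf.nn as in this TF 0.x-era graph/session API (shapes and values are illustrative):

import tensorflow as tf  # TF 0.x-era API, matching the code above

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
counts, m_ss, v_ss, shift = tf.nn.sufficient_statistics(x, axes=[0])
mean, variance = tf.nn.normalize_moments(counts, m_ss, v_ss, shift)
with tf.Session() as sess:
    print(sess.run([mean, variance]))  # [2., 3.] and [1., 1.]: per-column moments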
def moments(x, axes, shift=None, name=None, keep_dims=False): 'Calculate the mean and variance of `x`.\n\n The mean and variance are calculated by aggregating the contents of `x`\n across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean\n and variance of a vector.\n\n When using these moments for batch normalization (see\n `tf.nn.batch_normalization`):\n * for so-called "global normalization", used with convolutional filters with\n shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.\n * for simple batch normalization pass `axes=[0]` (batch only).\n\n Args:\n x: A `Tensor`.\n axes: array of ints. Axes along which to compute mean and\n variance.\n shift: A `Tensor` containing the value by which to shift the data for\n numerical stability, or `None` if no shift is to be performed. A shift\n close to the true mean provides the most numerically stable results.\n keep_dims: produce moments with the same dimensionality as the input.\n name: Name used to scope the operations that compute the moments.\n\n Returns:\n Two `Tensor` objects: `mean` and `variance`.\n ' with ops.op_scope([x, axes, shift], name, 'moments'): y = (math_ops.cast(x, dtypes.float32) if (x.dtype == dtypes.float16) else x) (counts, m_ss, v_ss, shift) = sufficient_statistics(y, axes, shift=shift, keep_dims=keep_dims, name=name) with ops.control_dependencies([counts, m_ss, v_ss]): (mean, variance) = normalize_moments(counts, m_ss, v_ss, shift, name=name) if (x.dtype == dtypes.float16): return (math_ops.cast(mean, dtypes.float16), math_ops.cast(variance, dtypes.float16)) else: return (mean, variance)
-2,044,667,341,312,066,600
Calculate the mean and variance of `x`. The mean and variance are calculated by aggregating the contents of `x` across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and variance of a vector. When using these moments for batch normalization (see `tf.nn.batch_normalization`): * for so-called "global normalization", used with convolutional filters with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`. * for simple batch normalization pass `axes=[0]` (batch only). Args: x: A `Tensor`. axes: array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keep_dims: produce moments with the same dimensionality as the input. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`.
tensorflow/python/ops/nn.py
moments
AdityaPai2398/tensorflow
python
def moments(x, axes, shift=None, name=None, keep_dims=False): with ops.op_scope([x, axes, shift], name, 'moments'): y = (math_ops.cast(x, dtypes.float32) if (x.dtype == dtypes.float16) else x) (counts, m_ss, v_ss, shift) = sufficient_statistics(y, axes, shift=shift, keep_dims=keep_dims, name=name) with ops.control_dependencies([counts, m_ss, v_ss]): (mean, variance) = normalize_moments(counts, m_ss, v_ss, shift, name=name) if (x.dtype == dtypes.float16): return (math_ops.cast(mean, dtypes.float16), math_ops.cast(variance, dtypes.float16)) else: return (mean, variance)
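A sketch of the "global normalization" case the docstring describes, assuming the tf.nn export of moments (tensor shapes are illustrative):

import tensorflow as tf  # TF 0.x-era API

images = tf.random_normal([8, 32, 32, 3])          # [batch, height, width, depth]
mean, variance = tf.nn.moments(images, axes=[0, 1, 2])
# mean and variance each have shape [3]: one moment pair per channel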
def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None): "Batch normalization.\n\n As described in http://arxiv.org/abs/1502.03167.\n Normalizes a tensor by `mean` and `variance`, and applies (optionally) a\n `scale` \\\\(\\gamma\\\\) to it, as well as an `offset` \\\\(\\beta\\\\):\n\n \\\\(\\frac{\\gamma(x-\\mu)}{\\sigma}+\\beta\\\\)\n\n `mean`, `variance`, `offset` and `scale` are all expected to be of one of two\n shapes:\n * In all generality, they can have the same number of dimensions as the\n input `x`, with identical sizes as `x` for the dimensions that are not\n normalized over (the 'depth' dimension(s)), and dimension 1 for the\n others which are being normalized over.\n `mean` and `variance` in this case would typically be the outputs of\n `tf.nn.moments(..., keep_dims=True)` during training, or running averages\n thereof during inference.\n * In the common case where the 'depth' dimension is the last dimension in\n the input tensor `x`, they may be one dimensional tensors of the same\n size as the 'depth' dimension.\n This is the case for example for the common `[batch, depth]` layout of\n fully-connected layers, and `[batch, height, width, depth]` for\n convolutions.\n `mean` and `variance` in this case would typically be the outputs of\n `tf.nn.moments(..., keep_dims=False)` during training, or running averages\n thereof during inference.\n\n Args:\n x: Input `Tensor` of arbitrary dimensionality.\n mean: A mean `Tensor`.\n variance: A variance `Tensor`.\n offset: An offset `Tensor`, often denoted \\\\(\\beta\\\\) in equations, or\n None. If present, will be added to the normalized tensor.\n scale: A scale `Tensor`, often denoted \\\\(\\gamma\\\\) in equations, or\n `None`. If present, the scale is applied to the normalized tensor.\n variance_epsilon: A small float number to avoid dividing by 0.\n name: A name for this operation (optional).\n\n Returns:\n the normalized, scaled, offset tensor.\n " with ops.op_scope([x, mean, variance, scale, offset], name, 'batchnorm'): inv = math_ops.rsqrt((variance + variance_epsilon)) if (scale is not None): inv *= scale return ((x * inv) + ((offset - (mean * inv)) if (offset is not None) else ((- mean) * inv)))
4,443,138,785,886,978,000
Batch normalization. As described in http://arxiv.org/abs/1502.03167. Normalizes a tensor by `mean` and `variance`, and applies (optionally) a `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\): \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\) `mean`, `variance`, `offset` and `scale` are all expected to be of one of two shapes: * In all generality, they can have the same number of dimensions as the input `x`, with identical sizes as `x` for the dimensions that are not normalized over (the 'depth' dimension(s)), and dimension 1 for the others which are being normalized over. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=True)` during training, or running averages thereof during inference. * In the common case where the 'depth' dimension is the last dimension in the input tensor `x`, they may be one dimensional tensors of the same size as the 'depth' dimension. This is the case for example for the common `[batch, depth]` layout of fully-connected layers, and `[batch, height, width, depth]` for convolutions. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=False)` during training, or running averages thereof during inference. Args: x: Input `Tensor` of arbitrary dimensionality. mean: A mean `Tensor`. variance: A variance `Tensor`. offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or None. If present, will be added to the normalized tensor. scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or `None`. If present, the scale is applied to the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. name: A name for this operation (optional). Returns: the normalized, scaled, offset tensor.
tensorflow/python/ops/nn.py
batch_normalization
AdityaPai2398/tensorflow
python
def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None): with ops.op_scope([x, mean, variance, scale, offset], name, 'batchnorm'): inv = math_ops.rsqrt((variance + variance_epsilon)) if (scale is not None): inv *= scale return ((x * inv) + ((offset - (mean * inv)) if (offset is not None) else ((- mean) * inv)))
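A hedged sketch wiring moments into batch_normalization as the docstring suggests, assuming the tf.nn exports above (variable names, shapes, and epsilon are illustrative):

import tensorflow as tf  # TF 0.x-era API

x = tf.random_normal([8, 32, 32, 3])
mean, variance = tf.nn.moments(x, axes=[0, 1, 2])
beta = tf.Variable(tf.zeros([3]))   # offset
gamma = tf.Variable(tf.ones([3]))   # scale
y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, variance_epsilon=1e-3)
# y == gamma * (x - mean) / sqrt(variance + 1e-3) + beta, broadcast over the last axis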
def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None): 'Batch normalization.\n\n This op is deprecated. See `tf.nn.batch_normalization`.\n\n Args:\n t: A 4D input Tensor.\n m: A 1D mean Tensor with size matching the last dimension of t.\n This is the first output from tf.nn.moments,\n or a saved moving average thereof.\n v: A 1D variance Tensor with size matching the last dimension of t.\n This is the second output from tf.nn.moments,\n or a saved moving average thereof.\n beta: A 1D beta Tensor with size matching the last dimension of t.\n An offset to be added to the normalized tensor.\n gamma: A 1D gamma Tensor with size matching the last dimension of t.\n If "scale_after_normalization" is true, this tensor will be multiplied\n with the normalized tensor.\n variance_epsilon: A small float number to avoid dividing by 0.\n scale_after_normalization: A bool indicating whether the resulting tensor\n needs to be multiplied by gamma.\n name: A name for this operation (optional).\n\n Returns:\n A batch-normalized `t`.\n ' return batch_normalization(t, m, v, beta, (gamma if scale_after_normalization else None), variance_epsilon, name)
4,882,801,512,902,475,000
Batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. Args: t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied by gamma. name: A name for this operation (optional). Returns: A batch-normalized `t`.
tensorflow/python/ops/nn.py
batch_norm_with_global_normalization
AdityaPai2398/tensorflow
python
def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None): return batch_normalization(t, m, v, beta, (gamma if scale_after_normalization else None), variance_epsilon, name)
def _sum_rows(x): 'Returns a vector summing up each row of the matrix x.' cols = array_ops.shape(x)[1] ones_shape = array_ops.pack([cols, 1]) ones = array_ops.ones(ones_shape, x.dtype) return array_ops.reshape(math_ops.matmul(x, ones), [(- 1)])
1,137,400,891,671,356,800
Returns a vector summing up each row of the matrix x.
tensorflow/python/ops/nn.py
_sum_rows
AdityaPai2398/tensorflow
python
def _sum_rows(x): cols = array_ops.shape(x)[1] ones_shape = array_ops.pack([cols, 1]) ones = array_ops.ones(ones_shape, x.dtype) return array_ops.reshape(math_ops.matmul(x, ones), [(- 1)])
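A quick NumPy check of the identity this helper relies on: multiplying a matrix by a column of ones produces the row sums (data is illustrative):

import numpy as np

x = np.arange(6.0).reshape(2, 3)
ones = np.ones((3, 1))
assert np.allclose((x @ ones).reshape(-1), x.sum(axis=1))  # matmul with ones == row sums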
def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy='mod', name=None): 'Helper function for nce_loss and sampled_softmax_loss functions.\n\n Computes sampled output training logits and labels suitable for implementing\n e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see\n sampled_softmax_loss).\n\n Note: In the case where num_true > 1, we assign to each target class\n the target probability 1 / num_true so that the target probabilities\n sum to 1 per-example.\n\n Args:\n weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`\n objects whose concatenation along dimension 0 has shape\n `[num_classes, dim]`. The (possibly-partitioned) class embeddings.\n biases: A `Tensor` of shape `[num_classes]`. The class biases.\n inputs: A `Tensor` of shape `[batch_size, dim]`. The forward\n activations of the input network.\n labels: A `Tensor` of type `int64` and shape `[batch_size,\n num_true]`. The target classes. Note that this format differs from\n the `labels` argument of `nn.softmax_cross_entropy_with_logits`.\n num_sampled: An `int`. The number of classes to randomly sample per batch.\n num_classes: An `int`. The number of possible classes.\n num_true: An `int`. The number of target classes per training example.\n sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,\n `sampled_expected_count`) returned by a `*_candidate_sampler` function.\n (if None, we default to `log_uniform_candidate_sampler`)\n subtract_log_q: A `bool`. whether to subtract the log expected count of\n the labels in the sample to get the logits of the true labels.\n Default is True. Turn off for Negative Sampling.\n remove_accidental_hits: A `bool`. whether to remove "accidental hits"\n where a sampled class equals one of the target classes. Default is\n False.\n partition_strategy: A string specifying the partitioning strategy, relevant\n if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.\n Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.\n name: A name for the operation (optional).\n Returns:\n out_logits, out_labels: `Tensor` objects each with shape\n `[batch_size, num_true + num_sampled]`, for passing to either\n `nn.sigmoid_cross_entropy_with_logits` (NCE) or\n `nn.softmax_cross_entropy_with_logits` (sampled softmax).\n ' if (not isinstance(weights, list)): weights = [weights] with ops.op_scope((weights + [biases, inputs, labels]), name, 'compute_sampled_logits'): if (labels.dtype != dtypes.int64): labels = math_ops.cast(labels, dtypes.int64) labels_flat = array_ops.reshape(labels, [(- 1)]) if (sampled_values is None): sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(true_classes=labels, num_true=num_true, num_sampled=num_sampled, unique=True, range_max=num_classes) (sampled, true_expected_count, sampled_expected_count) = sampled_values all_ids = array_ops.concat(0, [labels_flat, sampled]) all_w = embedding_ops.embedding_lookup(weights, all_ids, partition_strategy=partition_strategy) all_b = embedding_ops.embedding_lookup(biases, all_ids) true_w = array_ops.slice(all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], (- 1)])) true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat)) dim = array_ops.shape(true_w)[1:2] new_true_w_shape = array_ops.concat(0, [[(- 1), num_true], dim]) row_wise_dots = math_ops.mul(array_ops.expand_dims(inputs, 1), array_ops.reshape(true_w, new_true_w_shape)) dots_as_matrix = array_ops.reshape(row_wise_dots, array_ops.concat(0, [[(- 1)], dim])) true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [(- 1), num_true]) true_b = array_ops.reshape(true_b, [(- 1), num_true]) true_logits += true_b sampled_w = array_ops.slice(all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [(- 1), (- 1)]) sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [(- 1)]) sampled_logits = (math_ops.matmul(inputs, sampled_w, transpose_b=True) + sampled_b) if remove_accidental_hits: acc_hits = candidate_sampling_ops.compute_accidental_hits(labels, sampled, num_true=num_true) (acc_indices, acc_ids, acc_weights) = acc_hits acc_indices_2d = array_ops.reshape(acc_indices, [(- 1), 1]) acc_ids_2d_int32 = array_ops.reshape(math_ops.cast(acc_ids, dtypes.int32), [(- 1), 1]) sparse_indices = array_ops.concat(1, [acc_indices_2d, acc_ids_2d_int32], 'sparse_indices') sampled_logits_shape = array_ops.concat(0, [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)]) if (sampled_logits.dtype != acc_weights.dtype): acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype) sampled_logits += sparse_ops.sparse_to_dense(sparse_indices, sampled_logits_shape, acc_weights, default_value=0.0, validate_indices=False) if subtract_log_q: true_logits -= math_ops.log(true_expected_count) sampled_logits -= math_ops.log(sampled_expected_count) out_logits = array_ops.concat(1, [true_logits, sampled_logits]) out_labels = array_ops.concat(1, [(array_ops.ones_like(true_logits) / num_true), array_ops.zeros_like(sampled_logits)]) return (out_logits, out_labels)
3,862,293,874,763,613,000
Helper function for nce_loss and sampled_softmax_loss functions. Computes sampled output training logits and labels suitable for implementing e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see sampled_softmax_loss). Note: In the case where num_true > 1, we assign to each target class the target probability 1 / num_true so that the target probabilities sum to 1 per-example. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape `[num_classes, dim]`. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) subtract_log_q: A `bool`. whether to subtract the log expected count of the labels in the sample to get the logits of the true labels. Default is True. Turn off for Negative Sampling. remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: out_logits, out_labels: `Tensor` objects each with shape `[batch_size, num_true + num_sampled]`, for passing to either `nn.sigmoid_cross_entropy_with_logits` (NCE) or `nn.softmax_cross_entropy_with_logits` (sampled softmax).
tensorflow/python/ops/nn.py
_compute_sampled_logits
AdityaPai2398/tensorflow
python
def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy='mod', name=None): if (not isinstance(weights, list)): weights = [weights] with ops.op_scope((weights + [biases, inputs, labels]), name, 'compute_sampled_logits'): if (labels.dtype != dtypes.int64): labels = math_ops.cast(labels, dtypes.int64) labels_flat = array_ops.reshape(labels, [(- 1)]) if (sampled_values is None): sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(true_classes=labels, num_true=num_true, num_sampled=num_sampled, unique=True, range_max=num_classes) (sampled, true_expected_count, sampled_expected_count) = sampled_values all_ids = array_ops.concat(0, [labels_flat, sampled]) all_w = embedding_ops.embedding_lookup(weights, all_ids, partition_strategy=partition_strategy) all_b = embedding_ops.embedding_lookup(biases, all_ids) true_w = array_ops.slice(all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], (- 1)])) true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat)) dim = array_ops.shape(true_w)[1:2] new_true_w_shape = array_ops.concat(0, [[(- 1), num_true], dim]) row_wise_dots = math_ops.mul(array_ops.expand_dims(inputs, 1), array_ops.reshape(true_w, new_true_w_shape)) dots_as_matrix = array_ops.reshape(row_wise_dots, array_ops.concat(0, [[(- 1)], dim])) true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [(- 1), num_true]) true_b = array_ops.reshape(true_b, [(- 1), num_true]) true_logits += true_b sampled_w = array_ops.slice(all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [(- 1), (- 1)]) sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [(- 1)]) sampled_logits = (math_ops.matmul(inputs, sampled_w, transpose_b=True) + sampled_b) if remove_accidental_hits: acc_hits = candidate_sampling_ops.compute_accidental_hits(labels, sampled, num_true=num_true) (acc_indices, acc_ids, acc_weights) = acc_hits acc_indices_2d = array_ops.reshape(acc_indices, [(- 1), 1]) acc_ids_2d_int32 = array_ops.reshape(math_ops.cast(acc_ids, dtypes.int32), [(- 1), 1]) sparse_indices = array_ops.concat(1, [acc_indices_2d, acc_ids_2d_int32], 'sparse_indices') sampled_logits_shape = array_ops.concat(0, [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)]) if (sampled_logits.dtype != acc_weights.dtype): acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype) sampled_logits += sparse_ops.sparse_to_dense(sparse_indices, sampled_logits_shape, acc_weights, default_value=0.0, validate_indices=False) if subtract_log_q: true_logits -= math_ops.log(true_expected_count) sampled_logits -= math_ops.log(sampled_expected_count) out_logits = array_ops.concat(1, [true_logits, sampled_logits]) out_labels = array_ops.concat(1, [(array_ops.ones_like(true_logits) / num_true), array_ops.zeros_like(sampled_logits)]) return (out_logits, out_labels)
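A small NumPy sketch of how the returned out_labels split the target probability mass across the true classes, as the "Note" in the docstring describes (shapes are illustrative):

import numpy as np

batch, num_true, num_sampled = 2, 2, 3
true_logits = np.zeros((batch, num_true))
sampled_logits = np.zeros((batch, num_sampled))
out_labels = np.concatenate([np.ones_like(true_logits) / num_true,
                             np.zeros_like(sampled_logits)], axis=1)
print(out_labels.sum(axis=1))  # [1. 1.]: each example's target mass sums to 1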
def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy='mod', name='nce_loss'): 'Computes and returns the noise-contrastive estimation training loss.\n\n See [Noise-contrastive estimation: A new estimation principle for\n unnormalized statistical models]\n (http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).\n Also see our [Candidate Sampling Algorithms Reference]\n (../../extras/candidate_sampling.pdf)\n\n Note: In the case where `num_true` > 1, we assign to each target class\n the target probability 1 / `num_true` so that the target probabilities\n sum to 1 per-example.\n\n Note: It would be useful to allow a variable number of target classes per\n example. We hope to provide this functionality in a future release.\n For now, if you have a variable number of target classes, you can pad them\n out to a constant number by either repeating them or by padding\n with an otherwise unused class.\n\n Args:\n weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`\n objects whose concatenation along dimension 0 has shape\n [num_classes, dim]. The (possibly-partitioned) class embeddings.\n biases: A `Tensor` of shape `[num_classes]`. The class biases.\n inputs: A `Tensor` of shape `[batch_size, dim]`. The forward\n activations of the input network.\n labels: A `Tensor` of type `int64` and shape `[batch_size,\n num_true]`. The target classes.\n num_sampled: An `int`. The number of classes to randomly sample per batch.\n num_classes: An `int`. The number of possible classes.\n num_true: An `int`. The number of target classes per training example.\n sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,\n `sampled_expected_count`) returned by a `*_candidate_sampler` function.\n (if None, we default to `log_uniform_candidate_sampler`)\n remove_accidental_hits: A `bool`. Whether to remove "accidental hits"\n where a sampled class equals one of the target classes. If set to\n `True`, this is a "Sampled Logistic" loss instead of NCE, and we are\n learning to generate log-odds instead of log probabilities. See\n our [Candidate Sampling Algorithms Reference]\n (../../extras/candidate_sampling.pdf).\n Default is False.\n partition_strategy: A string specifying the partitioning strategy, relevant\n if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.\n Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.\n name: A name for the operation (optional).\n\n Returns:\n A `batch_size` 1-D tensor of per-example NCE losses.\n ' (logits, labels) = _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = sigmoid_cross_entropy_with_logits(logits, labels, name='sampled_losses') return _sum_rows(sampled_losses)
5,876,890,148,579,109,000
Computes and returns the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models] (http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). Also see our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Note: In the case where `num_true` > 1, we assign to each target class the target probability 1 / `num_true` so that the target probabilities sum to 1 per-example. Note: It would be useful to allow a variable number of target classes per example. We hope to provide this functionality in a future release. For now, if you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. Whether to remove "accidental hits" where a sampled class equals one of the target classes. If set to `True`, this is a "Sampled Logistic" loss instead of NCE, and we are learning to generate log-odds instead of log probabilities. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf). Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example NCE losses.
tensorflow/python/ops/nn.py
nce_loss
AdityaPai2398/tensorflow
python
def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy='mod', name='nce_loss'): (logits, labels) = _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = sigmoid_cross_entropy_with_logits(logits, labels, name='sampled_losses') return _sum_rows(sampled_losses)
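A hedged word2vec-style sketch of calling the function above via its tf.nn export (all names and sizes are illustrative placeholders, using the TF 0.x-era variable/placeholder API):

import tensorflow as tf  # TF 0.x-era API

vocab_size, dim, batch_size, num_sampled = 10000, 128, 32, 64
nce_weights = tf.Variable(tf.truncated_normal([vocab_size, dim], stddev=0.1))
nce_biases = tf.Variable(tf.zeros([vocab_size]))
inputs = tf.placeholder(tf.float32, shape=[batch_size, dim])
labels = tf.placeholder(tf.int64, shape=[batch_size, 1])
loss = tf.reduce_mean(
    tf.nn.nce_loss(nce_weights, nce_biases, inputs, labels,
                   num_sampled, vocab_size))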
def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy='mod', name='sampled_softmax_loss'): 'Computes and returns the sampled softmax training loss.\n\n This is a faster way to train a softmax classifier over a huge number of\n classes.\n\n This operation is for training only. It is generally an underestimate of\n the full softmax loss.\n\n At inference time, you can compute full softmax probabilities with the\n expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`.\n\n See our [Candidate Sampling Algorithms Reference]\n (../../extras/candidate_sampling.pdf)\n\n Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)\n ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.\n\n Args:\n weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`\n objects whose concatenation along dimension 0 has shape\n [num_classes, dim]. The (possibly-sharded) class embeddings.\n biases: A `Tensor` of shape `[num_classes]`. The class biases.\n inputs: A `Tensor` of shape `[batch_size, dim]`. The forward\n activations of the input network.\n labels: A `Tensor` of type `int64` and shape `[batch_size,\n num_true]`. The target classes. Note that this format differs from\n the `labels` argument of `nn.softmax_cross_entropy_with_logits`.\n num_sampled: An `int`. The number of classes to randomly sample per batch.\n num_classes: An `int`. The number of possible classes.\n num_true: An `int`. The number of target classes per training example.\n sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,\n `sampled_expected_count`) returned by a `*_candidate_sampler` function.\n (if None, we default to `log_uniform_candidate_sampler`)\n remove_accidental_hits: A `bool`. whether to remove "accidental hits"\n where a sampled class equals one of the target classes. Default is\n True.\n partition_strategy: A string specifying the partitioning strategy, relevant\n if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.\n Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.\n name: A name for the operation (optional).\n\n Returns:\n A `batch_size` 1-D tensor of per-example sampled softmax losses.\n\n ' (logits, labels) = _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels) return sampled_losses
-82,977,646,637,382,370
Computes and returns the sampled softmax training loss. This is a faster way to train a softmax classifier over a huge number of classes. This operation is for training only. It is generally an underestimate of the full softmax loss. At inference time, you can compute full softmax probabilities with the expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007) ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-sharded) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is True. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example sampled softmax losses.
tensorflow/python/ops/nn.py
sampled_softmax_loss
AdityaPai2398/tensorflow
python
def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy='mod', name='sampled_softmax_loss'): (logits, labels) = _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels) return sampled_losses
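A hedged sketch contrasting the training-time sampled loss with the full softmax the docstring recommends at inference time (placeholder names and sizes, TF 0.x-era API):

import tensorflow as tf  # TF 0.x-era API

vocab_size, dim, batch_size, num_sampled = 10000, 128, 32, 64
weights = tf.Variable(tf.truncated_normal([vocab_size, dim], stddev=0.1))
biases = tf.Variable(tf.zeros([vocab_size]))
inputs = tf.placeholder(tf.float32, [batch_size, dim])
labels = tf.placeholder(tf.int64, [batch_size, 1])
train_loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
    weights, biases, inputs, labels, num_sampled, vocab_size))
# at inference time, compute full probabilities as the docstring suggests:
probs = tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)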
def get_data(img_pth: Union[(str, os.PathLike)]) -> dict: 'Get a single data point from the given .json file path' with open(img_pth, 'r') as f: data = json.load(f) return data
6,528,922,999,603,494,000
Get a single data point from the given .json file path
analyze_dataset.py
get_data
PDillis/coiltraine
python
def get_data(img_pth: Union[(str, os.PathLike)]) -> dict: with open(img_pth, 'r') as f: data = json.load(f) return data
def get_original_df(path: Union[(str, os.PathLike)], filename: str, processes_per_cpu: int=2) -> Tuple[(pd.DataFrame, bool)]: 'Get a DataFrame from all the can_bus*.json files in the dataset' save_path = os.path.join(os.getcwd(), 'data_analysis', filename) if os.path.isfile(save_path): print('.npy file exists, loading it...') data = list(np.load(save_path, allow_pickle=True)) else: print('.npy file not found, constructing it...') all_data_paths = sorted(glob.glob(os.path.join(path, '**/can_bus*.json'), recursive=True)) with Pool((os.cpu_count() * processes_per_cpu)) as p: data = list(tqdm(p.imap(get_data, all_data_paths), total=len(all_data_paths))) np.save(save_path, data) df = pd.DataFrame(data) print(df.describe()) return (df, False)
-2,909,380,231,971,924,000
Get a DataFrame from all the can_bus*.json files in the dataset
analyze_dataset.py
get_original_df
PDillis/coiltraine
python
def get_original_df(path: Union[(str, os.PathLike)], filename: str, processes_per_cpu: int=2) -> Tuple[(pd.DataFrame, bool)]: save_path = os.path.join(os.getcwd(), 'data_analysis', filename) if os.path.isfile(save_path): print('.npy file exists, loading it...') data = list(np.load(save_path, allow_pickle=True)) else: print('.npy file not found, constructing it...') all_data_paths = sorted(glob.glob(os.path.join(path, '**/can_bus*.json'), recursive=True)) with Pool((os.cpu_count() * processes_per_cpu)) as p: data = list(tqdm(p.imap(get_data, all_data_paths), total=len(all_data_paths))) np.save(save_path, data) df = pd.DataFrame(data) print(df.describe()) return (df, False)
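A hedged usage sketch of the loader above (the dataset path and cache filename are placeholders):

# hypothetical invocation; paths are illustrative
df, augmented = get_original_df('/data/carla_dataset', 'carla_dataset.npy')
print(len(df), augmented)  # number of can_bus*.json frames found, False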
def get_augmented_df(preloads_name: str) -> Tuple[(pd.DataFrame, bool)]: 'Use the preloads file to load the data; this data will already be augmented, since augmentation was applied when the preloads were created' assert preloads_name.endswith('.npy') data = np.load(os.path.join(os.getcwd(), '_preloads', preloads_name), allow_pickle=True)[1] df = pd.DataFrame(data) print(df.describe()) return (df, True)
6,811,287,361,663,459,000
Use the preloads file to load the data; this data will already be augmented, since augmentation was applied when the preloads were created
analyze_dataset.py
get_augmented_df
PDillis/coiltraine
python
def get_augmented_df(preloads_name: str) -> Tuple[(pd.DataFrame, bool)]: assert preloads_name.endswith('.npy') data = np.load(os.path.join(os.getcwd(), '_preloads', preloads_name), allow_pickle=True)[1] df = pd.DataFrame(data) print(df.describe()) return (df, True)
def violin_plot(df: pd.DataFrame, save_name: str, augmented: bool) -> None: 'Save violin plot for the interesting parameters using df' directions_dict = {'No Action': 2.0, 'Turn Left': 3.0, 'Turn Right': 4.0, 'Continue Straight': 5.0} def set_lines(ax): for l in ax.lines: l.set_linestyle('--') l.set_linewidth(0.6) l.set_color('white') l.set_alpha(0.7) for l in ax.lines[1::3]: l.set_linestyle('-') l.set_linewidth(1.3) l.set_color('black') l.set_alpha(0.8) for key in directions_dict: data = df[(df['directions'] == directions_dict[key])] fig = plt.figure(figsize=(8, 6)) gs = fig.add_gridspec(1, 4) fig.add_subplot(gs[(0, 0)]) ax = sns.violinplot(y='steer', data=data, color='r', inner='quartile') set_lines(ax) fig.add_subplot(gs[(0, 1)]) ax = sns.violinplot(y='throttle', data=data, color='g', inner='quartile') set_lines(ax) fig.add_subplot(gs[(0, 2)]) ax = sns.violinplot(y='brake', data=data, color='b', inner='quartile') set_lines(ax) fig.add_subplot(gs[(0, 3)]) ax = sns.violinplot(y='speed', data=data, color='m', inner='quartile') set_lines(ax) fig.tight_layout() fig.subplots_adjust(top=0.88) stitle = f'Direction: {key} - $N={len(data)}$ - ${((100 * len(data)) / len(df)):6.3f}$% of total' stitle = (f'{stitle} - Augmented' if augmented else stitle) fig.suptitle(stitle, fontsize=16) fname = f"{save_name}-{key.replace(' ', '')}" fname = (f'{fname}-aug' if augmented else fname) fig_name = os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots', f'{fname}.png') os.makedirs(os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots'), exist_ok=True) plt.savefig(fig_name) plt.close()
3,672,524,993,753,016,000
Save violin plot for the interesting parameters using df
analyze_dataset.py
violin_plot
PDillis/coiltraine
python
def violin_plot(df: pd.DataFrame, save_name: str, augmented: bool) -> None: directions_dict = {'No Action': 2.0, 'Turn Left': 3.0, 'Turn Right': 4.0, 'Continue Straight': 5.0} def set_lines(ax): for l in ax.lines: l.set_linestyle('--') l.set_linewidth(0.6) l.set_color('white') l.set_alpha(0.7) for l in ax.lines[1::3]: l.set_linestyle('-') l.set_linewidth(1.3) l.set_color('black') l.set_alpha(0.8) for key in directions_dict: data = df[(df['directions'] == directions_dict[key])] fig = plt.figure(figsize=(8, 6)) gs = fig.add_gridspec(1, 4) fig.add_subplot(gs[(0, 0)]) ax = sns.violinplot(y='steer', data=data, color='r', inner='quartile') set_lines(ax) fig.add_subplot(gs[(0, 1)]) ax = sns.violinplot(y='throttle', data=data, color='g', inner='quartile') set_lines(ax) fig.add_subplot(gs[(0, 2)]) ax = sns.violinplot(y='brake', data=data, color='b', inner='quartile') set_lines(ax) fig.add_subplot(gs[(0, 3)]) ax = sns.violinplot(y='speed', data=data, color='m', inner='quartile') set_lines(ax) fig.tight_layout() fig.subplots_adjust(top=0.88) stitle = f'Direction: {key} - $N={len(data)}$ - ${((100 * len(data)) / len(df)):6.3f}$% of total' stitle = (f'{stitle} - Augmented' if augmented else stitle) fig.suptitle(stitle, fontsize=16) fname = f"{save_name}-{key.replace(' ', '')}" fname = (f'{fname}-aug' if augmented else fname) fig_name = os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots', f'{fname}.png') os.makedirs(os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots'), exist_ok=True) plt.savefig(fig_name) plt.close()
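A hedged sketch chaining the loaders above with the plotting helper (filenames are placeholders):

# hypothetical invocation; the preloads filename and save name are illustrative
df, augmented = get_augmented_df('my_preloads.npy')
violin_plot(df, save_name='my_dataset', augmented=augmented)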
def plot_clients(path: Union[(str, os.PathLike)], df: pd.DataFrame, augmented: bool, speed_factor: float) -> None: 'Plot the steer, throttle, brake, and speed of a client during its data collection' if path.endswith(os.sep): path = path[:(- 1)] dataset_name = os.path.basename(path) s_path = os.path.join(os.getcwd(), 'data_analysis', dataset_name, 'clients') os.makedirs(s_path, exist_ok=True) clients = glob.glob(os.path.join(path, '**/*')) clients = [cl for cl in clients if os.path.isdir(cl)] num_clients = len(clients) num_frames = len(df) num_frames_per_client = (num_frames // num_clients) def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[(List[int], List[float])]: 'Get the index and directions from the df of the actions taken by the client' df['directions_str'] = df['directions'].astype(str) df['change'] = (df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str']) index_change = list(df.loc[(df['change'] == True)].index.values) index_change = ([((cli - 1) * len(df))] + index_change) dirs = list(df['directions'][index_change].values) index_change = (index_change + [((cli * len(df)) - 1)]) return (index_change, dirs) my_labels = {2.0: 'No Action', 3.0: 'Turn Left', 4.0: 'Turn Right', 5.0: 'Continue Straight'} colors = {2.0: 'gold', 3.0: 'gray', 4.0: 'cyan', 5.0: 'magenta'} total_action_counts = Counter({2.0: 0, 3.0: 0, 4.0: 0, 5.0: 0}) max_speed_clients = {} idx_change_clients = {} dirs_clients = {} for client in tqdm(range(1, (num_clients + 1)), total=num_clients, unit='clients'): if augmented: df_client = df[((client - 1) * num_frames_per_client):(client * num_frames_per_client):3] else: df_client = df[((client - 1) * num_frames_per_client):(client * num_frames_per_client)] df_client['speed'] = df_client['speed'].div(speed_factor) actual_max_speed = df_client['speed'].max() max_speed_clients[client] = actual_max_speed (fig, ax) = plt.subplots(figsize=(48, 16)) fig.tight_layout(rect=[0, 0.03, 1, 0.95]) df_client.plot(y=['steer', 'throttle', 'brake', 'speed'], ax=ax) (idx_change, dirs) = get_change_locs(df_client, client) for (idx, dir) in enumerate(dirs): ax.axvspan(idx_change[idx], idx_change[(idx + 1)], facecolor=colors[dir], alpha=0.5, label=my_labels[dir]) idx_change_clients[f'client_{client:02d}'] = [int(idx) for idx in idx_change] dirs_clients[f'client_{client:02d}'] = [float(d) for d in dirs] dirs_count = Counter(dirs) total_action_counts += dirs_count total_actions = '' for key in my_labels: total_actions += f' - {my_labels[key]}: {dirs_count[key]}' suptitle = f'Client {client} - Actual max speed: {actual_max_speed:.4f}' suptitle = (f'{suptitle} - Augmented' if augmented else suptitle) suptitle = f'{suptitle}{total_actions}' plt.suptitle(suptitle, fontsize=30) plt.xlabel('Frame idx', fontsize=22) plt.ylabel('Normed value', fontsize=22) plt.xticks(list(range(((client - 1) * num_frames_per_client), ((client * num_frames_per_client) + 1), (len(df_client) // 20)))) (hand, labl) = ax.get_legend_handles_labels() handout = [] lablout = [] for (h, l) in zip(hand, labl): if (l not in lablout): lablout.append(l) handout.append(h) ax.legend(handout, lablout, fontsize='x-large') sname = os.path.join(s_path, f'{dataset_name}_Client{client:02d}') sname = (f'{sname}-aug' if augmented else sname) plt.savefig(f'{sname}.png', dpi=300) plt.close() actions_summary = {'avg_no_action': (total_action_counts[2.0] / num_clients), 'avg_turn_left': (total_action_counts[3.0] / num_clients), 'avg_turn_right': (total_action_counts[4.0] / num_clients), 'avg_continue_straight': (total_action_counts[5.0] / num_clients)} summary = {'num_clients': num_clients, 'num_frames_per_client': num_frames_per_client, 'hours_per_client': (num_frames_per_client / ((20 * 60) * 60)), 'total_action_counts': total_action_counts, 'actions_summary': actions_summary, 'max_speed_clients': max_speed_clients, 'idx_change_clients': idx_change_clients, 'dirs_clients': dirs_clients} with open(os.path.join(s_path, f'{dataset_name}-summary.json'), 'w') as f: json.dump(summary, f, indent=4)
-3,650,115,691,062,344,700
Plot the steer, throttle, brake, and speed of a client during its data collection
analyze_dataset.py
plot_clients
PDillis/coiltraine
python
def plot_clients(path: Union[(str, os.PathLike)], df: pd.DataFrame, augmented: bool, speed_factor: float) -> None: if path.endswith(os.sep): path = path[:(- 1)] dataset_name = os.path.basename(path) s_path = os.path.join(os.getcwd(), 'data_analysis', dataset_name, 'clients') os.makedirs(s_path, exist_ok=True) clients = glob.glob(os.path.join(path, '**/*')) clients = [cl for cl in clients if os.path.isdir(cl)] num_clients = len(clients) num_frames = len(df) num_frames_per_client = (num_frames // num_clients) def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[(List[int], List[float])]: 'Get the index and directions from the df of the actions taken by the client' df['directions_str'] = df['directions'].astype(str) df['change'] = (df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str']) index_change = list(df.loc[(df['change'] == True)].index.values) index_change = ([((cli - 1) * len(df))] + index_change) dirs = list(df['directions'][index_change].values) index_change = (index_change + [((cli * len(df)) - 1)]) return (index_change, dirs) my_labels = {2.0: 'No Action', 3.0: 'Turn Left', 4.0: 'Turn Right', 5.0: 'Continue Straight'} colors = {2.0: 'gold', 3.0: 'gray', 4.0: 'cyan', 5.0: 'magenta'} total_action_counts = Counter({2.0: 0, 3.0: 0, 4.0: 0, 5.0: 0}) max_speed_clients = {} idx_change_clients = {} dirs_clients = {} for client in tqdm(range(1, (num_clients + 1)), total=num_clients, unit='clients'): if augmented: df_client = df[((client - 1) * num_frames_per_client):(client * num_frames_per_client):3] else: df_client = df[((client - 1) * num_frames_per_client):(client * num_frames_per_client)] df_client['speed'] = df_client['speed'].div(speed_factor) actual_max_speed = df_client['speed'].max() max_speed_clients[client] = actual_max_speed (fig, ax) = plt.subplots(figsize=(48, 16)) fig.tight_layout(rect=[0, 0.03, 1, 0.95]) df_client.plot(y=['steer', 'throttle', 'brake', 'speed'], ax=ax) (idx_change, dirs) = get_change_locs(df_client, client) for (idx, dir) in enumerate(dirs): ax.axvspan(idx_change[idx], idx_change[(idx + 1)], facecolor=colors[dir], alpha=0.5, label=my_labels[dir]) idx_change_clients[f'client_{client:02d}'] = [int(idx) for idx in idx_change] dirs_clients[f'client_{client:02d}'] = [float(d) for d in dirs] dirs_count = Counter(dirs) total_action_counts += dirs_count total_actions = for key in my_labels: total_actions += f' - {my_labels[key]}: {dirs_count[key]}' suptitle = f'Client {client} - Actual max speed: {actual_max_speed:.4f}' suptitle = (f'{suptitle} - Augmented' if augmented else suptitle) suptitle = f'{suptitle}{total_actions}' plt.suptitle(suptitle, fontsize=30) plt.xlabel('Frame idx', fontsize=22) plt.ylabel('Normed value', fontsize=22) plt.xticks(list(range(((client - 1) * num_frames_per_client), ((client * num_frames_per_client) + 1), (len(df_client) // 20)))) (hand, labl) = ax.get_legend_handles_labels() handout = [] lablout = [] for (h, l) in zip(hand, labl): if (l not in lablout): lablout.append(l) handout.append(h) ax.legend(handout, lablout, fontsize='x-large') sname = os.path.join(s_path, f'{dataset_name}_Client{client:02d}') sname = (f'{sname}-aug' if augmented else sname) plt.savefig(f'{sname}.png', dpi=300) plt.close() actions_summary = {'avg_no_action': (total_action_counts[2.0] / num_clients), 'avg_turn_left': (total_action_counts[3.0] / num_clients), 'avg_turn_right': (total_action_counts[4.0] / num_clients), 'avg_continue_straight': (total_action_counts[5.0] / num_clients)} summary = {'num_clients': 
num_clients, 'num_frames_per_client': num_frames_per_client, 'hours_per_client': (num_frames_per_client / ((20 * 60) * 60)), 'total_action_counts': total_action_counts, 'actions_summary': actions_summary, 'max_speed_clients': max_speed_clients, 'idx_change_clients': idx_change_clients, 'dirs_clients': dirs_clients} with open(os.path.join(s_path, f'{dataset_name}-summary.json'), 'w') as f: json.dump(summary, f, indent=4)
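A minimal usage sketch for the plot_clients record above, assuming a dataset directory with one sub-directory per client and a frame-level DataFrame already loaded; the file names and the speed_factor value are illustrative, not taken from the source:

import pandas as pd

# hypothetical inputs: the real frame comes from the dataset's measurement files
df = pd.read_csv('datasets/my_dataset/measurements.csv')
plot_clients(path='datasets/my_dataset', df=df, augmented=False, speed_factor=12.0)
# writes data_analysis/my_dataset/clients/my_dataset_ClientNN.png per client,
# plus my_dataset-summary.json in the same directory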
def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[(List[int], List[float])]: 'Get the index and directions from the df of the actions taken by the client' df['directions_str'] = df['directions'].astype(str) df['change'] = (df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str']) index_change = list(df.loc[(df['change'] == True)].index.values) index_change = ([((cli - 1) * len(df))] + index_change) dirs = list(df['directions'][index_change].values) index_change = (index_change + [((cli * len(df)) - 1)]) return (index_change, dirs)
-2,207,295,983,396,975,400
Get the index and directions from the df of the actions taken by the client
analyze_dataset.py
get_change_locs
PDillis/coiltraine
python
def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[(List[int], List[float])]: df['directions_str'] = df['directions'].astype(str) df['change'] = (df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str']) index_change = list(df.loc[(df['change'] == True)].index.values) index_change = ([((cli - 1) * len(df))] + index_change) dirs = list(df['directions'][index_change].values) index_change = (index_change + [((cli * len(df)) - 1)]) return (index_change, dirs)
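The heart of get_change_locs is a shift-and-compare change-point detection on the directions column; a toy illustration of the same trick with a scalar fill_value (the frame below is hypothetical):

import pandas as pd

df = pd.DataFrame({'directions': [2.0, 2.0, 3.0, 3.0, 5.0]})
prev = df['directions'].shift(1, fill_value=df['directions'].iloc[0])
changed = df['directions'].ne(prev)
print(list(df.index[changed]))  # [2, 4] -- the frames where the commanded action changes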
def t_NUMBER(t): '[0-9]+' return t
-5,521,826,655,453,105,000
[0-9]+
py_lex.py
t_NUMBER
Spico197/PythonCompilerPrinciplesExp
python
def t_NUMBER(t): return t
def t_PRINT(t): 'print' return t
-3,596,005,817,379,416,000
print
py_lex.py
t_PRINT
Spico197/PythonCompilerPrinciplesExp
python
def t_PRINT(t): return t
def t_IF(t): 'if' return t
2,975,524,291,271,362,600
if
py_lex.py
t_IF
Spico197/PythonCompilerPrinciplesExp
python
def t_IF(t): return t
def t_WHILE(t): 'while' return t
-8,815,080,414,704,908,000
while
py_lex.py
t_WHILE
Spico197/PythonCompilerPrinciplesExp
python
def t_WHILE(t): return t
def t_FOR(t): 'for' return t
-2,868,480,328,159,569,400
for
py_lex.py
t_FOR
Spico197/PythonCompilerPrinciplesExp
python
def t_FOR(t): return t
def t_LEN(t): 'len' return t
995,836,586,919,926,800
len
py_lex.py
t_LEN
Spico197/PythonCompilerPrinciplesExp
python
def t_LEN(t): return t
def t_INC(t): '\\+\\+' return t
4,309,525,618,600,526,300
\+\+
py_lex.py
t_INC
Spico197/PythonCompilerPrinciplesExp
python
def t_INC(t): '\\+\\+' return t
def t_GDIV(t): '//' return t
153,917,572,362,196,000
//
py_lex.py
t_GDIV
Spico197/PythonCompilerPrinciplesExp
python
def t_GDIV(t): return t
def t_BREAK(t): 'break' return t
5,680,340,504,264,076,000
break
py_lex.py
t_BREAK
Spico197/PythonCompilerPrinciplesExp
python
def t_BREAK(t): return t
def t_LET(t): '<=' return t
-8,775,522,863,221,156,000
<=
py_lex.py
t_LET
Spico197/PythonCompilerPrinciplesExp
python
def t_LET(t): return t
def t_ELIF(t): 'elif' return t
-4,815,384,646,013,666,000
elif
py_lex.py
t_ELIF
Spico197/PythonCompilerPrinciplesExp
python
def t_ELIF(t): return t
def t_ELSE(t): 'else' return t
-4,633,063,001,006,124,000
else
py_lex.py
t_ELSE
Spico197/PythonCompilerPrinciplesExp
python
def t_ELSE(t): return t
def t_VARIABLE(t): '[a-zA-Z_]+' return t
2,083,747,938,742,166,500
[a-zA-Z_]+
py_lex.py
t_VARIABLE
Spico197/PythonCompilerPrinciplesExp
python
def t_VARIABLE(t): return t
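The t_* records above are PLY lexer rules: PLY takes each function's docstring as that token's regular expression, and function rules are tried in definition order, which is why the keyword rules (t_PRINT, t_IF, ...) must precede the catch-all t_VARIABLE. A self-contained sketch with a subset of the tokens; the tokens tuple, t_ignore, and t_error are assumed scaffolding from the rest of py_lex.py:

import ply.lex as lex

tokens = ('NUMBER', 'PRINT', 'VARIABLE')  # subset of the full token list

def t_PRINT(t):
    r'print'
    return t

def t_VARIABLE(t):
    r'[a-zA-Z_]+'
    return t

def t_NUMBER(t):
    r'[0-9]+'
    return t

t_ignore = ' \t'  # skip blanks between tokens

def t_error(t):
    t.lexer.skip(1)  # drop one bad character and continue

lexer = lex.lex()
lexer.input('print x 42')
for tok in lexer:
    print(tok.type, tok.value)  # PRINT print, VARIABLE x, NUMBER 42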
def run_task(task_message: str, command: str) -> None: 'Run a task in the shell, defined by a task message and its associated\n command.' print(blue_bold(task_message)) print(light(f'$ {command}')) subprocess.call(command, shell=True) print()
-3,654,012,546,749,389,000
Run a task in the shell, defined by a task message and its associated command.
check_commit.py
run_task
Cocopyth/foodshare
python
def run_task(task_message: str, command: str) -> None: 'Run a task in the shell, defined by a task message and its associated\n command.' print(blue_bold(task_message)) print(light(f'$ {command}')) subprocess.call(command, shell=True) print()
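A one-line usage sketch for run_task; the message and command are illustrative (blue_bold and light are the module's own formatting helpers):

run_task('Checking code style', 'flake8 foodshare')
# prints the bold task message, echoes "$ flake8 foodshare", then runs it in a shell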
def _uniqueColumns(self): '\n raise exception if column names (cnames) are not unique\n ' if (len(set(self.table[0])) != len(self.table[0])): raise Exception('Column names not unique')
-7,075,752,451,378,640,000
raise exception if column names (cnames) are not unique
TableData.py
_uniqueColumns
mokko/Py-TableData
python
def _uniqueColumns(self): '\n \n ' if (len(set(self.table[0])) != len(self.table[0])): raise Exception('Column names not unique')
def load_table(path, verbose=None): '\n File extension aware ingester\n\n td=TableData.load_table(path)\n \n This is an alternative to __init__. Is this pythonic enough? \n ' ext = os.path.splitext(path)[1][1:] return TableData(ext, path, verbose)
-6,098,475,671,010,790,000
File extension aware ingester td=TableData.load_table(path) This is an alternative to __init__. Is this pythonic enough?
TableData.py
load_table
mokko/Py-TableData
python
def load_table(path, verbose=None): '\n File extension aware ingester\n\n td=TableData.load_table(path)\n \n This is an alternative to __init__. Is this pythonic enough? \n ' ext = os.path.splitext(path)[1][1:] return TableData(ext, path, verbose)
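A short usage sketch for the extension-aware loader, following the call form its own docstring suggests; the file name is hypothetical:

td = TableData.load_table('inventory.xlsx')  # dispatches on the extension (here to XLRDParser)
td.show()                                    # prints every row plus 'Table size is C x R (cols x rows)'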
def XLRDParser(self, infile): "\n Parses old excel file into tableData object. Only first sheet.\n\n Don't use this directly, use \n td=TableData('xls', infile)\n td=TableData.load_table(infile)\n instead\n \n xlrd uses UTF16. What comes out of here?\n \n TO DO: \n 1. better tests for\n -Unicode issues not tested\n -Excel data fields change appearance\n 2. conversion/transformation stuff\n " import xlrd import xlrd.sheet from xlrd.sheet import ctype_text self.table = [] self.verbose(('xlrd infile %s' % infile)) wb = xlrd.open_workbook(filename=infile, on_demand=True) sheet = wb.sheet_by_index(0) for r in range(0, sheet.nrows): row = [] for c in range(sheet.ncols): cell = sheet.cell(r, c) cellTypeStr = ctype_text.get(cell.ctype, 'unknown type') val = cell.value if (cellTypeStr == 'number'): val = int(float(val)) elif (cellTypeStr == 'xldate'): val = xlrd.xldate.xldate_as_datetime(val, 0) row.append(val) self.table.append(row) wb.unload_sheet(0)
-2,494,149,109,274,382,000
Parses old excel file into tableData object. Only first sheet. Don't use this directly, use td=TableData('xls', infile) td=TableData.load_table(infile) instead xlrd uses UTF16. What comes out of here? TO DO: 1. better tests for -Unicode issues not tested -Excel data fields change appearance 2. conversion/transformation stuff
TableData.py
XLRDParser
mokko/Py-TableData
python
def XLRDParser(self, infile): "\n Parses old excel file into tableData object. Only first sheet.\n\n Don't use this directly, use \n td=TableData('xls', infile)\n td=TableData.load_table(infile)\n instead\n \n xlrd uses UTF16. What comes out of here?\n \n TO DO: \n 1. better tests for\n -Unicode issues not tested\n -Excel data fields change appearance\n 2. conversion/transformation stuff\n " import xlrd import xlrd.sheet from xlrd.sheet import ctype_text self.table = [] self.verbose(('xlrd infile %s' % infile)) wb = xlrd.open_workbook(filename=infile, on_demand=True) sheet = wb.sheet_by_index(0) for r in range(0, sheet.nrows): row = [] for c in range(sheet.ncols): cell = sheet.cell(r, c) cellTypeStr = ctype_text.get(cell.ctype, 'unknown type') val = cell.value if (cellTypeStr == 'number'): val = int(float(val)) elif (cellTypeStr == 'xldate'): val = xlrd.xldate.xldate_as_datetime(val, 0) row.append(val) self.table.append(row) wb.unload_sheet(0)
def ncols(self): '\n Returns integer with number of columns in table data\n ' return len(self.table[0])
-1,986,639,562,952,319,500
Returns integer with number of columns in table data
TableData.py
ncols
mokko/Py-TableData
python
def ncols(self): '\n \n ' return len(self.table[0])
def nrows(self): '\n Returns integer with number of rows in table data\n ' return len(self.table)
3,428,862,989,251,994,600
Returns integer with number of rows in table data
TableData.py
nrows
mokko/Py-TableData
python
def nrows(self): '\n \n ' return len(self.table)
def cell(self, col, row): "\n Return a cell for col,row.\n td.cell(col,row)\n\n Throws exception if col or row are not integer or out of range.\n What happens on empty cell?\n \n I stick to x|y format, although row|col might be more pythonic.\n \n Empty cell is '' not None.\n " try: return self.table[row][col] except: self.verbose(('%i|%i doesnt exist' % (col, row))) exit(1)
5,786,433,765,263,158,000
Return a cell for col,row. td.cell(col,row) Throws exception if col or row are not integer or out of range. What happens on empty cell? I stick to x|y format, although row|col might be more pythonic. Empty cell is '' not None.
TableData.py
cell
mokko/Py-TableData
python
def cell(self, col, row): "\n Return a cell for col,row.\n td.cell(col,row)\n\n Throws exception if col or row are not integer or out of range.\n What happens on empty cell?\n \n I stick to x|y format, although row|col might be more pythonic.\n \n Empty cell is '' not None.\n " try: return self.table[row][col] except: self.verbose(('%i|%i doesnt exist' % (col, row))) exit(1)
def cindex(self, needle): "\n Returns the column index (c) for column name 'needle'.\n \n Throws 'not in list' if 'needle' is not a column name (cname).\n " return self.table[0].index(needle)
-5,242,650,936,641,615,000
Returns the column index (c) for column name 'needle'. Throws 'not in list' if 'needle' is not a column name (cname).
TableData.py
cindex
mokko/Py-TableData
python
def cindex(self, needle): "\n Returns the column index (c) for column name 'needle'.\n \n Throws 'not in list' if 'needle' is not a column name (cname).\n " return self.table[0].index(needle)
def search(self, needle): '\n Returns list of cells [cid,rid] that contain the needle.\n r=td.search(needle) # (1,1)\n \n \n tuples, lists? I am not quite sure! \n ' results = [] for rid in range(0, self.nrows()): for cid in range(0, self.ncols()): cell = self.cell(cid, rid) if (str(needle) in str(cell)): results.append((cid, rid)) return results
-9,116,779,920,000,777,000
Returns list of cells [cid,rid] that contain the needle. r=td.search(needle) # (1,1) tuples, lists? I am not quite sure!
TableData.py
search
mokko/Py-TableData
python
def search(self, needle): '\n Returns list of cells [cid,rid] that contain the needle.\n r=td.search(needle) # (1,1)\n \n \n tuples, lists? I am not quite sure! \n ' results = [] for rid in range(0, self.nrows()): for cid in range(0, self.ncols()): cell = self.cell(cid, rid) if (str(needle) in str(cell)): results.append((cid, rid)) return results
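Putting the accessors above together on a small table; the CSV name and its contents are assumed:

td = TableData.load_table('people.csv')  # assumed header row: name, age
c = td.cindex('name')                    # column index of 'name'
print(td.cell(c, 1))                     # cell(col, row): first data row of that column
print(td.search('Smith'))                # e.g. [(0, 3)] -- (col, row) pairs containing the needle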
def search_col(self, cname, needle): '\n Returns a list of rows that contain the needle for the given col.\n td.search_col(cname, needle)\n ' results = [] c = self.cindex(cname) for rid in range(0, self.nrows()): if (needle in str(self.cell(c, rid))): results.append(rid) return results
5,397,219,895,814,539,000
Returns a list of rows that contain the needle for the given col. td.search_col(cname, needle)
TableData.py
search_col
mokko/Py-TableData
python
def search_col(self, cname, needle): '\n Returns a list of rows that contain the needle for the given col.\n td.search_col(cname, needle)\n ' results = [] c = self.cindex(cname) for rid in range(0, self.nrows()): if (needle in str(self.cell(c, rid))): results.append(rid) return results
def show(self): '\n print representation of table\n \n Really print? Why not.\n ' for row in self.table: print(row) print(('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows())))
6,122,015,028,421,865,000
print representation of table Really print? Why not.
TableData.py
show
mokko/Py-TableData
python
def show(self): '\n print representation of table\n \n Really print? Why not.\n ' for row in self.table: print(row) print(('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows())))
def delRow(self, r): '\n Drop a row by number.\n \n Need to remake the index to cover the hole.\n ' self.table.pop(r)
-463,386,055,434,054,660
Drop a row by number. Need to remake the index to cover the hole.
TableData.py
delRow
mokko/Py-TableData
python
def delRow(self, r): '\n Drop a row by number.\n \n Need to remake the index to cover the hole.\n ' self.table.pop(r)
def delCol(self, cname): '\n Drop a column by cname\n \n (Not tested.)\n ' c = self.cindex(cname) for r in range(0, self.nrows()): self.table[r].pop(c)
726,440,151,422,467,200
Drop a column by cname (Not tested.)
TableData.py
delCol
mokko/Py-TableData
python
def delCol(self, cname): '\n Drop a column by cname\n \n (Not tested.)\n ' c = self.cindex(cname) for r in range(0, self.nrows()): self.table[r].pop(c)
def addCol(self, name): '\n Add a new column called name at the end of the row. \n Cells will be empty.\n\n Returns the cid of the new column, same as cindex(cname).\n ' self.table[0].append(name) self._uniqueColumns() for rid in range(1, self.nrows()): self.table[rid].append('') return (len(self.table[0]) - 1)
1,757,653,220,642,044,200
Add a new column called name at the end of the row. Cells will be empty. Returns the cid of the new column, same as cindex(cname).
TableData.py
addCol
mokko/Py-TableData
python
def addCol(self, name): '\n Add a new column called name at the end of the row. \n Cells will be empty.\n\n Returns the cid of the new column, same as cindex(cname).\n ' self.table[0].append(name) self._uniqueColumns() for rid in range(1, self.nrows()): self.table[rid].append('') return (len(self.table[0]) - 1)
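addCol appends an empty column and returns its index, so follow-up writes can use cell coordinates directly; a small sketch continuing the td above:

cid = td.addCol('status')  # same value cindex('status') would return
td.table[1][cid] = 'open'  # fill the first data row of the new column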
def delCellAIfColBEq(self, cnameA, cnameB, needle): '\n empty cell in column cnameA if value in column cnameB equals needle in every row\n \n untested\n ' colA = self.cindex(cnameA) colB = self.cindex(cnameB) for rid in range(1, self.nrows()): if (self.table[rid][colB] == needle): self.verbose(('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))) self.table[rid][colA] = ''
4,673,846,665,272,713,000
empty cell in column cnameA if value in column cnameB equals needle in every row untested
TableData.py
delCellAIfColBEq
mokko/Py-TableData
python
def delCellAIfColBEq(self, cnameA, cnameB, needle): '\n empty cell in column cnameA if value in column cnameB equals needle in every row\n \n untested\n ' colA = self.cindex(cnameA) colB = self.cindex(cnameB) for rid in range(1, self.nrows()): if (self.table[rid][colB] == needle): self.verbose(('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))) self.table[rid][colA] = ''
def delRowIfColContains(self, cname, needle): "\n Delete row if the cell in column cname contains the value 'needle'\n\n Should we use cname or c (colId)?\n " col = self.cindex(cname) r = (self.nrows() - 1) while (r >= 1): cell = self.cell(col, r) if (needle in str(cell)): self.delRow(r) r -= 1
2,724,569,938,249,150,500
Delete row if the cell in column cname contains the value 'needle' Should we use cname or c (colId)?
TableData.py
delRowIfColContains
mokko/Py-TableData
python
def delRowIfColContains(self, cname, needle): "\n Delete row if the cell in column cname contains the value 'needle'\n\n Should we use cname or c (colId)?\n " col = self.cindex(cname) r = (self.nrows() - 1) while (r >= 1): cell = self.cell(col, r) if (needle in str(cell)): self.delRow(r) r -= 1
def renameCol(self, cnameOld, cnameNew): '\n renames column cnameOld into cnameNew\n ' c = self.cindex(cnameOld) self.table[0][c] = cnameNew
150,716,984,456,689,950
renames column cnameOld into cnameNew
TableData.py
renameCol
mokko/Py-TableData
python
def renameCol(self, cnameOld, cnameNew): '\n \n ' c = self.cindex(cnameOld) self.table[0][c] = cnameNew
def default_per_col(self, cname, default_value): "\n Default Value: if cell is empty replace with default value\n self.default_per_col('status', 'filled')\n " cid = self.cindex(cname) for rid in range(1, self.nrows()): if (not self.cell(cid, rid)): self.table[rid][cid] = default_value
3,105,138,167,014,666,000
Default Value: if cell is empty replace with default value self.default_per_col('status', 'filled')
TableData.py
default_per_col
mokko/Py-TableData
python
def default_per_col(self, cname, default_value): "\n Default Value: if cell is empty replace with default value\n self.default_per_col('status', 'filled')\n " cid = self.cindex(cname) for rid in range(1, self.nrows()): if (not self.cell(cid, rid)): self.table[rid][cid] = default_value
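With the signature corrected to take self (as above), default_per_col fills every blank cell in one column; sketch:

td.addCol('status')                     # a new, entirely empty column
td.default_per_col('status', 'filled')  # every empty cell in 'status' becomes 'filled'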
def write(self, out): '\n write to file with extension-awareness\n ' ext = os.path.splitext(out)[1][1:].lower() if (ext == 'xml'): self.writeXML(out) elif (ext == 'csv'): self.writeCSV(out) elif (ext == 'json'): self.writeJSON(out) else: print(('Format %s not recognized' % ext))
-4,998,446,517,376,200,000
write to file with extension-awareness
TableData.py
write
mokko/Py-TableData
python
def write(self, out): '\n \n ' ext = os.path.splitext(out)[1][1:].lower() if (ext == 'xml'): self.writeXML(out) elif (ext == 'csv'): self.writeCSV(out) elif (ext == 'json'): self.writeJSON(out) else: print(('Format %s not recognized' % ext))
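The writer dispatches on the output extension, mirroring load_table on the input side; the file names are illustrative:

td.write('export.csv')   # -> writeCSV
td.write('export.xml')   # -> writeXML
td.write('export.json')  # -> writeJSON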
def writeCSV(self, outfile): '\n writes data in tableData object to outfile in csv format\n \n Values with commas are quoted. \n ' import csv self._outTest(outfile) with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile: out = csv.writer(csvfile, dialect='excel') for r in range(0, self.nrows()): row = self.table[r] out.writerow(row) self.verbose(('csv written to %s' % outfile))
2,598,109,210,853,169,000
writes data in tableData object to outfile in csv format Values with commas are quoted.
TableData.py
writeCSV
mokko/Py-TableData
python
def writeCSV(self, outfile): '\n writes data in tableData object to outfile in csv format\n \n Values with commas are quoted. \n ' import csv self._outTest(outfile) with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile: out = csv.writer(csvfile, dialect='excel') for r in range(0, self.nrows()): row = self.table[r] out.writerow(row) self.verbose(('csv written to %s' % outfile))
def writeXML(self, out): '\n writes table data to file out in xml format\n ' import xml.etree.ElementTree as ET from xml.sax.saxutils import escape root = ET.Element('tdx') self._outTest(out) def _indent(elem, level=0): i = ('\n' + (level * ' ')) if len(elem): if ((not elem.text) or (not elem.text.strip())): elem.text = (i + ' ') if ((not elem.tail) or (not elem.tail.strip())): elem.tail = i for elem in elem: _indent(elem, (level + 1)) if ((not elem.tail) or (not elem.tail.strip())): elem.tail = i elif (level and ((not elem.tail) or (not elem.tail.strip()))): elem.tail = i for r in range(1, self.nrows()): doc = ET.SubElement(root, 'row') for c in range(0, self.ncols()): cell = self.cell(c, r) if (cell or (r == 1)): ET.SubElement(doc, self.table[0][c]).text = escape(str(cell)) tree = ET.ElementTree(root) _indent(root) tree.write(out, encoding='UTF-8', xml_declaration=True) self.verbose(('xml written to %s' % out))
-4,343,073,205,336,348,700
writes table data to file out in xml format
TableData.py
writeXML
mokko/Py-TableData
python
def writeXML(self, out): '\n \n ' import xml.etree.ElementTree as ET from xml.sax.saxutils import escape root = ET.Element('tdx') self._outTest(out) def _indent(elem, level=0): i = ('\n' + (level * ' ')) if len(elem): if ((not elem.text) or (not elem.text.strip())): elem.text = (i + ' ') if ((not elem.tail) or (not elem.tail.strip())): elem.tail = i for elem in elem: _indent(elem, (level + 1)) if ((not elem.tail) or (not elem.tail.strip())): elem.tail = i elif (level and ((not elem.tail) or (not elem.tail.strip()))): elem.tail = i for r in range(1, self.nrows()): doc = ET.SubElement(root, 'row') for c in range(0, self.ncols()): cell = self.cell(c, r) if (cell or (r == 1)): ET.SubElement(doc, self.table[0][c]).text = escape(str(cell)) tree = ET.ElementTree(root) _indent(root) tree.write(out, encoding='UTF-8', xml_declaration=True) self.verbose(('xml written to %s' % out))
def writeJSON(self, out): "\n Writes table data in json to file out\n \n JSON doesn't have date type, hence default=str\n " import json self._outTest(out) f = open(out, 'w') with f as outfile: json.dump(self.table, outfile, default=str) self.verbose(('json written to %s' % out))
-8,355,916,370,640,608,000
Writes table data in json to file out JSON doesn't have date type, hence default=str
TableData.py
writeJSON
mokko/Py-TableData
python
def writeJSON(self, out): "\n Writes table data in json to file out\n \n JSON doesn't have date type, hence default=str\n " import json self._outTest(out) f = open(out, 'w') with f as outfile: json.dump(self.table, outfile, default=str) self.verbose(('json written to %s' % out))
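The default=str argument above is what lets datetime cells (produced by XLRDParser's xldate handling) survive JSON serialization; a tiny standalone illustration:

import datetime
import json

cell = datetime.datetime(2024, 1, 31)
print(json.dumps([cell], default=str))  # ["2024-01-31 00:00:00"]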
def teams_add_user_to_team_by_batch_v1(self, add_user_to_team_by_batch_request, **kwargs): 'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_by_batch_v1(add_user_to_team_by_batch_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: AddUserToTeamByBatchResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, **kwargs)
7,479,452,620,593,596,000
Add users to a team by batch # noqa: E501 Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_by_batch_v1(add_user_to_team_by_batch_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: AddUserToTeamByBatchResponse If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_add_user_to_team_by_batch_v1
greenpau/pycherwell
python
def teams_add_user_to_team_by_batch_v1(self, add_user_to_team_by_batch_request, **kwargs): 'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_by_batch_v1(add_user_to_team_by_batch_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: AddUserToTeamByBatchResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, **kwargs)
def teams_add_user_to_team_by_batch_v1_with_http_info(self, add_user_to_team_by_batch_request, **kwargs): 'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(AddUserToTeamByBatchResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['add_user_to_team_by_batch_request'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_add_user_to_team_by_batch_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('add_user_to_team_by_batch_request' not in local_var_params) or (local_var_params['add_user_to_team_by_batch_request'] is None))): raise ApiValueError('Missing the required parameter `add_user_to_team_by_batch_request` when calling `teams_add_user_to_team_by_batch_v1`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('add_user_to_team_by_batch_request' in local_var_params): body_params = local_var_params['add_user_to_team_by_batch_request'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/addusertoteambybatch', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AddUserToTeamByBatchResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
-6,559,755,752,125,099,000
Add users to a team by batch # noqa: E501 Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(AddUserToTeamByBatchResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_add_user_to_team_by_batch_v1_with_http_info
greenpau/pycherwell
python
def teams_add_user_to_team_by_batch_v1_with_http_info(self, add_user_to_team_by_batch_request, **kwargs): 'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(AddUserToTeamByBatchResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['add_user_to_team_by_batch_request'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_add_user_to_team_by_batch_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('add_user_to_team_by_batch_request' not in local_var_params) or (local_var_params['add_user_to_team_by_batch_request'] is None))): raise ApiValueError('Missing the required parameter `add_user_to_team_by_batch_request` when calling `teams_add_user_to_team_by_batch_v1`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('add_user_to_team_by_batch_request' in local_var_params): body_params = local_var_params['add_user_to_team_by_batch_request'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/addusertoteambybatch', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AddUserToTeamByBatchResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
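A usage sketch for the generated Cherwell client, following the async pattern the docstrings themselves describe; the host is a placeholder, and the exact required fields of AddUserToTeamByBatchRequest depend on the generated model:

import pycherwell

configuration = pycherwell.Configuration()
configuration.host = 'https://cherwell.example.com/CherwellAPI'  # hypothetical server
api = pycherwell.TeamsApi(pycherwell.ApiClient(configuration))

request = pycherwell.AddUserToTeamByBatchRequest()  # populate per the generated model's fields
thread = api.teams_add_user_to_team_by_batch_v1(request, async_req=True)
response = thread.get()  # an AddUserToTeamByBatchResponse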
def teams_add_user_to_team_v1(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v1(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, **kwargs)
5,251,959,941,015,588,000
Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v1(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_add_user_to_team_v1
greenpau/pycherwell
python
def teams_add_user_to_team_v1(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v1(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, **kwargs)
def teams_add_user_to_team_v1_with_http_info(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['add_user_to_team_request'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_add_user_to_team_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('add_user_to_team_request' not in local_var_params) or (local_var_params['add_user_to_team_request'] is None))): raise ApiValueError('Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v1`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('add_user_to_team_request' in local_var_params): body_params = local_var_params['add_user_to_team_request'] header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/addusertoteam', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
-369,934,815,964,429,950
Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_add_user_to_team_v1_with_http_info
greenpau/pycherwell
python
def teams_add_user_to_team_v1_with_http_info(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['add_user_to_team_request'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_add_user_to_team_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('add_user_to_team_request' not in local_var_params) or (local_var_params['add_user_to_team_request'] is None))): raise ApiValueError('Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v1`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('add_user_to_team_request' in local_var_params): body_params = local_var_params['add_user_to_team_request'] header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/addusertoteam', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
def teams_add_user_to_team_v2(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v2(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: AddUserToTeamResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, **kwargs)
-794,480,889,220,743,600
Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v2(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: AddUserToTeamResponse If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_add_user_to_team_v2
greenpau/pycherwell
python
def teams_add_user_to_team_v2(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v2(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: AddUserToTeamResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, **kwargs)
def teams_add_user_to_team_v2_with_http_info(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(AddUserToTeamResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['add_user_to_team_request'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_add_user_to_team_v2" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('add_user_to_team_request' not in local_var_params) or (local_var_params['add_user_to_team_request'] is None))): raise ApiValueError('Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v2`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('add_user_to_team_request' in local_var_params): body_params = local_var_params['add_user_to_team_request'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V2/addusertoteam', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AddUserToTeamResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
6,207,243,840,170,479,000
Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(AddUserToTeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_add_user_to_team_v2_with_http_info
greenpau/pycherwell
python
def teams_add_user_to_team_v2_with_http_info(self, add_user_to_team_request, **kwargs): 'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(AddUserToTeamResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['add_user_to_team_request'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_add_user_to_team_v2" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('add_user_to_team_request' not in local_var_params) or (local_var_params['add_user_to_team_request'] is None))): raise ApiValueError('Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v2`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('add_user_to_team_request' in local_var_params): body_params = local_var_params['add_user_to_team_request'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V2/addusertoteam', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AddUserToTeamResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
def teams_delete_team_v1(self, teamid, **kwargs): 'Delete a Team # noqa: E501\n\n Operation to delete a Team by Team ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_delete_team_v1(teamid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str teamid: Specify the Team ID. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_delete_team_v1_with_http_info(teamid, **kwargs)
-7,163,944,717,466,169,000
Delete a Team # noqa: E501 Operation to delete a Team by Team ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_delete_team_v1(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: Specify the Team ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_delete_team_v1
greenpau/pycherwell
python
def teams_delete_team_v1(self, teamid, **kwargs): 'Delete a Team # noqa: E501\n\n Operation to delete a Team by Team ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_delete_team_v1(teamid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str teamid: Specify the Team ID. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_delete_team_v1_with_http_info(teamid, **kwargs)
def teams_delete_team_v1_with_http_info(self, teamid, **kwargs): 'Delete a Team # noqa: E501\n\n Operation to delete a Team by Team ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_delete_team_v1_with_http_info(teamid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str teamid: Specify the Team ID. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['teamid'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_delete_team_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('teamid' not in local_var_params) or (local_var_params['teamid'] is None))): raise ApiValueError('Missing the required parameter `teamid` when calling `teams_delete_team_v1`') collection_formats = {} path_params = {} if ('teamid' in local_var_params): path_params['teamid'] = local_var_params['teamid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api('/api/V1/deleteteam/{teamid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
-2,856,693,656,659,794,000
Delete a Team # noqa: E501 Operation to delete a Team by Team ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_delete_team_v1_with_http_info(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: Specify the Team ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_delete_team_v1_with_http_info
greenpau/pycherwell
python
def teams_delete_team_v1_with_http_info(self, teamid, **kwargs): local_var_params = locals() all_params = ['teamid'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_delete_team_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('teamid' not in local_var_params) or (local_var_params['teamid'] is None))): raise ApiValueError('Missing the required parameter `teamid` when calling `teams_delete_team_v1`') collection_formats = {} path_params = {} if ('teamid' in local_var_params): path_params['teamid'] = local_var_params['teamid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api('/api/V1/deleteteam/{teamid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
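A hedged sketch of calling the delete operation above with error handling. The import path pycherwell.rest.ApiException and the bootstrap line follow the standard openapi-generator layout and are assumptions.

import pycherwell
from pycherwell.rest import ApiException  # standard generated location (assumed)

api = pycherwell.TeamsApi(pycherwell.ApiClient(pycherwell.Configuration()))  # assumed bootstrap
try:
    api.teams_delete_team_v1('<team-id>')  # returns None on success
except ApiException as err:
    print('deleteteam failed with status %s' % err.status)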
def teams_get_team_v1(self, teamid, **kwargs): 'Get a team by its TeamId # noqa: E501\n\n Operation to get Team Info for a single Team using its Team ID. To get a Team\'s internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_team_v1(teamid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str teamid: The Team ID of the Team to get. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: TeamResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_get_team_v1_with_http_info(teamid, **kwargs)
1,252,965,615,686,424,600
Get a team by its TeamId # noqa: E501 Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_team_v1(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: The Team ID of the Team to get. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamResponse If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_team_v1
greenpau/pycherwell
python
def teams_get_team_v1(self, teamid, **kwargs): kwargs['_return_http_data_only'] = True return self.teams_get_team_v1_with_http_info(teamid, **kwargs)
def teams_get_team_v1_with_http_info(self, teamid, **kwargs): 'Get a team by its TeamId # noqa: E501\n\n Operation to get Team Info for a single Team using its Team ID. To get a Team\'s internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_team_v1_with_http_info(teamid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str teamid: The Team ID of the Team to get. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(TeamResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['teamid'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_team_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('teamid' not in local_var_params) or (local_var_params['teamid'] is None))): raise ApiValueError('Missing the required parameter `teamid` when calling `teams_get_team_v1`') collection_formats = {} path_params = {} if ('teamid' in local_var_params): path_params['teamid'] = local_var_params['teamid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getteam/{teamid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
-6,890,980,933,143,454,000
Get a team by its TeamId # noqa: E501 Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_team_v1_with_http_info(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: The Team ID of the Team to get. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_team_v1_with_http_info
greenpau/pycherwell
python
def teams_get_team_v1_with_http_info(self, teamid, **kwargs): local_var_params = locals() all_params = ['teamid'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_team_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('teamid' not in local_var_params) or (local_var_params['teamid'] is None))): raise ApiValueError('Missing the required parameter `teamid` when calling `teams_get_team_v1`') collection_formats = {} path_params = {} if ('teamid' in local_var_params): path_params['teamid'] = local_var_params['teamid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getteam/{teamid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
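A minimal sketch of fetching a single Team by its internal ID, assuming the same hypothetical bootstrap as above; the attribute name on the returned model is not confirmed by this record.

import pycherwell

api = pycherwell.TeamsApi(pycherwell.ApiClient(pycherwell.Configuration()))  # assumed bootstrap
team = api.teams_get_team_v1('<team-id>')  # TeamResponse
# Per the docstring above, TeamType is 0 for User (CSM Users), 1 for Workgroup (CSM Customers).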
def teams_get_teams_v1(self, **kwargs): 'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_teams_v1(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: TeamsResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_get_teams_v1_with_http_info(**kwargs)
-6,170,501,011,833,153,000
Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v1(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_teams_v1
greenpau/pycherwell
python
def teams_get_teams_v1(self, **kwargs): kwargs['_return_http_data_only'] = True return self.teams_get_teams_v1_with_http_info(**kwargs)
def teams_get_teams_v1_with_http_info(self, **kwargs): 'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_teams_v1_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_teams_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getteams', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
-2,534,725,942,223,735,000
Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v1_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_teams_v1_with_http_info
greenpau/pycherwell
python
def teams_get_teams_v1_with_http_info(self, **kwargs): local_var_params = locals() all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_teams_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getteams', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
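A sketch of the _with_http_info variant, whose documented return is the (data, status_code, headers) triple; the bootstrap line remains an assumption.

import pycherwell

api = pycherwell.TeamsApi(pycherwell.ApiClient(pycherwell.Configuration()))  # assumed bootstrap
teams, status, headers = api.teams_get_teams_v1_with_http_info()  # (TeamsResponse, int, headers)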
def teams_get_teams_v2(self, **kwargs): 'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_teams_v2(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: TeamsV2Response\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_get_teams_v2_with_http_info(**kwargs)
8,171,048,766,474,264,000
Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v2(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_teams_v2
greenpau/pycherwell
python
def teams_get_teams_v2(self, **kwargs): kwargs['_return_http_data_only'] = True return self.teams_get_teams_v2_with_http_info(**kwargs)
def teams_get_teams_v2_with_http_info(self, **kwargs): 'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_teams_v2_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_teams_v2" % key)) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V2/getteams', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsV2Response', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
-1,948,090,591,928,988,000
Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v2_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_teams_v2_with_http_info
greenpau/pycherwell
python
def teams_get_teams_v2_with_http_info(self, **kwargs): local_var_params = locals() all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_teams_v2" % key)) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V2/getteams', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsV2Response', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
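The V2 variant differs only in its response model. The asynchronous pattern below mirrors the docstring's own >>> example; the bootstrap line is still an assumption.

import pycherwell

api = pycherwell.TeamsApi(pycherwell.ApiClient(pycherwell.Configuration()))  # assumed bootstrap
thread = api.teams_get_teams_v2(async_req=True)  # returns immediately with a thread handle
teams = thread.get()                             # block until the TeamsV2Response arrives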
def teams_get_users_teams_v1(self, user_record_id, **kwargs): 'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_users_teams_v1(user_record_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str user_record_id: Specify the user record ID. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: TeamsResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_get_users_teams_v1_with_http_info(user_record_id, **kwargs)
-6,103,791,973,675,825,000
Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v1(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_users_teams_v1
greenpau/pycherwell
python
def teams_get_users_teams_v1(self, user_record_id, **kwargs): kwargs['_return_http_data_only'] = True return self.teams_get_users_teams_v1_with_http_info(user_record_id, **kwargs)
def teams_get_users_teams_v1_with_http_info(self, user_record_id, **kwargs): 'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_users_teams_v1_with_http_info(user_record_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str user_record_id: Specify the user record ID. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['user_record_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_users_teams_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('user_record_id' not in local_var_params) or (local_var_params['user_record_id'] is None))): raise ApiValueError('Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v1`') collection_formats = {} path_params = {} if ('user_record_id' in local_var_params): path_params['userRecordId'] = local_var_params['user_record_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getusersteams/userrecordid/{userRecordId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
4,781,741,475,574,834,000
Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v1_with_http_info(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_users_teams_v1_with_http_info
greenpau/pycherwell
python
def teams_get_users_teams_v1_with_http_info(self, user_record_id, **kwargs): local_var_params = locals() all_params = ['user_record_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_users_teams_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('user_record_id' not in local_var_params) or (local_var_params['user_record_id'] is None))): raise ApiValueError('Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v1`') collection_formats = {} path_params = {} if ('user_record_id' in local_var_params): path_params['userRecordId'] = local_var_params['user_record_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getusersteams/userrecordid/{userRecordId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
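A minimal sketch of listing a user's Team assignments, under the same assumed bootstrap; the record ID placeholder is hypothetical.

import pycherwell

api = pycherwell.TeamsApi(pycherwell.ApiClient(pycherwell.Configuration()))  # assumed bootstrap
# The record ID comes from "Get a user by login ID" or "Get a user by public ID".
teams = api.teams_get_users_teams_v1('<user-record-id>')  # TeamsResponse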
def teams_get_users_teams_v2(self, user_record_id, **kwargs): 'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_users_teams_v2(user_record_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str user_record_id: Specify the user record ID. (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: TeamsV2Response\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_get_users_teams_v2_with_http_info(user_record_id, **kwargs)
-232,844,477,654,339,100
Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v2(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_users_teams_v2
greenpau/pycherwell
python
def teams_get_users_teams_v2(self, user_record_id, **kwargs): kwargs['_return_http_data_only'] = True return self.teams_get_users_teams_v2_with_http_info(user_record_id, **kwargs)
def teams_get_users_teams_v2_with_http_info(self, user_record_id, **kwargs): 'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_users_teams_v2_with_http_info(user_record_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str user_record_id: Specify the user record ID. (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = ['user_record_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_users_teams_v2" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('user_record_id' not in local_var_params) or (local_var_params['user_record_id'] is None))): raise ApiValueError('Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v2`') collection_formats = {} path_params = {} if ('user_record_id' in local_var_params): path_params['userRecordId'] = local_var_params['user_record_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V2/getusersteams/userrecordid/{userRecordId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsV2Response', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
6,970,159,966,320,876,000
Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v2_with_http_info(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_users_teams_v2_with_http_info
greenpau/pycherwell
python
def teams_get_users_teams_v2_with_http_info(self, user_record_id, **kwargs): local_var_params = locals() all_params = ['user_record_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_users_teams_v2" % key)) local_var_params[key] = val del local_var_params['kwargs'] if (self.api_client.client_side_validation and (('user_record_id' not in local_var_params) or (local_var_params['user_record_id'] is None))): raise ApiValueError('Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v2`') collection_formats = {} path_params = {} if ('user_record_id' in local_var_params): path_params['userRecordId'] = local_var_params['user_record_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V2/getusersteams/userrecordid/{userRecordId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsV2Response', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
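A sketch of the V2 call with an explicit request timeout, as documented in the docstring above; bootstrap and placeholder ID are assumptions.

import pycherwell

api = pycherwell.TeamsApi(pycherwell.ApiClient(pycherwell.Configuration()))  # assumed bootstrap
# Pass a (connect, read) tuple, or a single number for a total timeout.
teams, status, headers = api.teams_get_users_teams_v2_with_http_info(
    '<user-record-id>', _request_timeout=(3.0, 10.0))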
def teams_get_workgroups_v1(self, **kwargs): 'Get all available Workgroups # noqa: E501\n\n Operation to get IDs and names for all available Workgroups. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_workgroups_v1(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: TeamsResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True return self.teams_get_workgroups_v1_with_http_info(**kwargs)
-36,594,141,104,595,010
Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v1(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_workgroups_v1
greenpau/pycherwell
python
def teams_get_workgroups_v1(self, **kwargs): kwargs['_return_http_data_only'] = True return self.teams_get_workgroups_v1_with_http_info(**kwargs)
def teams_get_workgroups_v1_with_http_info(self, **kwargs): 'Get all available Workgroups # noqa: E501\n\n Operation to get IDs and names for all available Workgroups. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_get_workgroups_v1_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n ' local_var_params = locals() all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_workgroups_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getworkgroups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
-671,416,137,976,133,400
Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v1_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
pycherwell/api/teams_api.py
teams_get_workgroups_v1_with_http_info
greenpau/pycherwell
python
def teams_get_workgroups_v1_with_http_info(self, **kwargs): local_var_params = locals() all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for (key, val) in six.iteritems(local_var_params['kwargs']): if (key not in all_params): raise ApiTypeError(("Got an unexpected keyword argument '%s' to method teams_get_workgroups_v1" % key)) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api('/api/V1/getworkgroups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
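A minimal sketch of listing Workgroups, the customer-facing counterpart of Teams (TeamType 1 in the get-team record above); the bootstrap line is an assumption.

import pycherwell

api = pycherwell.TeamsApi(pycherwell.ApiClient(pycherwell.Configuration()))  # assumed bootstrap
workgroups = api.teams_get_workgroups_v1()  # TeamsResponse of customer Workgroups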